max_stars_repo_path (string, 3–269 chars) | max_stars_repo_name (string, 4–119 chars) | max_stars_count (int64, 0–191k) | id (string, 1–7 chars) | content (string, 6–1.05M chars) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
flatisfy/cmds.py | guix77/Flatisfy | 0 | 12771351 | # coding: utf-8
"""
Main commands available for flatisfy.
"""
from __future__ import absolute_import, print_function, unicode_literals
import collections
import logging
import os
import flatisfy.filters
from flatisfy import database
from flatisfy import email
from flatisfy.models import flat as flat_model
from flatisfy.models import postal_code as postal_code_model
from flatisfy.models import public_transport as public_transport_model
from flatisfy import fetch
from flatisfy import tools
from flatisfy.filters import metadata
from flatisfy.web import app as web_app
LOGGER = logging.getLogger(__name__)
def filter_flats_list(config, constraint_name, flats_list, fetch_details=True):
"""
    Filter the available flats list according to the given constraint, in up to three passes.
:param config: A config dict.
:param constraint_name: The constraint name that the ``flats_list`` should
satisfy.
:param fetch_details: Whether additional details should be fetched between
the two passes.
:param flats_list: The initial list of flat objects to filter.
:return: A dict mapping flat status and list of flat objects.
"""
# Add the flatisfy metadata entry and prepare the flat objects
flats_list = metadata.init(flats_list, constraint_name)
# Get the associated constraint from config
try:
constraint = config["constraints"][constraint_name]
except KeyError:
LOGGER.error(
"Missing constraint %s. Skipping filtering for these posts.",
constraint_name
)
return {
"new": [],
"duplicate": [],
"ignored": []
}
first_pass_result = collections.defaultdict(list)
second_pass_result = collections.defaultdict(list)
third_pass_result = collections.defaultdict(list)
    # Do a first pass with the available infos to try to remove as many
    # unwanted postings as possible
if config["passes"] > 0:
first_pass_result = flatisfy.filters.first_pass(flats_list,
constraint,
config)
else:
first_pass_result["new"] = flats_list
# Load additional infos
if fetch_details:
for i, flat in enumerate(first_pass_result["new"]):
details = fetch.fetch_details(config, flat["id"])
first_pass_result["new"][i] = tools.merge_dicts(flat, details)
# Do a second pass to consolidate all the infos we found and make use of
# additional infos
if config["passes"] > 1:
second_pass_result = flatisfy.filters.second_pass(
first_pass_result["new"], constraint, config
)
else:
second_pass_result["new"] = first_pass_result["new"]
# Do a third pass to deduplicate better
if config["passes"] > 2:
third_pass_result = flatisfy.filters.third_pass(
second_pass_result["new"],
config
)
else:
third_pass_result["new"] = second_pass_result["new"]
return {
"new": third_pass_result["new"],
"duplicate": (
first_pass_result["duplicate"] +
second_pass_result["duplicate"] +
third_pass_result["duplicate"]
),
"ignored": (
first_pass_result["ignored"] +
second_pass_result["ignored"] +
third_pass_result["ignored"]
)
}
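# Illustrative shape of the value returned above (the flat dicts shown are
# made-up placeholders; only the three status keys are guaranteed):
#     {
#         "new": [{"id": "flat-1", ...}],
#         "duplicate": [{"id": "flat-2", ...}],
#         "ignored": [],
#     }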
def filter_fetched_flats(config, fetched_flats, fetch_details=True):
"""
    Filter each fetched flats list according to its constraint.
:param config: A config dict.
:param fetch_details: Whether additional details should be fetched between
the two passes.
:param fetched_flats: The initial dict mapping constraints to the list of
fetched flat objects to filter.
:return: A dict mapping constraints to a dict mapping flat status and list
of flat objects.
"""
for constraint_name, flats_list in fetched_flats.items():
fetched_flats[constraint_name] = filter_flats_list(
config,
constraint_name,
flats_list,
fetch_details
)
return fetched_flats
def import_and_filter(config, load_from_db=False):
"""
Fetch the available flats list. Then, filter it according to criteria.
Finally, store it in the database.
:param config: A config dict.
:param load_from_db: Whether to load flats from database or fetch them
using WebOOB.
:return: ``None``.
"""
# Fetch and filter flats list
if load_from_db:
fetched_flats = fetch.load_flats_from_db(config)
else:
fetched_flats = fetch.fetch_flats(config)
# Do not fetch additional details if we loaded data from the db.
flats_by_status = filter_fetched_flats(config, fetched_flats=fetched_flats,
fetch_details=(not load_from_db))
# Create database connection
get_session = database.init_db(config["database"], config["search_index"])
new_flats = []
LOGGER.info("Merging fetched flats in database...")
# Flatten the flats_by_status dict
flatten_flats_by_status = collections.defaultdict(list)
for flats in flats_by_status.values():
for status, flats_list in flats.items():
flatten_flats_by_status[status].extend(flats_list)
with get_session() as session:
# Set is_expired to true for all existing flats.
# This will be set back to false if we find them during importing.
for flat in session.query(flat_model.Flat).all():
            flat.is_expired = True
for status, flats_list in flatten_flats_by_status.items():
# Build SQLAlchemy Flat model objects for every available flat
flats_objects = {
flat_dict["id"]: flat_model.Flat.from_dict(flat_dict)
for flat_dict in flats_list
}
if flats_objects:
# If there are some flats, try to merge them with the ones in
# db
existing_flats_queries = session.query(flat_model.Flat).filter(
flat_model.Flat.id.in_(flats_objects.keys())
)
for each in existing_flats_queries.all():
# For each flat to merge, take care not to overwrite the
# status if the user defined it
flat_object = flats_objects[each.id]
if each.status in flat_model.AUTOMATED_STATUSES:
flat_object.status = getattr(
flat_model.FlatStatus, status
)
else:
flat_object.status = each.status
# Every flat we fetched isn't expired
flat_object.is_expired = False
# For each flat already in the db, merge it (UPDATE)
# instead of adding it
session.merge(flats_objects.pop(each.id))
# For any other flat, it is not already in the database, so we can
# just set the status field without worrying
for flat in flats_objects.values():
flat.status = getattr(flat_model.FlatStatus, status)
if flat.status == flat_model.FlatStatus.new:
new_flats.append(flat)
session.add_all(flats_objects.values())
if config["send_email"]:
email.send_notification(config, new_flats)
# Touch a file to indicate last update timestamp
ts_file = os.path.join(
config["data_directory"],
"timestamp"
)
with open(ts_file, 'w'):
os.utime(ts_file, None)
LOGGER.info("Done!")
def purge_db(config):
"""
Purge the database.
:param config: A config dict.
:return: ``None``
"""
get_session = database.init_db(config["database"], config["search_index"])
with get_session() as session:
# Delete every flat in the db
LOGGER.info("Purge all flats from the database.")
for flat in session.query(flat_model.Flat).all():
# Use (slower) deletion by object, to ensure whoosh index is
# updated
session.delete(flat)
LOGGER.info("Purge all postal codes from the database.")
session.query(postal_code_model.PostalCode).delete()
LOGGER.info("Purge all public transportations from the database.")
session.query(public_transport_model.PublicTransport).delete()
def serve(config):
"""
Serve the web app.
:param config: A config dict.
:return: ``None``, long-running process.
"""
app = web_app.get_app(config)
server = config.get("webserver", None)
if not server:
# Default webserver is quiet, as Bottle is used with Canister for
# standard logging
server = web_app.QuietWSGIRefServer
app.run(host=config["host"], port=config["port"], server=server)
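# The functions above back Flatisfy's command-line entry points. As a usage
# sketch (the exact CLI flags are defined outside this module and are
# assumptions here, not guarantees):
#     python -m flatisfy import --config config.json   # fetch, filter and store flats
#     python -m flatisfy serve --config config.json    # start the web app served above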
| 2.390625 | 2 |
individuals/arantza/03_language_replacement/paragraph_3.py | periode/software-art-text | 2 | 12771352 | <reponame>periode/software-art-text
#### import modules ####
from textblob import TextBlob
import random
import re
#### import twitter database into program ####
word_database = []
with open('final_dataset.txt') as my_file:
word_database = my_file.read().split()
#### join and process words ####
database_sentence = " ".join(word_database)
new_database_sentence = re.sub(r'http\S+', '', database_sentence)
processed_database_sentence = TextBlob(new_database_sentence)
#### my_grammar dictionary ####
my_grammar = {
"S": ["NP VP NP"],
"NP": ["N", "det N", "det N"],
"det": ["the J"],
"VP": ["V", "can V"],
"N": [""],
"V": [""],
"J": [""]
}
#### append words into dictionary according to their type ####
for word, tag in processed_database_sentence.tags:
## nouns ##
if tag == "NN":
my_grammar['N'].append(word)
## verbs ##
elif tag == 'VB':
my_grammar['V'].append(word)
## adjectives ##
elif tag == 'JJ':
my_grammar['J'].append(word)
#### function for writing a sentence ####
def write_a_sentence(grammar, axiom):
sentence = list()
if axiom in grammar:
expansion = random.choice(grammar[axiom])
for token in expansion.split():
sentence.extend(write_a_sentence(grammar, token))
else:
sentence.append(axiom)
return sentence
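# Quick sanity check of the expansion logic above, using a tiny hand-written
# grammar instead of the Twitter-derived one (an illustrative sketch only):
# every known symbol is rewritten recursively until only plain words remain.
assert write_a_sentence({"S": ["N V"], "N": ["cats"], "V": ["sleep"]}, "S") == ["cats", "sleep"]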
#### outputting the final poem ####
for i in range(0, 2):
words_1 = write_a_sentence(my_grammar, "NP")
my_sentence_1 = " ".join(words_1)
    print(my_sentence_1 + ".")
for i in range(0, 5):
words_2 = write_a_sentence(my_grammar, 'S')
my_sentence_2 = " ".join(words_2)
    print(my_sentence_2 + ".") | 3.125 | 3 |
tutorial/relationship/many_to_many.py | aebrahim/FastAPIQuickCRUD | 0 | 12771353 | import uvicorn
from fastapi import FastAPI, Depends
from sqlalchemy.orm import declarative_base, sessionmaker
from fastapi_quickcrud import CrudMethods
from fastapi_quickcrud import crud_router_builder
from fastapi_quickcrud import sqlalchemy_to_pydantic
from fastapi_quickcrud.misc.memory_sql import sync_memory_db
app = FastAPI()
Base = declarative_base()
metadata = Base.metadata
from sqlalchemy import CHAR, Column, ForeignKey, Integer, Table
from sqlalchemy.orm import relationship
association_table = Table('association', Base.metadata,
Column('left_id', ForeignKey('left.id')),
Column('right_id', ForeignKey('right.id'))
)
class Parent(Base):
__tablename__ = 'left'
id = Column(Integer, primary_key=True)
children = relationship("Child",
secondary=association_table)
class Child(Base):
__tablename__ = 'right'
id = Column(Integer, primary_key=True)
name = Column(CHAR, nullable=True)
user_model_m2m = sqlalchemy_to_pydantic(db_model=association_table,
crud_methods=[
CrudMethods.FIND_MANY,
CrudMethods.UPSERT_ONE,
CrudMethods.UPDATE_MANY,
CrudMethods.DELETE_MANY,
CrudMethods.PATCH_MANY,
],
exclude_columns=[])
user_model_set = sqlalchemy_to_pydantic(db_model=Parent,
crud_methods=[
CrudMethods.FIND_MANY,
CrudMethods.FIND_ONE,
CrudMethods.CREATE_ONE,
CrudMethods.UPDATE_MANY,
CrudMethods.UPDATE_ONE,
CrudMethods.DELETE_ONE,
CrudMethods.DELETE_MANY,
CrudMethods.PATCH_MANY,
],
exclude_columns=[])
friend_model_set = sqlalchemy_to_pydantic(db_model=Child,
crud_methods=[
CrudMethods.FIND_MANY,
CrudMethods.UPSERT_MANY,
CrudMethods.UPDATE_MANY,
CrudMethods.DELETE_MANY,
CrudMethods.CREATE_ONE,
CrudMethods.PATCH_MANY,
],
exclude_columns=[])
crud_route_1 = crud_router_builder(crud_models=user_model_set,
db_model=Parent,
prefix="/Parent",
dependencies=[],
async_mode=True,
tags=["Parent"]
)
crud_route_3 = crud_router_builder(crud_models=user_model_m2m,
db_model=association_table,
prefix="/Parent2child",
dependencies=[],
async_mode=True,
tags=["m2m"]
)
crud_route_2 = crud_router_builder(crud_models=friend_model_set,
db_model=Child,
async_mode=True,
prefix="/Child",
dependencies=[],
tags=["Child"]
)
post_model = friend_model_set.POST[CrudMethods.CREATE_ONE]
sync_memory_db.create_memory_table(Child)
@app.post("/hello",
status_code=201,
tags=["Child"],
response_model=post_model.responseModel,
dependencies=[])
async def my_api(
body: post_model.requestBodyModel = Depends(post_model.requestBodyModel),
session=Depends(sync_memory_db.get_memory_db_session)
):
db_item = Child(**body.__dict__)
session.add(db_item)
session.commit()
session.refresh(db_item)
return db_item.__dict__
app.include_router(crud_route_1)
app.include_router(crud_route_2)
app.include_router(crud_route_3)
uvicorn.run(app, host="0.0.0.0", port=8000, debug=False)
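# Usage note (a sketch; host and port follow the uvicorn call above): run this
# script and open http://localhost:8000/docs to exercise the generated
# /Parent, /Child, /Parent2child and /hello routes interactively.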
| 2.234375 | 2 |
aliyun-python-sdk-videoenhan/aliyunsdkvideoenhan/request/v20200320/ChangeVideoSizeRequest.py | yndu13/aliyun-openapi-python-sdk | 1,001 | 12771354 | <filename>aliyun-python-sdk-videoenhan/aliyunsdkvideoenhan/request/v20200320/ChangeVideoSizeRequest.py<gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvideoenhan.endpoint import endpoint_data
class ChangeVideoSizeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'videoenhan', '2020-03-20', 'ChangeVideoSize','videoenhan')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
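    # Typical usage sketch (credentials and region are placeholders; the client
    # comes from aliyunsdkcore, not from this generated request class):
    #     from aliyunsdkcore.client import AcsClient
    #     client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-shanghai")
    #     request = ChangeVideoSizeRequest()
    #     request.set_VideoUrl("https://example.com/input.mp4")
    #     request.set_Width(1280)
    #     request.set_Height(720)
    #     response = client.do_action_with_exception(request)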
def get_Height(self):
return self.get_body_params().get('Height')
def set_Height(self,Height):
self.add_body_params('Height', Height)
def get_B(self):
return self.get_body_params().get('B')
def set_B(self,B):
self.add_body_params('B', B)
def get_FillType(self):
return self.get_body_params().get('FillType')
def set_FillType(self,FillType):
self.add_body_params('FillType', FillType)
def get_G(self):
return self.get_body_params().get('G')
def set_G(self,G):
self.add_body_params('G', G)
def get_CropType(self):
return self.get_body_params().get('CropType')
def set_CropType(self,CropType):
self.add_body_params('CropType', CropType)
def get_R(self):
return self.get_body_params().get('R')
def set_R(self,R):
self.add_body_params('R', R)
def get_VideoUrl(self):
return self.get_body_params().get('VideoUrl')
def set_VideoUrl(self,VideoUrl):
self.add_body_params('VideoUrl', VideoUrl)
def get_Width(self):
return self.get_body_params().get('Width')
def set_Width(self,Width):
self.add_body_params('Width', Width)
def get_Tightness(self):
return self.get_body_params().get('Tightness')
def set_Tightness(self,Tightness):
self.add_body_params('Tightness', Tightness) | 1.9375 | 2 |
tests/r/test_bio_chemists.py | hajime9652/observations | 199 | 12771355 | <reponame>hajime9652/observations<gh_stars>100-1000
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.bio_chemists import bio_chemists
def test_bio_chemists():
"""Test module bio_chemists.py by downloading
bio_chemists.csv and testing shape of
extracted data has 915 rows and 6 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = bio_chemists(test_path)
try:
assert x_train.shape == (915, 6)
except:
shutil.rmtree(test_path)
    raise
| 2.40625 | 2 |
src/relstorage/adapters/tests/test_batch.py | enfold/relstorage | 40 | 12771356 | ##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from relstorage.tests import TestCase
from relstorage.tests import MockCursor
from relstorage._util import consume
class RowBatcherTests(TestCase):
def getClass(self):
from relstorage.adapters.batch import RowBatcher
return RowBatcher
def test_delete_defer(self):
cursor = MockCursor()
batcher = self.getClass()(cursor)
batcher.delete_from("mytable", id=2)
self.assertEqual(cursor.executed, [])
self.assertEqual(batcher.rows_added, 1)
self.assertEqual(batcher.size_added, 0)
self.assertEqual(batcher.total_rows_inserted, 0)
self.assertEqual(batcher.total_rows_deleted, 0)
self.assertEqual(batcher.total_size_inserted, 0)
self.assertEqual(dict(batcher.deletes),
{('mytable', ('id',)): set([(2,)])})
def test_delete_multiple_column(self):
cursor = MockCursor()
batcher = self.getClass()(cursor)
batcher.delete_from("mytable", id=2, tid=10)
self.assertEqual(cursor.executed, [])
self.assertEqual(batcher.rows_added, 1)
self.assertEqual(batcher.size_added, 0)
self.assertEqual(dict(batcher.deletes),
{('mytable', ('id', 'tid')): set([(2, 10)])})
IN_ROWS_FLATTENED = False
delete_auto_flush = 'DELETE FROM mytable WHERE id IN (%s,%s)'
update_set_static_stmt = 'UPDATE pack_object SET foo=1 WHERE zoid IN (%s,%s)'
def _in(self, *params, **kw):
do_sort = kw.pop("do_sort", True)
assert not kw
params = sorted(params) if do_sort else params
if self.IN_ROWS_FLATTENED:
l = list(params)
return (l,)
return tuple(params)
def test_delete_auto_flush(self):
cursor = MockCursor()
cursor.sort_sequence_params = True
batcher = self.getClass()(cursor, 2)
batcher.sorted_deletes = True
batcher.delete_from("mytable", id=2)
batcher.delete_from("mytable", id=1)
self.assertEqual(
cursor.executed,
[
(self.delete_auto_flush,
self._in(1, 2)
)
])
self.assertEqual(batcher.rows_added, 0)
self.assertEqual(batcher.size_added, 0)
self.assertEqual(batcher.deletes, {})
self.assertEqual(batcher.total_rows_inserted, 0)
self.assertEqual(batcher.total_rows_deleted, 2)
self.assertEqual(batcher.total_size_inserted, 0)
def test_update_set_static(self):
cursor = MockCursor()
batcher = self.getClass()(cursor, 2)
cnt = batcher.update_set_static(
'UPDATE pack_object SET foo=1',
zoid=iter((1, 2, 3, 4, 5, 6, 7))
)
self.assertEqual(cnt, 7)
self.assertEqual(
cursor.executed,
[
(self.update_set_static_stmt,
self._in(2, 1, do_sort=False)
),
(self.update_set_static_stmt,
self._in(4, 3, do_sort=False)
),
(self.update_set_static_stmt,
self._in(6, 5, do_sort=False)
),
(self.update_set_static_stmt.replace(',%s', ''),
self._in(7)
),
])
maxDiff = None
def test_insert_defer(self):
cursor = MockCursor()
batcher = self.getClass()(cursor)
batcher.insert_into(
"mytable (id, name)",
"%s, id || %s",
(1, 'a'),
rowkey=1,
size=3,
)
self.assertEqual(cursor.executed, [])
self.assertEqual(batcher.rows_added, 1)
self.assertEqual(batcher.size_added, 3)
self.assertEqual(batcher.inserts, {
('INSERT', 'mytable (id, name)', '%s, id || %s', ''): {1: (1, 'a')}
})
self.assertEqual(batcher.total_rows_inserted, 0)
self.assertEqual(batcher.total_rows_deleted, 0)
self.assertEqual(batcher.total_size_inserted, 0)
def test_insert_defer_multi_table(self):
cursor = MockCursor()
batcher = self.getClass()(cursor)
batcher.insert_into(
"mytable (id, name)",
"%s, id || %s",
(1, 'a'),
rowkey=1,
size=3,
)
batcher.insert_into(
"othertable (name)",
"?",
('a'),
rowkey=1,
size=1,
)
self.assertEqual(cursor.executed, [])
self.assertEqual(batcher.rows_added, 2)
self.assertEqual(batcher.size_added, 4)
self.assertEqual(dict(batcher.inserts), {
('INSERT', 'mytable (id, name)', '%s, id || %s', ''): {1: (1, 'a')},
('INSERT', 'othertable (name)', '?', ''): {1: ('a')},
})
self.assertEqual(batcher.total_rows_inserted, 0)
self.assertEqual(batcher.total_rows_deleted, 0)
self.assertEqual(batcher.total_size_inserted, 0)
def test_insert_replace(self):
cursor = MockCursor()
batcher = self.getClass()(cursor)
batcher.insert_into(
"mytable (id, name)",
"%s, id || %s",
(1, 'a'),
rowkey=1,
size=3,
command='REPLACE',
)
self.assertEqual(cursor.executed, [])
self.assertEqual(batcher.rows_added, 1)
self.assertEqual(batcher.size_added, 3)
self.assertEqual(batcher.inserts, {
('REPLACE', 'mytable (id, name)', '%s, id || %s', ''): {1: (1, 'a')}
})
def test_insert_duplicate(self):
# A second insert on the same rowkey replaces the first insert.
cursor = MockCursor()
batcher = self.getClass()(cursor)
batcher.insert_into(
"mytable (id, name)",
"%s, id || %s",
(1, 'a'),
rowkey=1,
size=3,
)
batcher.insert_into(
"mytable (id, name)",
"%s, id || %s",
(1, 'b'),
rowkey=1,
size=3,
)
self.assertEqual(cursor.executed, [])
self.assertEqual(batcher.rows_added, 2)
self.assertEqual(batcher.size_added, 6)
self.assertEqual(batcher.inserts, {
('INSERT', 'mytable (id, name)', '%s, id || %s', ''): {1: (1, 'b')}
})
def test_insert_auto_flush(self):
cursor = MockCursor()
batcher = self.getClass()(cursor)
batcher.size_limit = 10
batcher.insert_into(
"mytable (id, name)",
"%s, id || %s",
(1, 'a'),
rowkey=1,
size=5,
)
batcher.insert_into(
"mytable (id, name)",
"%s, id || %s",
(2, 'B'),
rowkey=2,
size=5,
)
self.assertEqual(
cursor.executed,
[(
'INSERT INTO mytable (id, name) VALUES\n'
'(%s, id || %s), '
'(%s, id || %s)\n',
(1, 'a', 2, 'B'))
])
self.assertEqual(batcher.rows_added, 0)
self.assertEqual(batcher.size_added, 0)
self.assertEqual(batcher.inserts, {})
self.assertEqual(batcher.total_rows_inserted, 2)
self.assertEqual(batcher.total_rows_deleted, 0)
self.assertEqual(batcher.total_size_inserted, 10)
def test_insert_auto_flush_multi_table(self):
cursor = MockCursor()
batcher = self.getClass()(cursor)
batcher.size_limit = 10
batcher.insert_into(
"mytable (id, name)",
"%s, id || %s",
(1, 'a'),
rowkey=1,
size=5,
)
batcher.insert_into(
"mytable (id, name)",
"%s, id || %s",
(2, 'B'),
rowkey=2,
size=5,
)
self.assertLength(cursor.executed, 1)
self.assertEqual(
cursor.executed[0][0],
'INSERT INTO mytable (id, name) VALUES\n'
'(%s, id || %s), '
'(%s, id || %s)\n')
self.assertEqual(
cursor.executed[0][1],
(1, 'a', 2, 'B')
)
self.assertEqual(batcher.rows_added, 0)
self.assertEqual(batcher.size_added, 0)
self.assertEqual(batcher.inserts, {})
self.assertEqual(batcher.total_rows_inserted, 2)
self.assertEqual(batcher.total_rows_deleted, 0)
self.assertEqual(batcher.total_size_inserted, 10)
flush_delete_one = 'DELETE FROM mytable WHERE id IN (?)'
def test_flush(self):
cursor = MockCursor()
batcher = self.getClass()(cursor, delete_placeholder="?")
# Make sure we preserve order in multi-column
batcher.sorted_deletes = True
batcher.delete_from("mytable", id=1)
batcher.insert_into(
"mytable (id, name)",
"%s, id || %s",
(1, 'a'),
rowkey=1,
size=5,
)
batcher.delete_from("mytable", id=1, key='abc')
batcher.delete_from("mytable", id=2, key='def')
batcher.flush()
self.assertEqual(cursor.executed, [
(self.flush_delete_one,
self._in(1)),
('DELETE FROM mytable WHERE (id=? AND key=?) OR (id=? AND key=?)',
(1, 'abc', 2, 'def')),
('INSERT INTO mytable (id, name) VALUES\n(%s, id || %s)\n',
(1, 'a')),
])
select_one = 'SELECT zoid,tid FROM object_state WHERE oids IN (%s)'
def test_select_one(self):
cursor = MockCursor()
batcher = self.getClass()(cursor)
consume(batcher.select_from(('zoid', 'tid'), 'object_state', oids=(1,)))
self.assertEqual(cursor.executed, [
(self.select_one,
self._in(1,))
])
select_multiple_one_batch = 'SELECT zoid,tid FROM object_state WHERE oids IN (%s,%s,%s,%s)'
def test_select_multiple_one_batch(self):
cursor = MockCursor()
cursor.sort_sequence_params = True
batcher = self.getClass()(cursor)
list(batcher.select_from(('zoid', 'tid'), 'object_state',
oids=(1, 2, 3, 4)))
self.assertEqual(cursor.executed, [
(self.select_multiple_one_batch,
self._in(1, 2, 3, 4))
])
select_multiple_many_batch = 'SELECT zoid,tid FROM object_state WHERE oids IN (%s,%s)'
def test_select_multiple_many_batch(self, batch_limit_attr='row_limit'):
cursor = MockCursor()
cursor.sort_sequence_params = True
cursor.many_results = [
[(1, 1)],
[(3, 1)],
[]
]
batcher = self.getClass()(cursor)
setattr(batcher, batch_limit_attr, 2)
rows = batcher.select_from(('zoid', 'tid'), 'object_state',
oids=iter((1, 2, 3, 4, 5)))
rows = list(rows)
self.assertEqual(cursor.executed, [
(self.select_multiple_many_batch,
self._in(1, 2)),
(self.select_multiple_many_batch,
self._in(3, 4)),
(self.select_one,
self._in(5)),
])
self.assertEqual(rows, [
(1, 1),
(3, 1)
])
def test_select_multiple_many_batch_bind_limit(self):
self.test_select_multiple_many_batch(batch_limit_attr='bind_limit')
def test_select_from_timeout(self):
from relstorage.tests import mock
from relstorage.adapters.interfaces import AggregateOperationTimeoutError
cursor = MockCursor()
cursor.sort_sequence_params = True
cursor.many_results = [
[(1, 1)],
[(2, 1)],
[(3, 1)],
[]
]
batcher = self.getClass()(cursor)
batcher.bind_limit = 1
batcher.perf_counter = mock.Mock()
# These will be the time values returned from perf_counter()
batcher.perf_counter.side_effect = (
12345, # Begin
12346, # First batch
12347, # Second batch
)
gener = batcher.select_from(('zoid', 'tid',), 'object_state',
timeout=2,
oids=[1, 2, 3, 4, 5])
rows = []
with self.assertRaises(AggregateOperationTimeoutError):
for row in gener:
rows.append(row)
# We ran exactly twice before the perf_counter exceeded the timeout.
self.assertEqual(rows, [
(1, 1),
(2, 1),
])
class OracleRowBatcherTests(TestCase):
def getClass(self):
from relstorage.adapters.oracle.batch import OracleRowBatcher
return OracleRowBatcher
def test_insert_one_row(self):
cursor = MockCursor()
batcher = self.getClass()(cursor, {})
batcher.insert_into(
"mytable (id, name)",
"%s, id || %s",
(1, 'a'),
rowkey=1,
size=3,
)
self.assertEqual(cursor.executed, [])
batcher.flush()
self.assertEqual(cursor.executed, [
('INSERT INTO mytable (id, name) VALUES (%s, id || %s)', (1, 'a')),
])
def test_insert_two_rows(self):
cursor = MockCursor()
batcher = self.getClass()(cursor, {})
batcher.insert_into(
"mytable (id, name)",
":id, :id || :name",
{'id': 1, 'name': 'a'},
rowkey=1,
size=3,
)
batcher.insert_into(
"mytable (id, name)",
":id, :id || :name",
{'id': 2, 'name': 'b'},
rowkey=2,
size=3,
)
self.assertEqual(cursor.executed, [])
batcher.flush()
self.assertEqual(
cursor.executed,
[(
'INSERT ALL\n'
'INTO mytable (id, name) VALUES (:id_0, :id_0 || :name_0)\n'
'INTO mytable (id, name) VALUES (:id_1, :id_1 || :name_1)\n'
'SELECT * FROM DUAL',
{'id_0': 1, 'id_1': 2, 'name_1': 'b', 'name_0': 'a'})
])
def test_insert_one_raw_row(self):
class MockRawType(object):
pass
cursor = MockCursor()
batcher = self.getClass()(cursor, {'rawdata': MockRawType})
batcher.insert_into(
"mytable (id, data)",
":id, :rawdata",
{'id': 1, 'rawdata': 'xyz'},
rowkey=1,
size=3,
)
batcher.flush()
self.assertEqual(cursor.executed, [
('INSERT INTO mytable (id, data) VALUES (:id, :rawdata)',
{'id': 1, 'rawdata': 'xyz'})
])
self.assertEqual(cursor.inputsizes, {'rawdata': MockRawType})
def test_insert_two_raw_rows(self):
class MockRawType(object):
pass
cursor = MockCursor()
batcher = self.getClass()(cursor, {'rawdata': MockRawType})
batcher.insert_into(
"mytable (id, data)",
":id, :rawdata",
{'id': 1, 'rawdata': 'xyz'},
rowkey=1,
size=3,
)
batcher.insert_into(
"mytable (id, data)",
":id, :rawdata",
{'id': 2, 'rawdata': 'abc'},
rowkey=2,
size=3,
)
batcher.flush()
self.assertEqual(
cursor.executed,
[(
'INSERT ALL\n'
'INTO mytable (id, data) VALUES (:id_0, :rawdata_0)\n'
'INTO mytable (id, data) VALUES (:id_1, :rawdata_1)\n'
'SELECT * FROM DUAL',
{'id_0': 1, 'id_1': 2, 'rawdata_0': 'xyz', 'rawdata_1': 'abc'})
])
self.assertEqual(cursor.inputsizes, {
'rawdata_0': MockRawType,
'rawdata_1': MockRawType,
})
class PostgreSQLRowBatcherTests(RowBatcherTests):
def getClass(self):
from relstorage.adapters.postgresql.batch import PostgreSQLRowBatcher
return PostgreSQLRowBatcher
IN_ROWS_FLATTENED = True
delete_auto_flush = 'DELETE FROM mytable WHERE id = ANY (%s)'
flush_delete_one = 'DELETE FROM mytable WHERE id = ANY (?)'
select_one = 'SELECT zoid,tid FROM object_state WHERE oids = ANY (%s)'
select_multiple_one_batch = 'SELECT zoid,tid FROM object_state WHERE oids = ANY (%s)'
select_multiple_many_batch = 'SELECT zoid,tid FROM object_state WHERE oids = ANY (%s)'
update_set_static_stmt = 'UPDATE pack_object SET foo=1 WHERE zoid = ANY (%s)'
| 2.125 | 2 |
sasmodels/models/two_power_law.py | jmborr/sasmodels | 0 | 12771357 | r"""
Definition
----------
The scattering intensity $I(q)$ is calculated as
.. math::
I(q) = \begin{cases}
A q^{-m1} + \text{background} & q <= q_c \\
C q^{-m2} + \text{background} & q > q_c
\end{cases}
where $q_c$ = the location of the crossover from one slope to the other,
$A$ = the scaling coefficient that sets the overall intensity of the lower Q
power law region, $m1$ = power law exponent at low Q, and $m2$ = power law
exponent at high Q. The coefficient $C$ of the second power law region is then
chosen automatically so that the two branches match at the crossover:
.. math::
C = \frac{A q_c^{m2}}{q_c^{m1}}
.. note::
Be sure to enter the power law exponents as positive values!
For 2D data the scattering intensity is calculated in the same way as 1D,
where the $q$ vector is defined as
.. math::
q = \sqrt{q_x^2 + q_y^2}
References
----------
None.
**Author:** NIST IGOR/DANSE **on:** pre 2010
**Last Modified by:** <NAME> **on:** February 18, 2016
**Last Reviewed by:** <NAME> **on:** March 21, 2016
"""
from numpy import inf, power, empty, errstate
name = "two_power_law"
title = "This model calculates an empirical functional form for SAS data \
characterized by two power laws."
description = """
I(q) = coef_A*pow(qval,-1.0*power1) + background for q<=q_c
=C*pow(qval,-1.0*power2) + background for q>q_c
where C=coef_A*pow(q_c,-1.0*power1)/pow(q_c,-1.0*power2).
coef_A = scaling coefficent
q_c = crossover location [1/A]
power_1 (=m1) = power law exponent at low Q
power_2 (=m2) = power law exponent at high Q
background = Incoherent background [1/cm]
"""
category = "shape-independent"
# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type", "description"],
parameters = [
["coefficent_1", "", 1.0, [-inf, inf], "", "coefficent A in low Q region"],
["crossover", "1/Ang", 0.04,[0, inf], "", "crossover location"],
["power_1", "", 1.0, [0, inf], "", "power law exponent at low Q"],
["power_2", "", 4.0, [0, inf], "", "power law exponent at high Q"],
]
# pylint: enable=bad-whitespace, line-too-long
def Iq(q,
coefficent_1=1.0,
crossover=0.04,
power_1=1.0,
power_2=4.0,
):
"""
:param q: Input q-value (float or [float, float])
    :param coefficent_1: Scaling coefficient at low Q
:param crossover: Crossover location
:param power_1: Exponent of power law function at low Q
:param power_2: Exponent of power law function at high Q
:return: Calculated intensity
"""
    result = empty(q.shape, 'd')
index = (q <= crossover)
with errstate(divide='ignore'):
coefficent_2 = coefficent_1 * power(crossover, power_2 - power_1)
result[index] = coefficent_1 * power(q[index], -power_1)
result[~index] = coefficent_2 * power(q[~index], -power_2)
return result
Iq.vectorized = True # Iq accepts an array of q values
demo = dict(scale=1, background=0.0,
coefficent_1=1.0,
crossover=0.04,
power_1=1.0,
power_2=4.0)
tests = [
# Accuracy tests based on content in test/utest_extra_models.py
[{'coefficent_1': 1.0,
'crossover': 0.04,
'power_1': 1.0,
'power_2': 4.0,
'background': 0.0,
}, 0.001, 1000],
[{'coefficent_1': 1.0,
'crossover': 0.04,
'power_1': 1.0,
'power_2': 4.0,
'background': 0.0,
}, 0.150141, 0.125945],
[{'coefficent_1': 1.0,
'crossover': 0.04,
'power_1': 1.0,
'power_2': 4.0,
'background': 0.0,
}, 0.442528, 0.00166884],
[{'coefficent_1': 1.0,
'crossover': 0.04,
'power_1': 1.0,
'power_2': 4.0,
'background': 0.0,
}, (0.442528, 0.00166884), 0.00166884],
]
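# Illustrative continuity check (a sketch, not part of the upstream test suite):
# the two branches meet at the crossover because the high-Q coefficient is
# coefficent_1 * crossover**(power_2 - power_1).
if __name__ == "__main__":
    from numpy import array
    q_c = 0.04
    low = 1.0 * q_c**-1.0                        # A * q_c**-m1 = 25.0
    high = (1.0 * q_c**(4.0 - 1.0)) * q_c**-4.0  # C * q_c**-m2 = 25.0
    print(Iq(array([q_c])), low, high)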
| 2.6875 | 3 |
012.py | Mytho/project-euler-python | 1 | 12771358 | <reponame>Mytho/project-euler-python
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
PROBLEM 12
----------
The sequence of triangle numbers is generated by adding the natural numbers.
So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28. The
first ten terms would be:
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
Let us list the factors of the first seven triangle numbers:
1: 1
3: 1,3
6: 1,2,3,6
10: 1,2,5,10
15: 1,3,5,15
21: 1,3,7,21
28: 1,2,4,7,14,28
We can see that 28 is the first triangle number to have over five divisors.
What is the value of the first triangle number to have over five hundred
divisors?
Copyright (c) Project Euler
See: http://projecteuler.net/copyright
SOLUTION
--------
76576500
Copyright (c) 2012, <NAME>
See: https://raw.github.com/Mytho/project-euler-python/master/LISENCE
"""
from profiler import Profiler
from math import floor
def divisors(n):
"""Get a list of all divisors of N."""
    d, r = [], int(floor(n**.5))
    for i in range(1, r + 1):
        if not n % i:
            # Divisors come in pairs (i, n // i); count the square root
            # only once when n is a perfect square.
            d.append(i)
            if i != n // i:
                d.append(n // i)
    return d
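# Quick illustrative check (a sketch, not part of the original solution):
# 28 = 1*28 = 2*14 = 4*7, so it has exactly six divisors.
assert sorted(divisors(28)) == [1, 2, 4, 7, 14, 28]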
def find_triangle(n):
"""Find the first triangle number with N divisors."""
t, i = 1, 1
while True:
i += 1
t += i
if len(divisors(t)) > n:
return t
if __name__ == "__main__":
print(find_triangle(500))
Profiler.report()
| 3.90625 | 4 |
venv/Lib/site-packages/flask_administration/blueprints.py | DoesArt-Studios/RamBrowse | 1 | 12771359 | <gh_stars>1-10
from flask import jsonify, Blueprint, request, Response, render_template
from .utils import (static_folder, template_folder, encode_model)
admin = Blueprint('main',
'flask.ext.administration.main',
static_folder=static_folder,
template_folder=template_folder)
| 1.679688 | 2 |
manage.py | eolchina/tna.pyspecies | 0 | 12771360 | #!/usr/bin/env python
#
# manage.py is used to start the application and to run other management tasks
import os
# from flask import Flask
# from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import app, db
from flask_debugtoolbar import DebugToolbarExtension
app.config.from_object(os.environ['APP_SETTINGS'])
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
# add flask debug toolbar
toolbar = DebugToolbarExtension(app)
if __name__ == '__main__':
manager.run()
| 2.140625 | 2 |
frontends/relay-futil/compiler.py | tedbauer/futil | 1 | 12771361 | <filename>frontends/relay-futil/compiler.py
from tvm import relay, ir
from tvm.relay.expr_functor import ExprFunctor
from tvm.relay.function import Function
import textwrap
from collections import namedtuple, defaultdict
import math
from pretty_print import *
from utilities import *
from futil_ast import *
# Map standard Relay call to respective hardware name in FuTIL.
BuiltInBinaryCalls = {'add': 'add', 'equal': 'eq', 'multiply': 'mult', 'subtract': 'sub'}
EmitResult = namedtuple('EmitResult', ['cells', 'groups'])
class Relay2Futil(ExprFunctor):
"""The main compilation visitor."""
def id(self, name):
"""
Provides unique identification for a given name.
"""
id_number = self.id_dictionary[name]
self.id_dictionary[name] += 1
return name + str(id_number)
def __init__(self):
super(Relay2Futil, self).__init__()
self.id_dictionary = defaultdict(int)
self.main = FComponent(name="main", cells=[], wires=[])
def visit_var(self, var):
name = var.name_hint
type = str(var.type_annotation)
data = [get_bitwidth(type), 1, 1] # [width, size, index_size]
return [FCell(primitive=FPrimitive(name=name, data=data, type=PrimitiveType.Memory1D))]
def visit_let(self, let):
variable = self.visit(let.var)[0]
body = self.visit(let.body)
values = self.visit(let.value)
for value in values:
if not value.is_declaration(): continue
value.declaration.intermediary_output = FCell(
primitive=FPrimitive(name=variable.primitive.name, data=variable.primitive.data,
type=PrimitiveType.Memory1D))
return [body, values]
def visit_constant(self, const):
type = const.data.dtype
shape = const.data.shape
data = [get_bitwidth(type), int(const.data.asnumpy())] # [width, value]
name = self.id("const")
return [FCell(primitive=FPrimitive(name=name, data=data, type=PrimitiveType.Constant))]
def visit_call(self, call):
assert call.op.name in BuiltInBinaryCalls, f'{call.op.name} not supported.'
op = BuiltInBinaryCalls[call.op.name]
args = []
for arg in call.args: args.append(self.visit(arg))
return [build_tensor_0D_binary_op(call, args, op)]
def visit_function(self, function):
fn: FComponent = FComponent(name=self.id("function"), cells=[], wires=[],
signature=FSignature(inputs=[], outputs=[]))
fn.signature.inputs, fn.signature.outputs = extract_function_arguments(function.params)
body = self.visit(function.body)
components = [fn]
for cell in flatten(body):
if cell.is_declaration():
fn.add_cell(cell)
components.append(cell.declaration.component)
elif cell.primitive.type == PrimitiveType.Constant:
# Include constants, but not function arguments.
fn.add_cell(cell)
build_function_body(fn) # Groups, wires, connections.
# Add declaration to main.
self.main.add_cell(FCell(declaration=FDeclaration(name=self.id("fn"), component=fn)))
return '\n'.join(pp_component(c) for c in reversed(components))
def infer_type(expr: Function) -> Function:
infer_types_pass = relay.transform.InferType()
fuse_op__pass = relay.transform.FuseOps()
to_normal_pass = relay.transform.ToANormalForm()
mod = ir.IRModule()
mod['main'] = expr
# mod = fuse_op__pass(mod)
mod = infer_types_pass(mod)
ret = mod['main']
return ret
def compile(program) -> str:
"""Translate a Relay function to a FuTIL program (as a string)."""
program = infer_type(program)
visitor = Relay2Futil()
src = visitor.visit(program)
build_main_body(visitor.main)
PREAMBLE = """import "primitives/std.lib";"""
NEWL = "\n\n"
return f'{PREAMBLE}{NEWL}{src}{NEWL}{pp_component(visitor.main)}'
if __name__ == '__main__':
import sys
relay_func = relay.fromtext(sys.stdin.read())
print(compile(relay_func))
| 2.34375 | 2 |
test/record/parser/test_response_whois_in_ua_status_registered.py | huyphan/pyyawhois | 0 | 12771362 | <reponame>huyphan/pyyawhois<gh_stars>0
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.in.ua/status_registered
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisInUaStatusRegistered(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.in.ua/status_registered.txt"
host = "whois.in.ua"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, 'registered')
def test_available(self):
eq_(self.record.available, False)
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(len(self.record.nameservers), 3)
eq_(self.record.nameservers[0].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[0].name, "ns12.uadns.com")
eq_(self.record.nameservers[1].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[1].name, "ns11.uadns.com")
eq_(self.record.nameservers[2].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[2].name, "ns10.uadns.com")
def test_registered(self):
eq_(self.record.registered, True)
def test_created_on(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.created_on)
def test_updated_on(self):
eq_(self.record.updated_on.__class__.__name__, 'datetime')
eq_(self.record.updated_on, time_parse('2012-12-16 13:41:04'))
def test_expires_on(self):
eq_(self.record.expires_on.__class__.__name__, 'datetime')
eq_(self.record.expires_on, time_parse('2013-12-18 00:00:00'))
| 2.078125 | 2 |
phase_2/pkg_KOG/class_hero.py | Jackson-Cong/191130 | 0 | 12771363 | <reponame>Jackson-Cong/191130
# -*- coding: UTF-8 -*-
import random
class Hero():
'''
    skin = 'hero default skin'
    name = 'hero name'
    position = 'hero role'
ab_viability = 0
ab_damage = 0
ab_effect = 0
ab_difficulty = 0
'''
def __init__(self, s, n, p):
        'Initialize the Hero class'
        self.skin = s
        self.__name = n  # cannot be modified
        self.__position = p  # cannot be modified
self.ab_viability = random.randint(1, 100)
self.ab_damage = random.randint(1, 100)
self.ab_effect = random.randint(1, 100)
self.ab_difficulty = random.randint(1, 100)
return
@property
def name(self):
return self.__name
@property
def position(self):
return self.__position
def show_story(self):
return
def show_history(self):
return
| 3.28125 | 3 |
src/model.py | talipucar/PyFlow_DomainAdaptation | 1 | 12771364 | <filename>src/model.py
"""
Author: <NAME>: <EMAIL>
Version: 0.2
- Added support for unsupervised and semi-supervised training.
Description: Class to train an Autoencoder using multiple datasets with the same number of features and
to align their latent representations. It is currently configured to expect exactly three datasets; however,
extending it to more or fewer data sources should be trivial.
Two discriminators are used:
I) A discriminator is used to align corresponding clusters across different data sources in the latent space.
Clusters are aligned by using a mixture of Gaussians as the latent prior.
II) A discriminator is used to compare reconstructions at the output of Autoencoder and original samples. This
is to improve the quality of reconstructions.
TODO: Make the number of datasets a flexible choice rather than fixing it to three.
"""
import os
import gc
import random
import itertools
from itertools import cycle
from tqdm import tqdm
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
from utils.utils import set_seed, set_dirs
from utils.loss_functions import get_vae_loss, getMSEloss, get_generator_loss, get_discriminator_loss
from utils.model_plot import save_loss_plot
from utils.model_utils import Autoencoder, Discriminator, Classifier
import torch as th
import torch.nn.functional as F
th.autograd.set_detect_anomaly(True)
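# Typical usage (a sketch; the data loader objects come from the project's data
# utilities, and the exact option keys are assumptions, not guarantees):
#     options = get_config(args)  # hypothetical helper returning the config dict
#     model = AEModel(options)
#     model.fit([ds1_loader, ds2_loader, ds3_loader])
#     model.save_weights()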
class AEModel:
"""
Model: Consists of an Autoencoder together with two Discriminators, one for latent, and one for reconstructions.
Loss function: Reconstruction loss of untrained Autoencoder + either Adversarial losses for two Discriminators.
------------------------------------------------------------
Architecture: X -> Encoder -> z -> Decoder -> X' __ Discriminator
| X _/
|
\_ Discriminator
GMM -> z_prior /
------------------------------------------------------------
Autoencoders can be configured as
- Autoencoder (ae),
- Variational autoencoder (vae),
- Beta-VAE (bvae),
- Adversarial autoencoder (aae).
------------------------------------------------------------
Autoencoder can have a CNN-based architecture, or fully-connected one. Use "convolution=true" in model config file if
CNN-based model should be used.
Dictionary:
clabels = cohort labels, or cluster labels within each domain (data source)
dlabels = domain labels (labels assigned to each data source)
ds = data source, or data set. For example, ds1 = data source 1
ae = AutoEncoder
aae = Advarserial AutoEncoder
disc = discriminator
gen = generator
recon = reconstruction
"""
    def __init__(self, options):
        """Class to train an autoencoder model with two discriminators for domain adaptation/translation/alignment.
Args:
options (dict): Configuration dictionary.
"""
# Get config
self.options = options
# Define which device to use: GPU, or CPU
self.device = options["device"]
# Create empty lists and dictionary
self.model_dict, self.summary = {}, {}
# Set random seed
set_seed(self.options)
# Set paths for results and Initialize some arrays to collect data during training
self._set_paths()
# Set directories i.e. create ones that are missing.
set_dirs(self.options)
# ------Network---------
# Instantiate networks
print("Building the models for Data Alignment and Translation...")
# Set Autoencoder i.e. setting loss, optimizer, and device assignment (GPU, or CPU)
self.set_autoencoder()
# If supervised, use discriminators
if self.options["adv_training"]:
# Set AEE i.e. setting loss, optimizer, and device assignment (GPU, or CPU)
self.set_aae()
# Assign domains to labels e.g. label: 0, 1, 2 for 3 data sources.
self.set_domain_labels()
# Set scheduler (its use is optional)
self._set_scheduler()
# Print out model architecture
self.print_model_summary()
def set_autoencoder(self):
"""Sets up the autoencoder model, optimizer, and loss"""
# Instantiate the model for the Autoencoder
self.autoencoder = Autoencoder(self.options)
# Add the model and its name to a list to save, and load in the future
self.model_dict.update({"autoencoder": self.autoencoder})
# Assign autoencoder to a device
self.autoencoder.to(self.device)
# Reconstruction loss
self.recon_loss = getMSEloss
# Set optimizer for autoencoder
self.optimizer_ae = self._adam([self.autoencoder.parameters()], lr=self.options["learning_rate"])
# Add items to summary to be used for reporting later
self.summary.update({"recon_loss": [], "kl_loss": []})
def set_aae(self):
"""Sets up the discriminator models, optimizer, and loss"""
# Get number of clusters/cohorts
num_classes = self.options["n_cohorts"] + 1 if self.options["framework"] in ["semi-supervised"] else \
self.options["n_cohorts"]
# Instantiate Discriminators for latent space
self.discriminator_z = Discriminator(self.options, input_dim=self.options["dims"][-1] + num_classes)
# Instantiate Discriminators for data space
self.discriminator_x = Discriminator(self.options,
input_dim=self.options["dims"][0] + self.options["n_domains"])
# Add the model and its name to a list to save, and load in the future
self.model_dict.update({"discriminator_z": self.discriminator_z, "discriminator_x": self.discriminator_x})
# Assign models to the device
_ = [model.to(self.device) for model in [self.discriminator_z, self.discriminator_x]]
# Generator loss
self.gen_loss = get_generator_loss
# Discriminator loss for latent and data space
self.disc_loss = get_discriminator_loss
# Set optimizer for generator for latent space
self.optimizer_gen_z = self._adam([self.autoencoder.encoder.parameters()], lr=1e-3)
# Set optimizer for generator for data space
self.optimizer_gen_x = self._adam([self.autoencoder.decoder.parameters()], lr=1e-3)
# Set optimizer for discriminator of latent space
self.optimizer_disc_z = self._adam([self.discriminator_z.parameters()], lr=1e-5)
# Set optimizer for discriminator of data space
self.optimizer_disc_x = self._adam([self.discriminator_x.parameters()], lr=1e-5)
# Add items to summary to be used for reporting later
self.summary.update({"disc_z_train_acc": [], "disc_z_test_acc": []})
def set_parallelism(self, model):
"""NOT USED - Sets up parallelism in training."""
# If we are using GPU, and if there are multiple GPUs, parallelize training
if th.cuda.is_available() and th.cuda.device_count() > 1:
print(th.cuda.device_count(), " GPUs will be used!")
model = th.nn.DataParallel(model)
return model
def fit(self, data_loaders):
"""Fits model to the data
Args:
data_loaders (list): List of dataloaders for multiple datasets.
"""
# Get data loaders for three datasets
ds1_loader, ds2_loader, ds3_loader = data_loaders
# Placeholders for record batch losses
self.loss = {"rloss_b": [], "rloss_e": [], "kl_loss": [], "vloss_e": [], "aae_loss_z": [], "aae_loss_x": []}
# Turn on training mode for each model.
self.set_mode(mode="training")
# Compute total number of batches per epoch
self.total_batches = len(ds1_loader.train_loader)
# Start joint training of Autoencoder, and/or classifier
for epoch in range(self.options["epochs"]):
# Change learning rate if schedular=True
_ = self.scheduler.step() if self.options["scheduler"] else None
            # zip() the data loaders, and cycle the one with the smallest dataset to go through all samples of the longer ones.
zipped_data_loaders = zip(ds1_loader.train_loader, ds2_loader.train_loader, cycle(ds3_loader.train_loader))
# Attach progress bar to data_loader to check it during training. "leave=True" gives a new line per epoch
self.train_tqdm = tqdm(enumerate(zipped_data_loaders), total=self.total_batches, leave=True)
# Go through batches
for i, (ds1_dict, ds2_dict, ds3_dict) in self.train_tqdm:
# Get features, labels in each dataset, and labels assigned to domains
Xdata, labels, dlabels = self.process_batch(ds1_dict, ds2_dict, ds3_dict)
# 0 - Update Autoencoder
self.update_autoencoder(Xdata, dlabels)
if self.options["adv_training"] and self.options["framework"] in ["supervised", "semi-supervised"]:
# Update generator (Encoder) and discriminators in z- and x- space
# 1 - In z-space, update generator and discriminator
self.update_generator_discriminator_z([Xdata, labels, dlabels])
# 2 - Forward pass on the Autoencoder
_, z, _, _ = self.autoencoder([Xdata, dlabels])
# In x-space, update generator and discriminator
self.update_generator_discriminator_x([Xdata, z, dlabels])
# 3 - Shuffle the data and label to update the parameters again to learn translations between domains
Xdata_shuffled, dlabels_shuffled = self.shuffle_tensors([Xdata, dlabels])
# 4 - Update generator and discriminator
self.update_generator_discriminator_x([Xdata_shuffled, z, dlabels_shuffled])
# 5 - Update log message using epoch and batch numbers
self.update_log(epoch, i)
# 6 - Clean-up for efficient memory use.
gc.collect()
# Validate every nth epoch. n=1 by default
if epoch % self.options["nth_epoch"] == 0:
# Compute total number of batches, assuming all test sets have same number of samples
total_val_batches = len(ds1_loader.test_loader)
# Zip all test data loaders
zipped_val_loaders = zip(ds1_loader.test_loader, ds2_loader.test_loader, ds3_loader.test_loader)
# Compute validation loss
_ = self.validate(zipped_val_loaders, total_val_batches)
# Get reconstruction loss for training per epoch
self.loss["rloss_e"].append(sum(self.loss["rloss_b"][-self.total_batches:-1]) / self.total_batches)
# Save plot of training and validation losses
save_loss_plot(self.loss, self._plots_path)
# Convert loss dictionary to a dataframe
loss_df = pd.DataFrame(dict([(k, pd.Series(v)) for k, v in self.loss.items()]))
# Save loss dataframe as csv file for later use
loss_df.to_csv(self._loss_path + "/losses.csv")
def validate(self, validation_loader, total_batches):
"""Computes validation loss.
Args:
validation_loader (): data loader for validation set.
total_batches (int): total number of batches in validation set.
Returns:
float: validation loss
"""
with th.no_grad():
# Initialize validation loss
vloss = 0
            # Turn on evaluation mode
self.set_mode(mode="evaluation")
# Print validation message
print(f"Computing validation loss. #Batches:{total_batches}")
# Attach progress bar to data_loader to check it during training. "leave=True" gives a new line per epoch
val_tqdm = tqdm(enumerate(validation_loader), total=total_batches, leave=True)
# Go through batches
for i, (ds1_dict, ds2_dict, ds3_dict) in val_tqdm:
# Get features, labels in each dataset, and labels assigned to domains
Xdata, labels, dlabels = self.process_batch(ds1_dict, ds2_dict, ds3_dict)
# Prepare input data
input_data = [Xdata, dlabels] if self.options["conditional"] else Xdata
recon, latent, _, _ = self.autoencoder(input_data)
# Record validation loss
val_loss = getMSEloss(recon, Xdata)
# Get validation loss
vloss = vloss + val_loss.item()
# Clean up to avoid memory issues
del val_loss, recon, latent
gc.collect()
# Turn on training mode
self.set_mode(mode="training")
# Compute mean validation loss
vloss = vloss / total_batches
# Record the loss
self.loss["vloss_e"].append(vloss)
# Return mean validation loss
return vloss
def update_autoencoder(self, Xdata, dlabels):
"""Updates autoencoder model.
Args:
Xdata (ndarray): 2D array containing data with float type
dlabels (ndarray): 1D array containing data with int type
"""
# Prepare input data
input_data = [Xdata, dlabels] if self.options["conditional"] else Xdata
# Forward pass on Autoencoder
Xrecon, z, z_mean, z_logvar = self.autoencoder(input_data)
# Compute reconstruction loss
recon_loss = self.recon_loss(Xrecon, Xdata)
# Add KL loss to compute total loss if we are using variational methods
total_loss, kl_loss = get_vae_loss(recon_loss, z_mean, z_logvar, self.options)
# Record reconstruction loss
self.loss["rloss_b"].append(recon_loss.item())
# Record KL loss if we are using variational inference
self.loss["kl_loss"] += [kl_loss.item()] if self.options["model_mode"] in ["vae", "bvae"] else []
# Update Autoencoder params
self._update_model(total_loss, self.optimizer_ae, retain_graph=True)
# Delete loss and associated graph for efficient memory usage
del recon_loss, total_loss, kl_loss, Xrecon, z_mean, z_logvar
def update_generator_discriminator_z(self, data, retain_graph=True):
"""Updates encoder and discriminator used for latent space.
Args:
data (list): List of ndarrays
retain_graph (bool):
Returns:
None
"""
# Get the output dimension of classifier
num_classes = self.options["n_cohorts"]
# Add +1 to num_classes if semi-supervised setting since we have an extra label for unlabeled data points
num_classes = num_classes + 1 if self.options["framework"] in ["semi-supervised"] else num_classes
# Get the data: Xbatch: features, clabels=cohort labels, dlabels=domain labels
Xbatch, clabels, dlabels = data
# Sample real samples based on class proportions
latent_real, labels_real = self.gaussian_mixture(clabels)
latent_real, labels_real = shuffle(latent_real, labels_real)
latent_real = th.from_numpy(latent_real).float().to(self.device)
labels_real = th.from_numpy(labels_real).long().to(self.device)
# Normalize the noise if samples from posterior (i.e. latent variable) is also normalized.
latent_real = F.normalize(latent_real, p=2, dim=1) if self.options["normalize"] else latent_real
# 1)---- Start of Discriminator update: Autoencoder in evaluation mode ------------------------
self.autoencoder.eval()
# Forward pass on Autoencoder
_, latent_fake, _, _ = self.autoencoder([Xbatch, dlabels])
# Turn labels to one-hot encoded form
clabels = self.one_hot_embedding(clabels, num_classes)
labels_real = self.one_hot_embedding(labels_real, num_classes)
# Concatenate cluster labels of data to its corresponding real latent samples (to make it conditional)
latent_real = th.cat((latent_real, labels_real.float().view(-1, num_classes)), dim=1)
# Concatenate cluster labels of data to its corresponding fake latent samples (to make it conditional)
latent_fake = th.cat((latent_fake, clabels.float().view(-1, num_classes)), dim=1)
# Get predictions for real samples
pred_fake = self.discriminator_z(latent_fake.detach())
# Get predictions for fake samples
pred_real = self.discriminator_z(latent_real)
# Compute discriminator loss
disc_loss = self.disc_loss(pred_real, pred_fake)
# Reset optimizer
self.optimizer_disc_z.zero_grad()
# Backward pass
disc_loss.backward(retain_graph=retain_graph)
# Update parameters of discriminator
self.optimizer_disc_z.step()
# 2)---- Start of Generator update: Autoencoder in train mode ------------------------
self.autoencoder.encoder.train()
# Discriminator in evaluation mode
self.discriminator_z.eval()
# Forward pass on Autoencoder
_, latent_fake, _, _ = self.autoencoder([Xbatch, dlabels])
# Concatenate cluster labels of data to its corresponding fake latent samples (to make it conditional)
latent_fake = th.cat((latent_fake, clabels.float().view(-1, num_classes)), dim=1)
# Get predictions for real samples
pred_fake = self.discriminator_z(latent_fake)
# Compute discriminator loss
gen_loss = self.gen_loss(pred_fake)
# Reset optimizer
self.optimizer_gen_z.zero_grad()
# Backward pass
gen_loss.backward(retain_graph=retain_graph)
# Update parameters of discriminator
self.optimizer_gen_z.step()
# Turn training mode back on. Default mode is training
self.set_mode()
# Record losses
self.loss["aae_loss_z"].append([disc_loss.item(), gen_loss.item()])
# Delete losses (graphs) for efficient memory usage
self.clean_up_memory([disc_loss, gen_loss])
def update_generator_discriminator_x(self, data, retain_graph=True):
"""Updates decoder and discriminator used for reconstruction space.
Args:
data (list): List of ndarrays
retain_graph (bool):
Returns:
None
"""
# Get the output dimension of classifier
num_classes = self.options["n_domains"]
# Get the data
Xdata, z, dlabels = data
# Concatenate labels to z to use decoder as conditional decoder
z_cond = th.cat((z, dlabels.float().view(-1, num_classes)), dim=1)
# 1)---- Start of Discriminator update: Autoencoder in evaluation mode
self.autoencoder.eval()
# Forward pass on decoder
with th.no_grad():
Xrecon = self.autoencoder.decoder(z_cond)
        # Concatenate domain labels to the original samples (to make the discriminator conditional)
real = th.cat((Xdata, dlabels.float().view(-1, num_classes)), dim=1)
# Concatenate domain labels of data to its corresponding reconstructions (to make it conditional)
fake = th.cat((Xrecon, dlabels.float().view(-1, num_classes)), dim=1)
# Get predictions for real samples
pred_fake = self.discriminator_x(fake.detach())
# Get predictions for fake samples
pred_real = self.discriminator_x(real)
# Compute discriminator loss
disc_loss = self.disc_loss(pred_real, pred_fake)
# Reset optimizer
self.optimizer_disc_x.zero_grad()
# Backward pass
disc_loss.backward(retain_graph=retain_graph)
# Update parameters of discriminator
self.optimizer_disc_x.step()
# 2)---- Start of Generator update: Autoencoder in train mode
self.autoencoder.decoder.train()
# Discriminator in evaluation mode
self.discriminator_x.eval()
        # Forward pass on decoder
Xrecon = self.autoencoder.decoder(z_cond)
# Concatenate domain labels of data to its corresponding reconstructions (i.e. conditional)
fake = th.cat((Xrecon, dlabels.float().view(-1, num_classes)), dim=1)
        # Get predictions for fake (reconstructed) samples
        pred_fake = self.discriminator_x(fake)
        # Compute generator loss
        gen_loss = self.gen_loss(pred_fake)
# Reset optimizer
self.optimizer_gen_x.zero_grad()
# Backward pass
gen_loss.backward(retain_graph=retain_graph)
        # Update parameters of generator (the decoder)
        self.optimizer_gen_x.step()
# Turn training mode back on. Default mode is training
self.set_mode()
# Record losses
self.loss["aae_loss_x"].append([disc_loss.item(), gen_loss.item()])
# Delete losses (graphs) for efficient memory usage
self.clean_up_memory([disc_loss, gen_loss])
def shuffle_tensors(self, data_list):
"""Shuffles rows of tensors
Args:
data_list (list): List of tensors
Returns:
            list: Tensors with their rows shuffled by a shared random permutation
"""
        # Shuffle input and domain labels to prevent the classifier from learning a trivial solution.
random_indexes = th.randperm(3 * self.options["batch_size"])
# Shuffled data
data_shuffled = [data[random_indexes, :] for data in data_list]
# Return
return data_shuffled
def clean_up_memory(self, losses):
"""Deletes losses with attached graph, and cleans up memory"""
for loss in losses: del loss
gc.collect()
def process_batch(self, ds1_dict, ds2_dict, ds3_dict):
"""Concatenates arrays from different data sources into one, and pushes it to the device"""
# Process the batch i.e. turning it into a tensor
Xds1, Xds2, Xds3 = [d['tensor'].to(self.device) for d in [ds1_dict, ds2_dict, ds3_dict]]
# Get labels
Yds1, Yds2, Yds3 = [d['binary_label'].to(self.device) for d in [ds1_dict, ds2_dict, ds3_dict]]
# Concatenate data from different sources
Xdata, labels = th.cat((Xds1, Xds2, Xds3), dim=0), th.cat((Yds1, Yds2, Yds3), dim=0)
# Get domain labels
dlabels = self.domain_labels
# Return
return Xdata, labels, dlabels
def update_log(self, epoch, batch):
"""Updates the messages displayed during training and evaluation"""
# For the first epoch, add losses for batches since we still don't have loss for the epoch
if epoch < 1:
description = f"Epoch:[{epoch - 1}], Batch:[{batch}], Recon. loss:{self.loss['rloss_b'][-1]:.4f}"
        # For subsequent epochs, display only epoch losses.
else:
description = f"Epoch:[{epoch - 1}] training loss:{self.loss['rloss_e'][-1]:.4f}, val loss:{self.loss['vloss_e'][-1]:.4f}"
# Add generator and discriminator losses
if self.options["adv_training"] and self.options["framework"] in ["supervised", "semi-supervised"]:
description += f", Disc-Z loss:{self.loss['aae_loss_z'][-1][0]:.4f}, Gen-Z:{self.loss['aae_loss_z'][-1][1]:.4f}"
description += f", Disc-X loss:{self.loss['aae_loss_x'][-1][0]:.4f}, Gen-X:{self.loss['aae_loss_x'][-1][1]:.4f}"
# Update the displayed message
self.train_tqdm.set_description(description)
def set_mode(self, mode="training"):
"""Sets the mode of the models, either as .train(), or .eval()"""
for _, model in self.model_dict.items():
model.train() if mode == "training" else model.eval()
def save_weights(self):
"""Used to save weights."""
for model_name in self.model_dict:
th.save(self.model_dict[model_name], self._model_path + "/" + model_name + ".pt")
print("Done with saving models.")
def load_models(self):
"""Used to load weights saved at the end of the training."""
for model_name in self.model_dict:
model = th.load(self._model_path + "/" + model_name + ".pt", map_location=self.device)
setattr(self, model_name, model.eval())
print(f"--{model_name} is loaded")
print("Done with loading models.")
def print_model_summary(self):
"""Displays model architectures as a sanity check to see if the models are constructed correctly."""
# Summary of the model
description = f"{40 * '-'}Summary of the models (an Autoencoder and two Discriminators):{40 * '-'}\n"
description += f"{34 * '='}{self.options['model_mode'].upper().replace('_', ' ')} Model{34 * '='}\n"
description += f"{self.autoencoder}\n"
# Summary of Discriminator
if self.options["adv_training"]:
description += f"{30 * '='} Discriminator for latent {30 * '='}\n"
description += f"{self.discriminator_z}\n"
description += f"{30 * '='} Discriminator for reconstruction {30 * '='}\n"
description += f"{self.discriminator_x}\n"
# Print model architecture
print(description)
def _update_model(self, loss, optimizer, retain_graph=True):
"""Does backprop, and updates the model parameters
Args:
            loss (torch.Tensor): Loss to backpropagate.
            optimizer (torch.optim.Optimizer): Optimizer used to update the model parameters.
            retain_graph (bool): Whether to retain the computation graph after the backward pass.
Returns:
None
"""
# Reset optimizer
optimizer.zero_grad()
# Backward propagation to compute gradients
loss.backward(retain_graph=retain_graph)
# Update weights
optimizer.step()
def _set_scheduler(self):
"""Sets a scheduler for learning rate of autoencoder"""
# Set scheduler (Its use will be optional)
self.scheduler = th.optim.lr_scheduler.StepLR(self.optimizer_ae, step_size=1, gamma=0.97)
def _set_paths(self):
""" Sets paths to bse used for saving results at the end of the training"""
# Top results directory
self._results_path = os.path.join(self.options["paths"]["results"], self.options["framework"])
# Directory to save model
self._model_path = os.path.join(self._results_path, "training", self.options["model_mode"], "model")
# Directory to save plots as png files
self._plots_path = os.path.join(self._results_path, "training", self.options["model_mode"], "plots")
# Directory to save losses as csv file
self._loss_path = os.path.join(self._results_path, "training", self.options["model_mode"], "loss")
def _adam(self, params, lr=1e-4):
"""Sets up Adam optimizer using model params"""
return th.optim.Adam(itertools.chain(*params), lr=lr, betas=(0.9, 0.999))
def _tensor(self, data):
"""Turns numpy arrays to torch tensors"""
return th.from_numpy(data).to(self.device).float()
def one_hot_embedding(self, labels, num_classes):
"""Converts labels to one-hot encoded form.
Args:
labels (LongTensor): class labels, sized [N,].
num_classes (int): number of classes.
Returns:
            tensor: One-hot encoded labels, sized [N, num_classes].
"""
# Generate Identity matrix
y = th.eye(num_classes)
# Return corresponding one-hot coded labels for each label
return y[labels].to(self.device)
def set_domain_labels(self):
# Assign each domain to a label: domain-1:0, domain-2:1 and so on.
self.domain_labels = []
# i = number that the domain is assigned to.
for i in range(self.options["n_domains"]):
# Repeat each for number of batch size so that we have label for each data point from each domain
self.domain_labels += self.options["batch_size"] * [i]
# Turn labels to torch tensor
self.domain_labels = th.from_numpy(np.array(self.domain_labels))
# Turn them into one-hot embeddings, shape: (3 x batch_size, number of domains)
self.domain_labels = self.one_hot_embedding(self.domain_labels, self.options["n_domains"])
def gaussian_mixture(self, clabels):
"""Samples data from the GMM prior
Args:
            clabels (Tensor): 1D tensor of cluster/class labels
Returns:
ndarray, ndarray: 2D and 1D numpy arrays
"""
batchsize = self.options["n_domains"] * self.options["batch_size"]
ndim = self.options["dims"][-1]
num_clabels = self.options["n_cohorts"]
if ndim % 2 != 0:
raise Exception("ndim must be a multiple of 2.")
x_var = 0.5
y_var = 0.5
x = np.random.normal(0, x_var, (batchsize, ndim // 2))
y = np.random.normal(0, y_var, (batchsize, ndim // 2))
z = np.empty((batchsize, ndim), dtype=np.float32)
for batch in range(batchsize):
for zi in range(ndim // 2):
z[batch, zi * 2:zi * 2 + 2] = self.gm_sample(x[batch, zi], y[batch, zi], clabels[batch], num_clabels)
return z, clabels.cpu().numpy()
def gm_sample(self, x, y, label, num_clabels):
"""
Args:
            x (float): Sample along the first axis of a 2D Gaussian
            y (float): Sample along the second axis of a 2D Gaussian
            label (int or scalar tensor): Cluster label of the sample
            num_clabels (int): Number of clusters/classes in a domain
        Returns:
            ndarray: 1D numpy array of shape (2,)
"""
# Overwrite the labels (==n_cohorts) of the unlabeled data points with randomly-sampled labels ([0, n_cohorts-1]) if semi-supervised setting
if self.options["framework"] in ["semi-supervised"] and label == num_clabels:
            # Overwrite the labels of the unlabeled data points with randomly-sampled labels - i.e. uninformative cluster assignment
label = np.random.randint(0, self.options["n_cohorts"], 1)
shift = 1.4
r = 2.0 * np.pi / float(num_clabels) * float(label)
new_x = x * np.cos(r) - y * np.sin(r)
new_y = x * np.sin(r) + y * np.cos(r)
new_x += shift * np.cos(r)
new_y += shift * np.sin(r)
return np.array([new_x, new_y]).reshape((2,))
def generate_random_labels(self):
# Generate random samples using expected number of unique clusters (i.e. cohorts) in the dataset
random_labels = np.random.randint(0, self.options["n_cohorts"],
self.options["n_domains"] * self.options["batch_size"])
# Return random labels
return random_labels
# def generate_random_labels(self):
# # Generate random samples using expected number of unique clusters (i.e. cohorts) in the dataset
# random_labels = np.random.randint(0, self.options["n_cohorts"], self.options["n_domains"]*self.options["batch_size"])
# # Convert numpy to torch tensor
# random_labels = th.from_numpy(random_labels)
# # Move it to the device and return
# return random_labels.to(self.device) | 2.828125 | 3 |
regTests.py | RyanClinton777/graph-theory-project | 0 | 12771365 | import ShuntingYard_RE
import ThompsonConstruct
def runTests():
# List of ["Regular Expression", ["Strings"...]]
# (Infix Regular Expressions)
tests = [
["(a.b|b*)", ["", "ab", "b", "bb", "a"]],
["a.(b.b)*.a", ["aa", "bb", "abba", "aba"]],
["1.(0.0)*.1", ["11", "100001", "11001"]]
]
print("\nTESTS:")
#For each test
for test in tests:
# Infix
infix = test[0]
print(f"infix: {infix}")
# Postfix
postfix = ShuntingYard_RE.toPostfix(infix)
print(f"postfix: {postfix}")
# NFA
nfa = ThompsonConstruct.toNFA(postfix)
print(f"thompson: {nfa}")
# For each string for this test:
for s in test[1]:
# Match?
match = nfa.match(s)
print(f"Match '{s}'? {match}")
# Newline
print()
if __name__ == "__main__":
runTests() | 2.78125 | 3 |
env/lib/python3.6/site-packages/jeepney/__init__.py | michaelpeterswa/darkthyme | 1 | 12771366 | <reponame>michaelpeterswa/darkthyme<gh_stars>1-10
"""Low-level, pure Python DBus protocol wrapper.
"""
from .auth import AuthenticationError
from .low_level import Message, Parser
from .bus import find_session_bus, find_system_bus
from .wrappers import *
__version__ = '0.4.3'
| 1.15625 | 1 |
adx_lib/pjnz_file.py | jonathansberry/adx_lib | 0 | 12771367 | <reponame>jonathansberry/adx_lib<filename>adx_lib/pjnz_file.py<gh_stars>0
from rpy2.robjects import r, pandas2ri, packages
from rpy2.rinterface import NALogicalType
import pandas
import numpy
import zipfile
import io
import re
import logging
# Setup R packages to import the data
utils = packages.importr("utils")
if not packages.isinstalled('devtools'):
utils.install_packages('devtools')
packages.importr("devtools")
if not packages.isinstalled('specio'):
r['install_github']('mrc-ide/specio')
packages.importr("specio")
pandas2ri.activate()
class PJNZFile():
# Determines the files in the PJNZ that we want to import to Pandas
# Key is the suffix, Value specifies kwargs sent to read_csv
file_suffixes = {
'.DP': {'dtype': str}
}
year_range = map(lambda x: str(x), range(1970, 2026))
default_columns = year_range
def __init__(self, fpath, file_suffixes=file_suffixes, country=None):
"""
Files are extracted upon object creation.
"""
self.fpath = fpath
self.fname = fpath.split('/')[-1][:-5] # fName w/o path or extension
self.file_suffixes = file_suffixes
self.pjnz_file = zipfile.PyZipFile(fpath)
self.epp_data = {}
self.dp_tables = {}
self.epp('subpops') # This calculates self.epidemic_type
if country:
self.country = country
else:
self.country = fpath.split('/')[-1].split('_')[0]
self._extract_files()
def _extract_files(self):
"""
        This function takes each of the specified file_suffixes and tries to
import it as a Pandas dataframe, using the specified kwargs.
"""
self.dataframes = {}
for file_suffix, kwargs in self.file_suffixes.items():
filename = self.fname + file_suffix
self.dataframes[filename] = pandas.read_csv(
PJNZFile._add_delimiters(
self.pjnz_file.open(filename, 'r')
),
**kwargs
)
def epp(self, table):
"""
Uses the R package SpecIO, developed at Imperial, to import some of the
data from the Spectrum file.
"""
# Details the R functions available and tables of data they export.
epp_functions = {
"read_epp_data": [
'anc.prev',
'anc.n',
'ancrtsite.prev',
'ancrtsite.n',
'hhs'
],
"read_epp_subpops": [
'subpops',
'turnover'
]
}
# Utility function to convert an R matrix into a pandas dataframe
def r2df(r_matrix):
dataframe = pandas2ri.ri2py_dataframe(r_matrix)
dataframe.columns = r_matrix.colnames
dataframe.index = r_matrix.rownames
return dataframe
# Determines the R function to call, given the requested table
def get_function(table):
for function, tables in epp_functions.iteritems():
if table in tables:
return function
# Combines data for each group into one complete data set.
def read_epp_data():
# Import data for every table we are interested in
# Data is stratified into groups which we have to combine.
# Groups are either regions or sub-populations depending on epidemic type.
epp_data = r['read_epp_data'](self.fpath)
for table_name in epp_functions[function]:
complete_data = {}
for group in epp_data.names:
try:
data_frame = r2df(epp_data.rx2(group).rx2(table_name))
data_frame['Group'] = group
complete_data[group] = data_frame
except TypeError:
pass
if complete_data.values():
self.epp_data[table_name] = pandas.concat(complete_data.values())
else:
self.epp_data[table_name] = None
# Combines data for each group into one complete data set.
def read_epp_subpops():
epp_subpops = r['read_epp_subpops'](self.fpath)
pops_data = {}
turnover_data = {}
self.epidemic_type = r['attr'](epp_subpops, 'epidemicType')[0]
for group in epp_subpops.rx2('subpops').names:
# Get the population data
data_frame = r2df(epp_subpops.rx2('subpops').rx2(group))
data_frame['Group'] = group
pops_data[group] = data_frame
# Get the turnover data
duration = r['attr'](
epp_subpops.rx2('subpops').rx2(group),
'duration'
)[0]
if type(duration) is NALogicalType:
duration = numpy.NaN
turnover_data[group] = pandas.DataFrame({group: duration}, index=['Duration'])
if pops_data.values():
self.epp_data['subpops'] = pandas.concat(pops_data.values())
else:
self.epp_data['subpops'] = None
if turnover_data.values():
self.epp_data['turnover'] = pandas.concat(turnover_data.values(), axis='columns')
else:
self.epp_data['turnover'] = None
# Only import if we havn't already done so.
if self.epp_data.get(table, None) is None:
# Call the relavent R function to get the data
function = get_function(table)
locals()[function]()
# Return only the requested table
return self.epp_data[table]
def dp(self, tag, type=None, columns=None):
"""
Loads a dp table stored under the specified tag. Converts the
table to the specified type and adds the specified columns. If type
        and columns aren't specified as arguments, it will look into the
PJNZFile's dp_tables property for the relevant configurations.
"""
# Setup some default values
if not type:
type = self.dp_tables.get(tag, {}).get('type', float)
if not columns:
columns = self.dp_tables.get(tag, {}).get('columns', PJNZFile.default_columns)
# DP tables are cached in the dp_tables property alongside configs
        # Only extract table if need be as it's computationally intensive
if self.dp_tables.get(tag, {}).get('data') is not None:
table = self.dp_tables[tag]['data']
else:
table = self.extract_dp_table(tag, type, columns)
# Cache the table
if not self.dp_tables.get(tag):
self.dp_tables[tag] = {}
self.dp_tables[tag]['data'] = table
self.dp_tables[tag]['type'] = type
self.dp_tables[tag]['columns'] = columns
return table
def extract_dp_table(self, tag, type=float, columns=default_columns):
"""
        The DP file appears to be made up of a number of subsidiary dataframes,
        each tagged and labelled. There isn't a clear pattern to the way
they are structured in the sheet, but this function broadly pulls
out a subset of the DP sheet for a given tag.
"""
# Get the entire DP sheet with rows and columns indexed by numbers
dp_sheet = self.dataframes.get(self.fname + '.DP')
if dp_sheet is None:
raise FileNotFoundError("DP sheet not found")
dp_sheet.columns = range(0, len(dp_sheet.columns))
# Find desired tag in first column
tag = "<" + str(tag) + ">"
try:
start_row = dp_sheet.index[dp_sheet[0] == tag].tolist()[0]
except IndexError:
raise ValueError(tag + " not found in DP sheet")
# Find end tag that follows the desired tag
end_rows = dp_sheet.index[dp_sheet[0] == "<End>"].tolist()
for n in end_rows:
if n > start_row:
end_row = n
break
# Slice out the desired sub-table, and store the table name and tag.
dp_table = dp_sheet.copy().iloc[start_row+2:end_row, 3:]
dp_table.name = dp_sheet.iloc[start_row+1, 1]
dp_table.tag = tag
# Drop empty columns at end of data, and assign column names to the data
dp_table = dp_table.drop(range(
len(columns)+3,
len(dp_table.columns)+3
), axis='columns')
dp_table.columns = columns
if "Drop" in columns:
dp_table = dp_table.drop("Drop", 'columns')
# Convert the type of the data
if type:
try:
dp_table = PJNZFile._convert_to_type(dp_table, type)
except Exception:
logging.error("Can't convert " + tag + " to " + str(type))
raise
return dp_table
def extract_surv_data(self):
"""
The ANC Sentinel Surveillance and Routine Testing data are stored in
the surv file. This breaks the surv data down in to sub dataframes
using the =========== and ---------- dividers.
"""
# Get entire surv.csv sheet with rows and columns indexed by numbers
surv_sheet = self.dataframes.get(self.fname + '_surv.csv')
if surv_sheet is None:
raise FileNotFoundError("surv.csv sheet not found")
surv_sheet.columns = range(0, len(surv_sheet.columns))
tags = "|".join(PJNZFile.surv_file_datasheets)
dividing_rows = surv_sheet.index[
surv_sheet[0].str.contains(tags, na=False, regex=True)
].tolist()
dataframes = {}
for index, row in enumerate(dividing_rows):
start_row = row+1
try:
end_row = dividing_rows[index+1]
except IndexError:
break
table = surv_sheet.copy().iloc[start_row:end_row, 0:]
dataframes[surv_sheet.iloc[row, 0][:-5]] = table
# # Divide up the data into multiple smaller data frames
# tags = "=============================|------------------------------"
# dividing_rows = surv_sheet.index[
# surv_sheet[0].str.contains(tags, na=False, regex=True)
# ].tolist()
# dataframes = []
# for index, row in enumerate(dividing_rows):
# start_row = row+1
# try:
# end_row = dividing_rows[index+1]
# except IndexError:
# break
# table = surv_sheet.copy().iloc[start_row:end_row, 0:]
# dataframes.append(table)
return dataframes
@staticmethod
def _add_delimiters(file_object, delimiter=','):
"""
        Pandas does not allow the import of irregularly shaped CSV files - the first row must have
        at least as many elements as any other row. This function adds extra delimiters to the first row so
        it matches the longest row in length. All empty cells will be assigned NaN in the Pandas
dataframe.
"""
s_data = ''
max_num_delimiters = 0
with file_object as f:
for line in f:
s_data += line
delimiter_count = line.count(delimiter)
if delimiter_count > max_num_delimiters:
max_num_delimiters = delimiter_count
s_delimiters = delimiter * max_num_delimiters + '\n'
return io.StringIO(unicode(s_delimiters + s_data, "utf-8"))
@staticmethod
def _convert_to_type(df, type):
df = df.fillna(-9999)
df = df.astype(type, errors='ignore')
df = df.replace(-9999, numpy.nan)
return df
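
# Usage sketch (editorial addition, not part of the original module): the file
# path, country and DP tag below are placeholders.
#   pjnz = PJNZFile("Example_Country_2019.PJNZ", country="Example Country")
#   subpops = pjnz.epp("subpops")   # EPP sub-population table
#   table = pjnz.dp("SomeTag")      # a DP tag, passed without angle brackets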
| 2.4375 | 2 |
archive/google_stuff.py | Tahlor/cleanvid | 0 | 12771368 | import os
import pickle
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.errors import HttpError
SCOPES = ['https://www.googleapis.com/auth/calendar.events',
'https://www.googleapis.com/auth/cloud-platform']
# https://developers.google.com/identity/protocols/googlescopes
def authorize(json_path='credentials_oauth2.json', token_path='token.pickle'):
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists(token_path):
with open(token_path, 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
json_path, SCOPES)
creds = flow.run_local_server()
# Save the credentials for the next run
with open(token_path, 'wb') as token:
pickle.dump(creds, token)
return creds
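
# Usage sketch (editorial addition): the credentials returned by authorize() can
# be handed straight to the discovery client, e.g. for the Calendar API:
#   creds = authorize("./credentials/credentials.json", "./credentials/token.pickle")
#   service = build('calendar', 'v3', credentials=creds)
#   events = service.events().list(calendarId='primary', maxResults=10).execute()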
authorize("./credentials/credentials.json", "./credentials/token.pickle") | 2.796875 | 3 |
djTest/pages.py | born2code4u/game | 0 | 12771369 | from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from .models import Constants
def detail(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, 'djTest/index.html', {'question': question})
class MyPage(Page):
pass
class ResultsWaitPage(WaitPage):
def after_all_players_arrive(self):
pass
class Results(Page):
pass
page_sequence = [
MyPage,
ResultsWaitPage,
Results
]
| 2.234375 | 2 |
addons/mendeley/views.py | gaybro8777/osf.io | 628 | 12771370 | # -*- coding: utf-8 -*-
from .provider import MendeleyCitationsProvider
from website.citations.views import GenericCitationViews
mendeley_views = GenericCitationViews('mendeley', MendeleyCitationsProvider)
| 1.226563 | 1 |
19-10-15-exercises/main.py | stogacs/cscex | 0 | 12771371 | <filename>19-10-15-exercises/main.py
'''
Conestoga Computer Science Club Programming Challenges
Instructions:
Finish each method and run the test.py file to test the methods.
'''
'''
Karaca Encrypt
--------------
Make a function that encrypts a given input with these steps:
Input: "apple"
Step 1: Reverse the input: "elppa"
Step 2: Replace all vowels using the following chart:
a => 0
e => 1
i => 2
o => 3
u => 4
# "1lpp0"
Step 3: Add "aca" to the end of the word: "1lpp0aca"
Output: "1lpp0aca"
Examples:
encrypt("banana") ➞ "0n0n0baca"
encrypt("karaca") ➞ "0c0r0kaca"
encrypt("burak") ➞ "k0r3baca"
encrypt("alpaca") ➞ "0c0pl0aca"
Notes:
All inputs are lowercase strings, and all outputs must be strings.
'''
def karacaEncrypt(word):
    # Vowel substitution map from the challenge description
    vowel_map = {"a": "0", "e": "1", "i": "2", "o": "3", "u": "4"}
    # Step 1: reverse the input
    reversed_word = word[::-1]
    # Steps 2 and 3: replace vowels, then append "aca"
    return "".join(vowel_map.get(ch, ch) for ch in reversed_word) + "aca"
'''
Convert to Hex
Create a function that takes a string's characters as ASCII and returns each character's hexadecimal value as a string.
Examples:
convert_to_hex("hello world") ➞ "68 65 6c 6c 6f 20 77 6f 72 6c 64"
convert_to_hex("Big Boi") ➞ "42 69 67 20 42 6f 69"
convert_to_hex("<NAME>") ➞ "4d 61 72 74 79 20 50 6f 70 70 69 6e 73 6f 6e"
Notes:
Each byte must be separated by a space.
All alpha hex characters must be lowercase.
'''
def convertToHex(string):
# Code here
pass
'''
Moran Numbers
A Harshad number is a number which is divisible by the sum of its digits. For example, 132 is divisible by 6 (1+3+2).
A subset of the Harshad numbers are the Moran numbers. Moran numbers yield a prime when divided by the sum of their digits. For example, 133 divided by 7 (1+3+3) yields 19, a prime.
Create a function that takes a number and returns "M" if the number is a Moran number, "H" if it is a (non-Moran) Harshad number, or "Neither".
Examples
moran(132) ➞ "H"
moran(133) ➞ "M"
moran(134) ➞ "Neither"
Notes:
You may need to make a method to determine whether a number is prime.
'''
def moran(num):
# Code here
pass
| 4.21875 | 4 |
messages/ClientAddContributorMessage.py | zadjii/nebula | 2 | 12771372 | # last generated 2016-10-14 13:51:52.131000
from messages import BaseMessage
from msg_codes import CLIENT_ADD_CONTRIBUTOR as CLIENT_ADD_CONTRIBUTOR
__author__ = 'Mike'
class ClientAddContributorMessage(BaseMessage):
def __init__(self, sid=None, new_user_id=None, cloud_uname=None, cname=None, fpath=None, permissions=None):
super(ClientAddContributorMessage, self).__init__()
self.type = CLIENT_ADD_CONTRIBUTOR
self.sid = sid
self.new_user_id = new_user_id
self.cloud_uname = cloud_uname
self.cname = cname
self.fpath = fpath
self.permissions = permissions
@staticmethod
def deserialize(json_dict):
msg = ClientAddContributorMessage()
msg.sid = json_dict['sid']
msg.new_user_id = json_dict['new_user_id']
msg.cloud_uname = json_dict['cloud_uname']
msg.cname = json_dict['cname']
msg.fpath = json_dict['fpath']
msg.permissions = json_dict['permissions']
return msg
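

if __name__ == "__main__":
    # Usage sketch (editorial addition, not part of the original module):
    # rebuild a message from a JSON payload; the field values are placeholders.
    payload = {
        'sid': 1, 'new_user_id': 2, 'cloud_uname': 'mike',
        'cname': 'my-cloud', 'fpath': '/docs/report.txt', 'permissions': 7,
    }
    msg = ClientAddContributorMessage.deserialize(payload)
    print(msg.type == CLIENT_ADD_CONTRIBUTOR, msg.cname)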
| 2.078125 | 2 |
tests/python/test_units.py | aTrotier/sycomore | 14 | 12771373 | <reponame>aTrotier/sycomore<gh_stars>10-100
import unittest
import sycomore
from sycomore.units import *
class TestUnits(unittest.TestCase):
def test_basic_unit(self):
length = 100*cm
self.assertEqual(length.magnitude, 1)
self.assertEqual(length.dimensions, sycomore.Length)
def test_derived_unit(self):
        force = 1*kN
        self.assertEqual(force.magnitude, 1000)
        self.assertEqual(force.dimensions, sycomore.Force)
if __name__ == "__main__":
unittest.main()
| 2.59375 | 3 |
SQL/1068. Product Sales Analysis I.py | joshlyman/Josh-LeetCode | 0 | 12771374 | -- Write an SQL query that reports all product names of the products in the Sales table along with their selling year and price.
-- For example:
-- Sales table:
-- +---------+------------+------+----------+-------+
-- | sale_id | product_id | year | quantity | price |
-- +---------+------------+------+----------+-------+
-- | 1 | 100 | 2008 | 10 | 5000 |
-- | 2 | 100 | 2009 | 12 | 5000 |
-- | 7 | 200 | 2011 | 15 | 9000 |
-- +---------+------------+------+----------+-------+
-- Product table:
-- +------------+--------------+
-- | product_id | product_name |
-- +------------+--------------+
-- | 100 | Nokia |
-- | 200 | Apple |
-- | 300 | Samsung |
-- +------------+--------------+
-- Result table:
-- +--------------+-------+-------+
-- | product_name | year | price |
-- +--------------+-------+-------+
-- | Nokia | 2008 | 5000 |
-- | Nokia | 2009 | 5000 |
-- | Apple | 2011 | 9000 |
-- +--------------+-------+-------+
# Write your MySQL query statement below
select product_name,year,price
from Sales left join Product
on Sales.product_id = Product.product_id | 3.03125 | 3 |
tools/dns-sync/dns_sync/api.py | ruchirjain86/professional-services | 2,116 | 12771375 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from google.cloud import datastore
from google.cloud import resource_manager
from googleapiclient import discovery
from googleapiclient import errors
import httplib2
from oauth2client import client
import webapp2
def resource_iterator(next_page_function):
"""Loop through resources from a Google API.
An iterator that returns all of the resources from a Google API 'list'
operation paging through each set.
Args:
next_page_function: A function that when called will return the next
page of results.
Yields:
A list if resources, which are typically dictionaries.
"""
next_page_token = None
more_results = True
while more_results:
resource_response = None
try:
resource_response = next_page_function(next_page_token).execute()
except errors.HttpError:
# Some projects throw a 403. (compute engine isn't enabled)
# just ignore those resources.
logging.debug('skipping resources.', exc_info=True)
return
for items_field in ['items', 'rrsets', 'managedZones']:
items = resource_response.get(items_field, {})
if items and (type(items) == dict):
for item in items.iteritems():
yield item
if items and (type(items) == list):
for item in items:
yield item
if 'nextPageToken' in resource_response:
next_page_token = resource_response['nextPageToken']
else:
more_results = False
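

def _list_managed_zones_example(project_id):
    """Usage sketch (editorial addition, not part of the original module).

    Pages through Cloud DNS managed zones with resource_iterator. The project
    id is a caller-supplied placeholder; CLIENTS is defined at the bottom of
    this module.
    """
    def next_page(page_token):
        kwargs = {'project': project_id}
        if page_token:
            kwargs['pageToken'] = page_token
        return CLIENTS.dns.managedZones().list(**kwargs)
    return list(resource_iterator(next_page))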
class ThreadsafeClientLocal(object):
"""A thread local Google API client descriptor.
    Httplib2 is not threadsafe so each request thread requires its own
    threadlocal client object, which this creates.
Attributes:
service: String name of the API to create the client for.
version: String version of the API client.
"""
_class_thread_local = threading.local()
def __init__(self, service, version):
"""Create a thread local API client.
Will create the underlying httplib2.Http object on construction, but
the underlying API client is lazy constructed.
Args:
service: Name of API.
version: Version of the api.
"""
self.service = service
self.version = version
self.http = httplib2.Http(timeout=60)
self.cache_discovery = True
def __get__(self, instance, instance_type):
"""Construct the API client."""
if instance is None:
return self
thread_local = None
try:
app = webapp2.get_app()
# Python Google API clients aren't threadsafe as they use httplib2
# which isn't threadsafe.
thread_local = app.registry.get(self)
if thread_local is None:
thread_local = threading.local()
app.registry[self] = thread_local
except AssertionError:
# When not in a request context, use class thread local.
thread_local = ThreadsafeClientLocal._class_thread_local
cached_client = getattr(thread_local, 'api', None)
if cached_client is None:
credentials = client.GoogleCredentials.get_application_default()
if credentials.create_scoped_required():
credentials = credentials.create_scoped(
'https://www.googleapis.com/auth/cloud-platform')
cached_client = discovery.build(
self.service,
self.version,
http=credentials.authorize(self.http),
cache_discovery=self.cache_discovery)
thread_local.api = cached_client
return cached_client
class Clients(object):
"""Holds API clients.
    For Google API clients, we use thread-local descriptors which create the
client on first access. The "google.cloud" clients are threadsafe and are
simple properties.
"""
metrics = ThreadsafeClientLocal('monitoring', 'v3')
compute = ThreadsafeClientLocal('compute', 'v1')
dns = ThreadsafeClientLocal('dns', 'v1')
iam = ThreadsafeClientLocal('cloudresourcemanager', 'v1')
def __init__(self):
self.datastore = datastore.Client()
self.crm = resource_manager.Client()
CLIENTS = Clients()
| 2.609375 | 3 |
django_simple_coupons/actions.py | brunovila/django-simple-coupons | 19 | 12771376 | <filename>django_simple_coupons/actions.py
from django.contrib.admin import ModelAdmin
from django.utils import timezone
# Create your actions here
# ========================
def reset_coupon_usage(modeladmin, request, queryset):
for coupon_user in queryset:
coupon_user.times_used = 0
coupon_user.save()
ModelAdmin.message_user(modeladmin, request, "Coupons reseted!")
def delete_expired_coupons(modeladmin, request, queryset):
count = 0
for coupon in queryset:
expiration_date = coupon.ruleset.validity.expiration_date
if timezone.now() >= expiration_date:
coupon.delete()
count += 1
ModelAdmin.message_user(modeladmin, request, "{0} Expired coupons deleted!".format(count))
# Actions short descriptions
# ==========================
reset_coupon_usage.short_description = "Reset coupon usage"
delete_expired_coupons.short_description = "Delete expired coupons"
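
# Usage sketch (editorial addition): these functions are meant to be listed in a
# ModelAdmin's ``actions``; the model and admin names below are assumptions, not
# part of this module.
#   from django.contrib import admin
#   from .models import Coupon, CouponUser
#
#   @admin.register(CouponUser)
#   class CouponUserAdmin(ModelAdmin):
#       actions = [reset_coupon_usage]
#
#   @admin.register(Coupon)
#   class CouponAdmin(ModelAdmin):
#       actions = [delete_expired_coupons]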
| 2.25 | 2 |
rrs/tools/dump_upgrades.py | ardier/layerindex-web | 0 | 12771377 | <gh_stars>0
#!/usr/bin/env python3
#
# Copyright (C) 2019 Intel Corporation
# Author: <NAME> <<EMAIL>>
#
# Licensed under the MIT license, see COPYING.MIT for details
#
# SPDX-License-Identifier: MIT
import sys
import os.path
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..')))
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'layerindex')))
import argparse
import re
import glob
import utils
import logging
from datetime import date, datetime
class DryRunRollbackException(Exception):
pass
logger = utils.logger_create('RRSDump')
def main():
parser = argparse.ArgumentParser(description="Dump RRS upgrade info")
parser.add_argument("plan",
help="Specify maintenance plan to operate on")
parser.add_argument('-d', '--debug', action='store_true', help='Enable debug output')
parser.add_argument('-q', '--quiet', action='store_true', help='Hide all output except error messages')
args = parser.parse_args()
if args.debug:
loglevel = logging.DEBUG
elif args.quiet:
loglevel = logging.WARNING
else:
loglevel = logging.INFO
utils.setup_django()
import settings
from rrs.models import MaintenancePlan, Release, Milestone, RecipeUpgrade, RecipeSymbol
import rrs.views
from django.db import transaction
logger.setLevel(loglevel)
maintplan = MaintenancePlan.objects.filter(id=args.plan).first()
if not maintplan:
logger.error('No maintenance plan with id %s' % args.plan)
sys.exit(1)
release = maintplan.get_default_release()
if not release:
logger.error('No default release for maintenance plan %s' % maintplan)
sys.exit(1)
milestone = release.get_default_milestone()
if not milestone:
logger.error('No default milestone for release %s' % release)
sys.exit(1)
recipe_list = rrs.views._get_recipe_list(milestone)
for r in recipe_list:
recipesymbol = RecipeSymbol.objects.get(id=r.pk)
print('* %s %s %s %s' % (r.name, r.version, r.upstream_version, r.upstream_status))
details = []
for ru in RecipeUpgrade.objects.filter(recipesymbol=recipesymbol).exclude(upgrade_type='M').order_by('group', '-commit_date', '-id'):
details.append(rrs.views._get_recipe_upgrade_detail(maintplan, ru))
details.sort(key=lambda s: rrs.views.RecipeUpgradeGroupSortItem(s.group), reverse=True)
group = None
for rud in details:
if rud.group != group:
print(' ---- %s ----' % rud.group.title)
group = rud.group
print(' - %s | %s | %s | %s | %s' % (rud.title, rud.version, rud.upgrade_type, rud.milestone_name, rud.date))
sys.exit(0)
if __name__ == "__main__":
main()
| 1.984375 | 2 |
pid.py | hrafnkelle/pid-autotune | 97 | 12771378 | <reponame>hrafnkelle/pid-autotune
from time import time
import logging
# Based on Arduino PID Library
# See https://github.com/br3ttb/Arduino-PID-Library
class PIDArduino(object):
"""A proportional-integral-derivative controller.
Args:
sampletime (float): The interval between calc() calls.
kp (float): Proportional coefficient.
ki (float): Integral coefficient.
kd (float): Derivative coefficient.
out_min (float): Lower output limit.
out_max (float): Upper output limit.
time (function): A function which returns the current time in seconds.
"""
def __init__(self, sampletime, kp, ki, kd, out_min=float('-inf'),
out_max=float('inf'), time=time):
if kp is None:
raise ValueError('kp must be specified')
if ki is None:
raise ValueError('ki must be specified')
if kd is None:
raise ValueError('kd must be specified')
if sampletime <= 0:
raise ValueError('sampletime must be greater than 0')
if out_min >= out_max:
raise ValueError('out_min must be less than out_max')
self._logger = logging.getLogger(type(self).__name__)
self._Kp = kp
self._Ki = ki * sampletime
self._Kd = kd / sampletime
self._sampletime = sampletime * 1000
self._out_min = out_min
self._out_max = out_max
self._integral = 0
self._last_input = 0
self._last_output = 0
self._last_calc_timestamp = 0
self._time = time
def calc(self, input_val, setpoint):
"""Adjusts and holds the given setpoint.
Args:
input_val (float): The input value.
setpoint (float): The target value.
Returns:
A value between `out_min` and `out_max`.
"""
now = self._time() * 1000
if (now - self._last_calc_timestamp) < self._sampletime:
return self._last_output
# Compute all the working error variables
error = setpoint - input_val
input_diff = input_val - self._last_input
# In order to prevent windup, only integrate if the process is not saturated
if self._last_output < self._out_max and self._last_output > self._out_min:
self._integral += self._Ki * error
self._integral = min(self._integral, self._out_max)
self._integral = max(self._integral, self._out_min)
p = self._Kp * error
i = self._integral
d = -(self._Kd * input_diff)
# Compute PID Output
self._last_output = p + i + d
self._last_output = min(self._last_output, self._out_max)
self._last_output = max(self._last_output, self._out_min)
# Log some debug info
self._logger.debug('P: {0}'.format(p))
self._logger.debug('I: {0}'.format(i))
self._logger.debug('D: {0}'.format(d))
self._logger.debug('output: {0}'.format(self._last_output))
# Remember some variables for next time
self._last_input = input_val
self._last_calc_timestamp = now
return self._last_output
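

if __name__ == "__main__":
    # Usage sketch (editorial addition, not part of the original library): drive
    # the controller against a toy first-order plant, using a fake clock so the
    # loop runs instantly. The plant model and gains are illustrative only.
    fake_now = [0.0]
    pid = PIDArduino(sampletime=1, kp=2.0, ki=0.5, kd=0.1,
                     out_min=0.0, out_max=100.0, time=lambda: fake_now[0])
    temperature = 20.0
    for step in range(10):
        fake_now[0] += 1.0                    # advance the clock by one sample
        output = pid.calc(temperature, 60.0)  # hold a setpoint of 60
        temperature += 0.05 * output - 0.02 * (temperature - 20.0)  # toy plant response
        print(step, round(temperature, 2), round(output, 2))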
| 3.109375 | 3 |
global_variables.py | DirkZomerdijk/status | 0 | 12771379 | #%%
# dutch_w = 0.664
# turkish_w = 0.075
# moroccan_w = 0.13
# ghanaian_w = 0.021
# suriname_w = 0.11
# sample_n = 4000
# dutch_pop = sample_n * dutch_w
# suriname_pop = sample_n * suriname_w
# turkish_pop = sample_n * turkish_w
# moroccan_pop = sample_n * moroccan_w
# ghanaian_pop = sample_n * ghanaian_w
# ethnicity_weights = [
# dutch_w,
# suriname_w,
# moroccan_w,
# turkish_w,
# ghanaian_w,
# ]
# pops = [
# dutch_pop,
# suriname_pop,
# turkish_pop,
# moroccan_pop,
# ghanaian_pop
# ]
DEBUG = True
SAVETYPE = "group"
root = "C:/Users/Admin/Code/status/"
results_dir = "C:/Users/Admin/Code/status/results/"
param_dict = {
# "population_size": [],
# "chronic_threshold": [],
"similarity_min": [],
"interactions": [],
"ses_noise": [],
# "repeats": [],
"vul_param": [],
"psr_param": [],
"coping_noise": [],
"recover_param": [],
"prestige_beta": [],
"prestige_param": [],
"stressor_param": [],
}
DAY = 1
WEEK = 7*DAY
CHRONIC_STRESS_PERIOD = 10
MAX_STATUS_DIFFERENCE = 14
status_dict_linear = {
"occ": {
1: 0,
2: 1,
3: 1,
4: 2,
5: 3,
6: 4,
7: 5,
8: 6
},
"edu": {
1: 0,
2: 2,
3: 4,
4: 6
},
"inc": {
1: -1,
2: -0.5,
3: 0.5,
4: 1
}
}
status_dict = {
"occ": {
1: 0,
2: 1,
3: 1,
4: 2,
5: 2,
6: 3,
7: 4,
8: 5
},
"edu": {
1: 0,
2: 1,
3: 2,
4: 4
},
"inc": {
1: 0,
2: 0,
3: 1,
4: 1
}
}
base_columns = ["ID", "occupation","H1_InkHhMoeite","H1_Opleid","H1_Mastery_Sumscore","H1_SSQSa", "status_l", "psr", "H1_etniciteit", "prestige"]
rename_columns = dict({
"ID": "id",
"occupation":"occ",
"H1_InkHhMoeite":"inc",
"H1_Opleid":"edu",
"H1_Mastery_Sumscore":"mastery",
"H1_SSQSa":"support",
"status_l" :"status",
"H1_etniciteit": "eth",
"H1_LO_BMI": "bmi",
"H1_Roken": "smoke"
})
columns_normalized = ["occ","edu","mastery","support"]
# %%
| 1.789063 | 2 |
airtech/apps/flights/urls.py | sam-karis/airtech | 0 | 12771380 | <reponame>sam-karis/airtech
from django.urls import path
from airtech.apps.flights.views import GetAllFlightsAPIView
app_name = 'flight'
urlpatterns = [
path('flights/', GetAllFlightsAPIView.as_view(), name='flights'),
]
| 1.679688 | 2 |
plugins/photos/__init__.py | mohnjahoney/website_source | 13 | 12771381 | <reponame>mohnjahoney/website_source
from .photos import *
| 1.070313 | 1 |
tests/test_client.py | max-arnold/python-block-disposable-email | 0 | 12771382 | # -*- coding: utf-8 -*-
import os
from io import StringIO
import json
import pytest
from unittest.mock import patch
from bdea.client import BDEAClient, URLError
from bdea.client import is_disposable_domain, is_disposable_email
class TestBDEAClientRequest(object):
def test_urlerror_returns_empty(self):
with patch('bdea.client.urlopen') as urlopen_mock:
urlopen_mock.side_effect = URLError('No luck!')
cl = BDEAClient('apikey')
assert cl.request('http://www.rottentomatoes.com/') == {}
def test_invalid_json_returns_empty(self):
with patch('bdea.client.urlopen') as urlopen_mock:
urlopen_mock.return_value = StringIO('invalid json')
cl = BDEAClient('apikey')
assert cl.request('http://www.rottentomatoes.com/') == {}
def test_valid_json(self):
with patch('bdea.client.urlopen') as urlopen_mock:
urlopen_mock.return_value = StringIO('{"blah": "blah"}')
cl = BDEAClient('apikey')
assert cl.request('http://www.rottentomatoes.com/') == {'blah': 'blah'}
def test_do_not_accept_email(self):
cl = BDEAClient('apikey')
with pytest.raises(ValueError):
cl.get_domain_status('<EMAIL>')
class TestBDEAClient(object):
def test_status_urlopen_args(self):
with patch('bdea.client.urlopen') as urlopen_mock:
urlopen_mock.return_value = StringIO('{}')
cl = BDEAClient('apikey')
cl.get_api_status()
url = 'http://status.block-disposable-email.com/status/?apikey=apikey'
urlopen_mock.assert_called_with(url, timeout=5)
def test_domain_urlopen_args(self):
with patch('bdea.client.urlopen') as urlopen_mock:
urlopen_mock.return_value = StringIO('{}')
cl = BDEAClient('apikey')
cl.get_domain_status('example.com')
url = 'http://check.block-disposable-email.com/easyapi/json/apikey/example.com'
urlopen_mock.assert_called_with(url, timeout=5)
class TestBDEAClientLive(object):
APIKEY_INVALID = 'invalid-unittest-apikey'
def _get_api_key(self):
return os.environ.get('BDEA_APIKEY', self.APIKEY_INVALID)
def test_invalid_apikey_domain_ok(self):
res = BDEAClient(self.APIKEY_INVALID).get_domain_status(BDEAClient.TEST_DOMAIN_OK)
assert res.response['domain_status'] == 'ok'
assert res.response['request_status'] == 'fail_key'
def test_invalid_apikey_domain_block(self):
res = BDEAClient(self.APIKEY_INVALID).get_domain_status(BDEAClient.TEST_DOMAIN_BLOCK)
assert res.response['domain_status'] == 'ok'
assert res.response['request_status'] == 'fail_key'
def test_invalid_apikey_api_status(self):
res = BDEAClient(self.APIKEY_INVALID).get_api_status()
assert res.response['request_status'] == 'ok'
assert res.response['apikeystatus'] == 'inactive'
@pytest.mark.xfail
def test_valid_apikey_api_status(self):
res = BDEAClient(self._get_api_key()).get_api_status()
assert res.response['request_status'] == 'ok'
assert res.response['apikeystatus'] == 'active'
@pytest.mark.xfail
def test_valid_apikey_domain_ok(self):
res = BDEAClient(self._get_api_key()).get_domain_status(BDEAClient.TEST_DOMAIN_OK)
assert res.response['domain_status'] == 'ok'
assert res.response['request_status'] == 'success'
@pytest.mark.xfail
def test_valid_apikey_domain_block(self):
res = BDEAClient(self._get_api_key()).get_domain_status(BDEAClient.TEST_DOMAIN_BLOCK)
assert res.response['domain_status'] == 'block'
assert res.response['request_status'] == 'success'
class TestShortcut(object):
RESPONSE = {
'domain_status': 'ok',
'execution_time': 0.0052359104156494,
'request_status': 'success',
'server_id': 'mirror5_vienna',
'servertime': '2015-10-25 5:25:54',
'version': '0.2'
}
def test_domain_shortcut_function(self):
with patch('bdea.client.urlopen') as urlopen_mock:
res = self.RESPONSE.copy()
urlopen_mock.return_value = StringIO('{}'.format(json.dumps(res)))
assert is_disposable_domain('google.com', 'apikey') == False
res.update({
'domain_status': 'block'
})
urlopen_mock.return_value = StringIO('{}'.format(json.dumps(res)))
assert is_disposable_domain('mailinator.com', 'apikey') == True
def test_email_shortcut_function(self):
with patch('bdea.client.urlopen') as urlopen_mock:
res = self.RESPONSE.copy()
urlopen_mock.return_value = StringIO('{}'.format(json.dumps(res)))
assert is_disposable_email('<EMAIL>', 'apikey') == False
res.update({
'domain_status': 'block'
})
urlopen_mock.return_value = StringIO('{}'.format(json.dumps(res)))
assert is_disposable_email('<EMAIL>', 'apikey') == True
| 2.453125 | 2 |
emlx_parse/time_utls.py | crb912/emlx_parse | 8 | 12771383 | <filename>emlx_parse/time_utls.py
"""
Date and time conversion utilities.
"""
import datetime
from dateutil import parser
import pytz
IOS_UTC_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
UTC_FORMAT = '%Y-%m-%dT%H:%M:%S+00:00'
def time_str_to_utc_str(time_str: str) -> str:
"""Return UTC str from arbitrary timezone time string.
About ISO8601 date string: https://en.wikipedia.org/wiki/ISO_8601
Combined date and time representations:
"2007-04-05T14:30Z" or "2007-04-05T12:30-02:00".
>>> time_str_to_utc_str('2012-11-01T04:16:13-04:00') # ISO8601 format
'2012-11-01T08:16:13+00:00'
>>> time_str_to_utc_str('8 Jul 2019 20:22:47 +0800') # Email data format
'2019-07-08T12:22:47+00:00'
>>> time_str_to_utc_str('8 Jul 2019 20:22:47 +0800 (GMT+08:00)')
'2019-07-08T12:22:47+00:00'
"""
try:
dt = parser.parse(time_str)
dt = dt.replace(tzinfo=pytz.utc) - dt.utcoffset()
return dt.strftime(UTC_FORMAT)
except ValueError:
if '(' in time_str:
return time_str_to_utc_str(time_str.split('(')[0])
def utc_str_to_local_str(utc_str: str, utc_format: str, local_format: str):
"""Return local time strings form UTC time strings.
:param utc_str: UTC time string
:param utc_format: format of UTC time string
:param local_format: format of local time string
:return: local time string
>>> utc = '2018-10-17T00:00:00.111Z'
>>> utc_str_to_local_str(utc, '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S')
'2018-10-17T08:00:00'
"""
temp1 = datetime.datetime.strptime(utc_str, utc_format)
temp2 = temp1.replace(tzinfo=datetime.timezone.utc)
local_time = temp2.astimezone()
return local_time.strftime(local_format)
def utc_str_to_timestamp(utc_str: str, utc_format: str) -> int:
"""Return timestamp from UTC time strings.
>>> utc_str_to_timestamp('2018-10-17T00:00:00', '%Y-%m-%dT%H:%M:%S')
1539734400
"""
temp1 = datetime.datetime.strptime(utc_str, utc_format)
temp2 = temp1.replace(tzinfo=datetime.timezone.utc)
return int(temp2.timestamp())
def timestamp_to_utc_str(ts: int) -> str:
"""
:param ts: time stamp
>>> timestamp_to_utc_str(1562588568)
'2019-07-08T12:22:48Z'
"""
return datetime.datetime.utcfromtimestamp(ts).strftime('%Y-%m-%dT%H:%M:%SZ')
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3.359375 | 3 |
run_modes/parallel_run.py | seblee97/run_modes | 0 | 12771384 | <filename>run_modes/parallel_run.py
import os
from typing import List, Type
from config_manager import base_configuration
from run_modes import base_runner, constants, single_run, utils
try:
import torch.multiprocessing as mp
except ModuleNotFoundError:
import multiprocessing as mp
def parallel_run(
runner_class: Type[base_runner.BaseRunner],
config_class: Type[base_configuration.BaseConfiguration],
run_methods: List[str],
config_path: str,
checkpoint_paths: List[str],
stochastic_packages: List[str] = [],
) -> None:
"""Set of experiments run in parallel using multiprocessing module.
Args:
runner_class: runner class to be instantiated.
config_class: configuration class to be instantiated.
run_methods: list of methods to be called on runner class.
config_path: path to yaml configuration file for experiment.
checkpoint_paths: list of paths to directories to output results.
stochastic_packages: list of packages (by name) for which seeds are to be set.
"""
processes = []
for checkpoint_path in checkpoint_paths:
changes = utils.json_to_config_changes(
os.path.join(checkpoint_path, constants.CONFIG_CHANGES_JSON)
)
process = mp.Process(
target=single_run.single_run,
args=(
runner_class,
config_class,
run_methods,
config_path,
checkpoint_path,
changes,
stochastic_packages,
),
)
process.start()
processes.append(process)
for process in processes:
process.join()
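
# Usage sketch (editorial addition): the runner/config classes and paths are
# placeholders for a caller's own objects; each checkpoint directory must
# already contain the config-changes JSON file read inside the function above.
#   parallel_run(
#       runner_class=MyRunner,
#       config_class=MyConfig,
#       run_methods=["train"],
#       config_path="config.yaml",
#       checkpoint_paths=["results/run_0", "results/run_1"],
#   )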
| 2.265625 | 2 |
app/commands/default/slack_send.py | mallycrip/jenkinsfile-converter | 0 | 12771385 | from app.commands.default import BaseCommand
class SlackSend(BaseCommand):
def get_command(self, color, message) -> str:
return self.generate_command(color, message)
@classmethod
def generate_command(cls, color, message):
return f'slackSend (color: "{color}", message: """\\\n{message}""".stripMargin())'
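
# Usage sketch (editorial addition): rendering the Jenkins pipeline step as a
# string via the classmethod; the color and message values are placeholders.
#   SlackSend.generate_command("good", "Build #42 succeeded")
#   # -> slackSend (color: "good", message: """\
#   #    Build #42 succeeded""".stripMargin())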
| 2.515625 | 3 |
squish/test.py | bd-j/squish | 2 | 12771386 | <filename>squish/test.py
import numpy as np
import corner as triangle
from sampler import SliceSampler
def lnprob_gaussian(x, ivar):
return -0.5 * np.sum(ivar * x ** 2)
def rosenbrock(x, a=1, b=100):
f = ((a - x[0])**2 + b * (x[1] - x[0]**2)**2)
return -f
def test_gaussian(niter=10000, ndim=2):
ivar = 1. / np.random.rand(ndim)
p0 = np.random.rand(ndim)
lnp0 = lnprob_gaussian(p0, ivar)
transform = np.linalg.cholesky(np.diag(ivar))
ss = SliceSampler(transform, lnprob_gaussian, postargs=[ivar])
res = [r for r in ss.sample(p0, lnp0, niter=niter)]
fig = triangle.corner(ss.chain)
print('----\nGaussian')
print('{} likelihood calls for {} iterations'.format(ss.nlike, niter))
fig.show()
return ss
def test_rosenbrock(niter=10000, a=1, b=100, save=False):
p0 = np.array([-1, 1])
lnp0 = rosenbrock(p0, a=a, b=b)
transform = np.linalg.cholesky(np.diag([1, 1]))
ss = SliceSampler(transform, rosenbrock, postkwargs={'a':a, 'b':b})
res = [r for r in ss.sample(p0, lnp0, niter=niter)]
#ss.reset()
#ss.sample(p0, lnp0, niter=niter, storechain=True)
print('finished sampling')
fig = triangle.corner(ss.chain[5000:, :], truths=[a, a], labels=[r'$x_1$', r'$x_2$'])
free = np.array(fig.axes)[~np.array([ax.has_data() for ax in fig.axes])]
best = ss.chain[np.argmax(ss.lnprob), :]
text = 'Rosenbrock ($a={:4.1f}$, $b={:4.1f})$\n'.format(a, b)
text += '{} iterations\n'.format(niter)
text += '{} likelihood calls\n'.format(ss.nlike)
text += 'MAP: {:4.2f}, {:4.2f}'.format(*best)
free[0].text(0.01, 0.8, text, transform=free[0].transAxes, verticalalignment='top')
fig.show()
if save:
fig.savefig('rosenbrock.png')
return ss
if __name__ == "__main__":
import matplotlib.pyplot as pl
pl.rcParams['xtick.direction'] = 'in'
pl.rcParams['xtick.direction'] = 'in'
#test_gaussian(ndim=4)
ss = test_rosenbrock()
ss = test_rosenbrock(niter=int(1e6))
import acor
lag = 10 **(np.arange(4)+1)
tau1 = [acor.acor(ss.chain[:,1], maxlag=l)[0] for l in lag]
tau0 = [acor.acor(ss.chain[:,0], maxlag=l)[0] for l in lag]
for l, t1, t0 in zip(lag, tau1, tau0):
print('maxlag:{}, tau0:{}, tau1:{}'.format(l, t0, t1))
| 2.46875 | 2 |
node_modules/nuclide/pkg/nuclide-server/scripts/utils.py | kevingatera/kgatewebapp | 1 | 12771387 | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree.
from __future__ import print_function
import httplib
import logging
import os
import re
import socket
import ssl
import subprocess
import sys
logger = logging.getLogger('utils')
# Run the process silently without stdout and stderr.
# On success, return stdout. Otherwise, raise CalledProcessError
# with combined stdout and stderr.
def check_output_silent(args, cwd=None, env=None):
    # Use Popen here. check_output is not available in Python 2.6.
# cwd=None means don't change cwd.
# env=None means inheriting the current process' environment.
process = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd,
env=env)
out, err = process.communicate()
if process.returncode != 0:
error = subprocess.CalledProcessError(process.returncode, args)
error.output = out + err
raise error
else:
return out
def darwin_path_helper():
try:
out = check_output_silent(['/usr/libexec/path_helper', '-s'])
path = re.search(r'PATH=\"([^\"]+)\"', out).group(1)
return path
except Exception as e:
logger.warn('Failed to get additional PATH info (%s)', e.message)
return ''
# It supports https if key_file and cert_file are given.
def http_get(host, port, method, url, key_file=None, cert_file=None, ca_cert=None, timeout=1):
try:
conn = None
if key_file is not None and cert_file is not None and ca_cert is not None:
if sys.version_info < (2, 7, 9):
conn = httplib.HTTPSConnection(
host,
port,
key_file=key_file,
cert_file=cert_file,
timeout=timeout)
else:
ctx = ssl.create_default_context(cafile=ca_cert)
# We disable host name validation here so we can ping the server endpoint
# using localhost.
ctx.check_hostname = False
conn = httplib.HTTPSConnection(
host,
port,
key_file=key_file,
cert_file=cert_file,
timeout=timeout,
context=ctx)
else:
conn = httplib.HTTPConnection(host, port, timeout=timeout)
conn.request(method, url)
response = conn.getresponse()
if response.status == 200:
ret = response.read()
return ret
else:
return None
except ssl.SSLError as e:
if sys.version_info < (2, 7, 9):
logger.error("An SSL Error occurred")
else:
logger.error("An SSL Error occurred: %s" % e.reason)
return None
except socket.error:
return None
except:
logger.error("Unexpected error: %s" % sys.exc_info()[0])
return None
finally:
if conn:
conn.close()
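

# Usage sketch (editorial addition): a plain HTTP health check; the host, port
# and endpoint are placeholders.
#   body = http_get('localhost', 9090, 'GET', '/heartbeat', timeout=2)
#   if body is None:
#       logger.error('Server is not reachable')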
def is_ip_address(addr):
try:
# Check ipv4 address.
socket.inet_aton(addr)
return True
except socket.error:
pass
try:
# Check ipv6 address.
socket.inet_pton(socket.AF_INET6, addr)
return True
except socket.error:
return False
# Read the resource and write it to a given dir using the resource name as file name.
# Return the file path.
def write_resource_to_file(name, dir):
target_path = os.path.join(dir, os.path.basename(name))
with open(name, 'r') as res_file:
content = res_file.read()
with open(target_path, 'w') as f:
f.write(content)
return target_path
| 2.078125 | 2 |
functions_cache/engines/base.py | BalighMehrez/functions-cache | 0 | 12771388 | <gh_stars>0
#!/usr/bin/env python
"""
functions_cache.engines.base
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Contains BaseCache class which can be used as in-memory cache engine or
extended to support persistence.
"""
import hashlib
from datetime import datetime, timezone
from io import BytesIO
from typing import Any
from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse
from functions_cache.function_identifier import FunctionIdentifier
# All engine-specific keyword arguments combined
ENGINE_KWARGS = [
'connection',
'db_name',
'endpont_url',
'extension',
'fast_save',
'ignored_parameters',
'include_get_headers',
'location',
'name',
'namespace',
'read_capacity_units',
'region_name',
'write_capacity_units',
]
class BaseCache(object):
"""Base class for cache implementations, can be used as in-memory cache.
To extend it you can provide dictionary-like objects for
:attr:`responses` or override public methods.
"""
def __init__(self, *args, **kwargs):
#: `key_in_cache` -> `response` mapping
self.responses = {}
def save_response(self, key, response):
"""Save response to cache
:param key: key for this response
:param response: response to save
"""
self.responses[key] = response, datetime.now(timezone.utc)
def get_response_and_time(self, key, default=(None, None)):
"""Retrieves response and timestamp for `key` if it's stored in cache,
otherwise returns `default`
:param key: key of resource
:param default: return this if `key` not found in cache
:returns: tuple (response, datetime)
"""
try:
response, timestamp = self.responses[key]
except KeyError:
return default
return response, timestamp
def delete(self, key):
"""Delete `key` from cache. Also deletes all responses from response history"""
try:
if key in self.responses:
response, _ = self.responses[key]
del self.responses[key]
except KeyError:
pass
def clear(self):
"""Clear cache"""
self.responses.clear()
def remove_old_entries(self, expires_before):
"""Deletes entries from cache with expiration time older than ``expires_before``"""
if expires_before.tzinfo is None:
# if expires_before is not timezone-aware, assume local time
expires_before = expires_before.astimezone()
keys_to_delete = set()
for key, (response, _) in self.responses.items():
if response.expiration_date is not None and response.expiration_date < expires_before:
keys_to_delete.add(key)
for key in keys_to_delete:
self.delete(key)
def create_key(self, function_identifier: FunctionIdentifier):
key = hashlib.sha256()
key.update(_to_bytes(function_identifier.function_name.upper()))
if function_identifier.function_args and function_identifier.function_args != ():
for arg in function_identifier.function_args:
key.update(_to_bytes(arg))
if function_identifier.function_kwargs and function_identifier.function_kwargs != ():
for name, value in sorted(function_identifier.function_kwargs.items()):
key.update(_to_bytes(name))
key.update(_to_bytes(value))
return key.hexdigest()
def has_key(self, key):
"""Returns `True` if cache has `key`, `False` otherwise"""
return key in self.responses
def __str__(self):
return 'responses: %s' % (self.responses)
def _to_bytes(s, encoding='utf-8'):
return s if isinstance(s, bytes) else bytes(str(s), encoding)
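

if __name__ == "__main__":
    # Usage sketch (editorial addition, not part of the original module):
    # exercising the in-memory cache directly. Keys are normally produced by
    # create_key() from a FunctionIdentifier; a literal key is used for brevity.
    cache = BaseCache()
    cache.save_response("example-key", {"answer": 42})
    response, stored_at = cache.get_response_and_time("example-key")
    print(response, stored_at, cache.has_key("example-key"))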
| 3.015625 | 3 |
test/tests.py | jealuna/NReinas | 0 | 12771389 | import pytest
import reinas.queens as queens
import numpy as np
import models.consultas as consultas
def test_numero_reinas(numero):
n = int(numero)
lista_soluciones = []
session = consultas.loadSession()
tablero = np.zeros(shape=(n,n),dtype=int)
queens.n_reinas(tablero,0,lista_soluciones)
num_soluciones = consultas.num_soluciones(n, session)
assert len(lista_soluciones) == num_soluciones | 2.296875 | 2 |
knownly/console/tests/test_models.py | dwightgunning/knownly | 2 | 12771390 | <gh_stars>1-10
from django.test import TestCase
from knownly.console.models import DropboxUser
class DropboxUserTests(TestCase):
fixtures = ['test_fixtures.json']
def test_has_activated_website_true(self):
user = DropboxUser.objects.get(
django_user__email="<EMAIL>")
self.assertTrue(user.date_activated)
def test_has_activated_website_false(self):
user = DropboxUser.objects.get(
django_user__email="<EMAIL>")
self.assertIsNone(user.date_activated)
| 2.5 | 2 |
apps/article/urls.py | moogoo78/portal20 | 7 | 12771391 | from django.urls import path, re_path
from . import views
urlpatterns = [
path('category/<str:category>/', views.article_list, name='article-list'),
path('tag/<str:tag_name>/', views.article_tag_list, name='article-tag-list'),
path('<int:pk>/', views.article_detail, name='article-detail-id'),
#path('<int:pk>/<slug:slug>/', views.article_detail, name='article-detail-slug'),
]
| 1.890625 | 2 |
tests/test_graphic_coloring_engine.py | filosfino/graphic-coloring-engine | 0 | 12771392 | import logging
from shapely.geometry.multipolygon import MultiPolygon
from shapely.geometry.polygon import Polygon
from snapshottest import TestCase
from graphic_coloring_engine.core import (
Color,
ColorChoice,
ColoringEngine,
ColoringEngineConstants,
Coordinate,
DominantColor,
Layer,
Layout,
)
logger = logging.getLogger(__name__)
class TestLayout(TestCase):
seed = 42
constants = ColoringEngineConstants()
default_allowed_color_set = set(
[ColorChoice(rgb_string="#000"), ColorChoice(rgb_string="#fff")]
)
bg_coord = Coordinate(
xmin=0,
xmax=100,
ymin=0,
ymax=200,
)
img_coord = Coordinate(
xmin=0,
xmax=30,
ymin=0,
ymax=30,
)
    # top-left
text_coord = Coordinate(
xmin=0,
xmax=50,
ymin=0,
ymax=50,
)
    # bottom-right
text_coord_2 = Coordinate(
xmin=50,
xmax=100,
ymin=150,
ymax=200,
)
def build_layout(self):
return Layout(
width=100,
height=200,
layers=[
                # background
Layer(
order=3,
bbox_coordinate=self.bg_coord,
dominant_colors=[DominantColor(rgb_string="#1f1f1f", ratio=0.8)],
type="image",
),
                # image
Layer(
order=2,
bbox_coordinate=self.img_coord,
dominant_colors=[DominantColor(rgb_string="#88F", ratio=0.8)],
type="image",
),
                # text
Layer(
order=1,
bbox_coordinate=self.text_coord,
polygon=MultiPolygon(
[
Polygon(
[
(10, 20),
(40, 20),
(40, 30),
(10, 30),
]
)
]
),
color_mutable=True,
type="text",
),
                # text
Layer(
order=0,
bbox_coordinate=self.text_coord_2,
color_mutable=True,
type="text",
),
],
)
def test_coloring_engine_init(self):
        # no extra constraints configured
layout = self.build_layout()
engine = ColoringEngine(layout=layout, seed=self.seed, constants=self.constants)
assert engine.get_layer_color_filters(0) is None
assert engine.get_layer_color_constraint(0) is None
color_schemes = engine.colorize()
assert len(color_schemes) == 0
def test_coloring_engine_init_with_extra_color(self):
layout = self.build_layout()
engine = ColoringEngine(
layout=layout,
seed=self.seed,
constants=self.constants,
extra_usable_colors=self.default_allowed_color_set,
)
assert engine.get_layer_color_filters(0) is None
assert engine.get_layer_color_constraint(0) is None
color_schemes = engine.colorize()
assert len(color_schemes) > 0
        # expect each text color to have enough contrast with every intersecting layer
for color_scheme in color_schemes:
for layer_order in [
layer.order for layer in layout.layers if layer.type == "text"
]:
new_text_color = color_scheme[layer_order]
for bg_order in layout.layer_collision_map[layer_order]:
bg_layer = layout.layer_map[bg_order]
assert (
bg_layer.color.contrast(new_text_color)
> engine.constants.文字与背景的最小对比度
)
def test_coloring_engine_init_with_filter(self):
layout = self.build_layout()
engine = ColoringEngine(
layout=layout,
seed=self.seed,
constants=self.constants,
extra_usable_colors=self.default_allowed_color_set,
layer_color_filter_map={1: [lambda color, layout: False]},
)
assert engine.get_layer_color_filters(1) is not None
color_schemes = engine.colorize()
assert len(color_schemes) == 0
def test_coloring_engine_init_with_constraint(self):
layout = self.build_layout()
engine = ColoringEngine(
layout=layout,
seed=self.seed,
constants=self.constants,
extra_usable_colors=self.default_allowed_color_set,
layer_color_constraint_map={1: [lambda color, layout: False]},
)
assert engine.get_layer_color_constraint(1) is not None
color_schemes = engine.colorize()
assert len(color_schemes) == 0
def test_coloring_engine_init_with_constraint_ctx(self):
layout = self.build_layout()
other_node_colorized = set()
def build_flag_constraint(layer_order: int):
def flag_constraint(color: Color, coloring_engine: ColoringEngine):
nonlocal other_node_colorized
                for layer in coloring_engine.layout.layers:
                    if layer.order != layer_order:
                        other_node_colorized.add(bool(layer.color))
return True
return flag_constraint
engine = ColoringEngine(
layout=layout,
seed=self.seed,
constants=self.constants,
extra_usable_colors=self.default_allowed_color_set,
layer_color_constraint_map={
1: [build_flag_constraint(1)],
2: [build_flag_constraint(0)],
},
)
color_schemes = engine.colorize()
assert len(color_schemes) != 0
assert True in other_node_colorized
assert False in other_node_colorized
def test_coloring_engine_init_with_global_constraint(self):
layout = self.build_layout()
engine = ColoringEngine(
layout=layout,
seed=self.seed,
constants=self.constants,
extra_usable_colors=self.default_allowed_color_set,
global_color_constraint=[
lambda layout: Color(rgb_string="#000") == layout.layers[2].color
],
)
color_schemes = engine.colorize()
assert len(color_schemes) != 0
| 2.25 | 2 |
program/object-detection-tf-py/tensorRT_hooks.py | G4V/ck-tensorflow | 108 | 12771393 | '''
hooks for using tensorRT with the object detection program.
names and parameters are defined as required by the detect.py infrastructure.
'''
import tensorflow as tf
import tensorflow.contrib.tensorrt as trt
def load_graph_tensorrt(params):
graph_def = tf.compat.v1.GraphDef()
with tf.compat.v1.gfile.GFile(params["FROZEN_GRAPH"], 'rb') as f:
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
trt_graph = trt.create_inference_graph(
input_graph_def=graph_def,
outputs=['detection_boxes:0','detection_scores:0','detection_classes:0','num_detections:0'],
max_batch_size=params["BATCH_SIZE"],
max_workspace_size_bytes=4000000000,
is_dynamic_op=True if params["TENSORRT_DYNAMIC"]==1 else False,
precision_mode=params["TENSORRT_PRECISION"]
)
tf.import_graph_def(
trt_graph,
return_elements=['detection_boxes:0','detection_scores:0','detection_classes:0','num_detections:0'])
# No longer needed: convert_from_tensorrt is now a simple pass-through.
def convert_from_tensorrt(tmp_output_dict ):
return tmp_output_dict
# The tensor names differ from the normal TF names, but they can be retrieved and a dict with the
# same shape as the original one can be formed, avoiding any conversion after the postprocessing.
# Note that for the tf session the names alone are enough; there is no real need to fetch the tensors.
def get_handles_to_tensors_RT():
graph = tf.get_default_graph()
tensor_dict = {}
tensor_dict['num_detections'] = graph.get_tensor_by_name('import/num_detections:0')
tensor_dict['detection_classes']=graph.get_tensor_by_name( 'import/detection_classes:0')
tensor_dict['detection_boxes'] = graph.get_tensor_by_name('import/detection_boxes:0')
tensor_dict['detection_scores'] = graph.get_tensor_by_name('import/detection_scores:0')
image_tensor =graph.get_tensor_by_name('import/image_tensor:0')
return tensor_dict, image_tensor
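# Rough driver sketch showing how the hooks above fit together; the params values, the
# 300x300 dummy batch and the explicit tf.compat.v1.Session are illustrative assumptions,
# since the real detect.py infrastructure supplies all of this at run time.
def _tensorrt_demo(frozen_graph_path):
    import numpy as np
    params = {
        "FROZEN_GRAPH": frozen_graph_path,
        "BATCH_SIZE": 1,
        "TENSORRT_DYNAMIC": 1,
        "TENSORRT_PRECISION": "FP16",
    }
    load_graph_tensorrt(params)
    tensor_dict, image_tensor = get_handles_to_tensors_RT()
    with tf.compat.v1.Session() as sess:
        dummy_batch = np.zeros((1, 300, 300, 3), dtype=np.uint8)
        return sess.run(tensor_dict, feed_dict={image_tensor: dummy_batch})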
| 2.515625 | 3 |
tasks/task_11.2_triangle/test_gerone.py | ivanguy/python-course-tasks | 0 | 12771394 | <filename>tasks/task_11.2_triangle/test_gerone.py
#!/usr/bin/env python
from unittest.mock import patch
import doctest
import unittest
import gerone
mocked_input = """0 0 0 3 4 0""".split()
@patch(target='builtins.input', side_effect=mocked_input)
class TestInput(unittest.TestCase):
def setUp(self):
super().setUp()
def test_valid_input(self, mock):
self.assertEqual(input(), '0')
def test_something(*args):
pass
class TestTriangleClass(unittest.TestCase):
valid_points = [
(0, 0),
(3, 0),
(0, 4)]
points_on_same_line = [
(0, 0),
(0, 5),
(0, 6)]
def test_invalid_points_init(self):
with self.assertRaises(gerone.TriangleError):
gerone.Triangle(self.points_on_same_line)
def test_valid_points_init(self):
gerone.Triangle(self.valid_points)
if __name__ == '__main__':
    doctest.testmod(gerone)
    unittest.main()
| 3.046875 | 3 |
tests/run_tests.py | alercebroker/lc_correction | 0 | 12771395 | import unittest
import os
import sys
FILE_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(FILE_PATH)
from ZTF_correction import *
if __name__ == "__main__":
unittest.main()
| 1.710938 | 2 |
despike.py | ColinBrosseau/DataManipulation | 0 | 12771396 | <reponame>ColinBrosseau/DataManipulation
import numpy as np
import matplotlib.pyplot as pl  # pl.plot/pl.show are only used in the verbose branches
def despike_background(y, length, threshold=3.5, order=6, verbose=False):
"""
Despike the vector y using background estimation.
Inputs:
y
(vector)
Evenly spaced vector
length
(int>0)
Minimum size of real peaks
threshold
(float>0)
Threshold for detecting potential bad peaks (units of noise)
order
(int>=0)
Order of the polynomial to use as background model
verbose
(bool)
Print additional informations
"""
noise, INear, IFar, yBack = calculateNoise(y, threshold=threshold, order=order, cost_function='stq', verbose=verbose)
I1 = IFar[0]
loop_over_peaks = True
i = 0
while loop_over_peaks:
while True: # search the last point in this group
i += 1
if i > len(IFar)-1: # we reached the end of the vector
i = len(IFar)
break
if IFar[i] > IFar[i-1] + 1: # the next point is not adjacent (it is in another group)
break
I2 = IFar[i-1]
if I2 - I1 < length: # only keep small enough groups
# index of points at left to fit bad points
Ibegin = np.where(np.logical_and(INear < I1, INear > I1 - length/2))
# index of points at right to fit bad points
Iend = np.where(np.logical_and(INear > I2, INear < I2 + length/2))
Ifit = np.concatenate((INear[Ibegin], INear[Iend])) # index of the points used for fitting
# points for fitting
x_fit = np.array(Ifit)
y_fit = np.array(y[Ifit])
# order of the polynomial for fitting
order = 2
            if len(x_fit) == 1: # if not enough points, use lower order polynomials
order = 0
elif len(x_fit) == 2:
order = 1
# polynomial fitting
poly = np.poly1d(np.polyfit(x_fit, y_fit, order))
x_new = np.arange(I1, I2+1) # position of the points to replace
y[x_new] = poly(x_new) # replace the bad points with fitted ones
if i > len(IFar)-1:
break
I1 = IFar[i]
def calculateNoise(y, threshold=1, order=6, cost_function='atq', verbose=False):
"""
Calculate the noise, the background, points close and far for vector y.
We suppose that y is for evenly spaced position y(x).
Inputs:
y
(vector)
Evenly spaced vector
threshold
(float>0)
Threshold for Far points (units of noise)
order
(int>=0)
Order of the polynomial to use as background model
cost_function
'sh', 'ah', 'stq', 'atq'
Cost function for background calculation. See backcor.backcor
verbose
(bool)
Print additional informations
Outputs:
noise
(float)
Estimation of the noise of y. Using median absolute deviation (MAD) of y-yBack.
INear
(vector, int)
Index of points near from yBack
IFar
(vector, int)
Index of points far from yBAck
yBack
(vector)
Background of y (polynomial)
"""
from backcor import backcor
# Calculate the noise (first estimation)
MAD = np.median(np.abs(y - np.median(y)))
if verbose:
print("MAD (first estimation)")
print(MAD)
# Calculate the background
yBack, poly, it = backcor(np.arange(len(y)), y, order, MAD, fct=cost_function)
if verbose:
pl.plot(y)
pl.plot(yBack)
pl.show()
deviation = y - yBack
# Calculate the noise
noise = np.median(np.abs(deviation - np.median(deviation)))
if verbose:
print("MAD")
print(noise)
if verbose:
print("threshold level:")
print(threshold*noise)
# detect the peaks far from the background
INear = np.where(np.logical_and(deviation>=-threshold*noise, deviation<=threshold*noise))[0]
IFar = np.where(np.logical_or(deviation<-threshold*noise, deviation>threshold*noise))[0]
return noise, INear, IFar, yBack
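# Illustrative usage sketch on synthetic data (the sine baseline, noise level and spike
# positions are made up); despike_background() repairs the spikes in y in place.
def _despike_demo():
    x = np.linspace(0, 10, 500)
    y = np.sin(x) + 0.01 * np.random.randn(500)
    y[[100, 101, 300]] += 5.0  # inject narrow artificial spikes
    despike_background(y, length=10, threshold=3.5, order=6)
    return y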
| 3 | 3 |
name_generator/generator.py | cookyt/name-generator | 1 | 12771397 | <reponame>cookyt/name-generator
#! /usr/bin/env python2
import os
import random
from collections import defaultdict
def GetNames(fname):
with open(fname, 'r') as fin:
names = [name.strip().lower() for name in fin.readlines()]
return names
class WordGenerator(object):
''' Generates words using statistical properties of the given list of seed
words.
'''
def __init__(self, words):
self.positional_freqs = self._CalcCharFreqs(words)
def GenerateWord(self):
''' Generates a word by walking a markov chain where priors are taken from
the current character index in the word and the last generated character,
and the next character is chosen probabilistically using the frequencies of
that that character comes after the current character in the current
position.
'''
cur_pos = 0
cur_char = self._ChooseNext(self.positional_freqs[0]['^'])
chars = []
while cur_char != '$':
chars.append(cur_char)
cur_pos += 1
cur_char = self._ChooseNext(self.positional_freqs[cur_pos][cur_char])
return ''.join(chars)
def _CalcCharFreqs(self, words):
# map of positional index in word
# -> map of current character
# -> map of next possible character
# -> count of times next possible character appeared in list of words
positional_counts = defaultdict(
lambda: defaultdict(lambda: defaultdict(float)))
# TODO the special characters '^' and '$' denote the beginning and end of a
# word respectively. This is done so that I don't have to keep track of
# frequency distributions for the first character and the last character in
# a word seperately. That said, this isn't a great solution because it
# creates forbidden characters in the words.
for word in words:
character_pairs = zip('^' + word, word + '$')
for pos, (char, char_after) in enumerate(character_pairs):
positional_counts[pos][char][char_after] += 1
# Normalize character counts so that the sum of all after-character
# frequencies for a given position and current-character is equal to 1.
for character_counts in positional_counts.itervalues():
for next_char_freqs in character_counts.itervalues():
norm_factor = sum(next_char_freqs.itervalues())
for char, freq in next_char_freqs.iteritems():
next_char_freqs[char] = float(freq) / norm_factor
return positional_counts
def _ChooseNext(self, freqs):
probability = random.random()
cum_probability = 0
for next_char, freq in freqs.iteritems():
cum_probability += freq
if probability <= cum_probability:
return next_char
# This should never happen theoretically because the sum of all frequencies
# should be one, but rounding errors might cause this to occur.
return next_char
def main():
name_file = 'instance/data/first-names_en-US.txt'
first_names = GetNames(name_file)
first_name_generator = WordGenerator(first_names)
rand_names = [first_name_generator.GenerateWord() for i in range(100)]
common_names = set(n for n in rand_names) & set(n for n in first_names)
print "Seeded with %d real names" % len(first_names)
print "Generated %d names" % len(rand_names)
  real_name_ratio = 100.0 * len(common_names) / len(rand_names)
print ("%d generated names are real (%g%%)" %
(len(common_names), real_name_ratio))
print "Real names which were also generated: %s" % common_names
print "Generated names: %s" % rand_names
if __name__ == '__main__':
main()
| 3.71875 | 4 |
rldb/db/paper__gorila_dqn/algo__dqn/entries.py | seungjaeryanlee/sotarl | 45 | 12771398 | entries = [
{
'env-title': 'atari-alien',
'env-variant': 'Human start',
'score': 570.20,
},
{
'env-title': 'atari-amidar',
'env-variant': 'Human start',
'score': 133.40,
},
{
'env-title': 'atari-assault',
'env-variant': 'Human start',
'score': 3332.30,
},
{
'env-title': 'atari-asterix',
'env-variant': 'Human start',
'score': 124.50,
},
{
'env-title': 'atari-asteroids',
'env-variant': 'Human start',
'score': 697.10,
},
{
'env-title': 'atari-atlantis',
'env-variant': 'Human start',
'score': 76108.00,
},
{
'env-title': 'atari-bank-heist',
'env-variant': 'Human start',
'score': 176.30,
},
{
'env-title': 'atari-battle-zone',
'env-variant': 'Human start',
'score': 17560.00,
},
{
'env-title': 'atari-beam-rider',
'env-variant': 'Human start',
'score': 8672.40,
},
{
'env-title': 'atari-bowling',
'env-variant': 'Human start',
'score': 41.20,
},
{
'env-title': 'atari-boxing',
'env-variant': 'Human start',
'score': 25.80,
},
{
'env-title': 'atari-breakout',
'env-variant': 'Human start',
'score': 303.90,
},
{
'env-title': 'atari-centipede',
'env-variant': 'Human start',
'score': 3773.10,
},
{
'env-title': 'atari-chopper-command',
'env-variant': 'Human start',
'score': 3046.00,
},
{
'env-title': 'atari-crazy-climber',
'env-variant': 'Human start',
'score': 50992.00,
},
{
'env-title': 'atari-demon-attack',
'env-variant': 'Human start',
'score': 12835.20,
},
{
'env-title': 'atari-double-dunk',
'env-variant': 'Human start',
'score': -21.60,
},
{
'env-title': 'atari-enduro',
'env-variant': 'Human start',
'score': 475.60,
},
{
'env-title': 'atari-fishing-derby',
'env-variant': 'Human start',
'score': -2.30,
},
{
'env-title': 'atari-freeway',
'env-variant': 'Human start',
'score': 25.80,
},
{
'env-title': 'atari-frostbite',
'env-variant': 'Human start',
'score': 157.40,
},
{
'env-title': 'atari-gopher',
'env-variant': 'Human start',
'score': 2731.80,
},
{
'env-title': 'atari-gravitar',
'env-variant': 'Human start',
'score': 216.50,
},
{
'env-title': 'atari-hero',
'env-variant': 'Human start',
'score': 12952.50,
},
{
'env-title': 'atari-ice-hockey',
'env-variant': 'Human start',
'score': -3.80,
},
{
'env-title': 'atari-jamesbond',
'env-variant': 'Human start',
'score': 348.50,
},
{
'env-title': 'atari-kangaroo',
'env-variant': 'Human start',
'score': 2696.00,
},
{
'env-title': 'atari-krull',
'env-variant': 'Human start',
'score': 3864.00,
},
{
'env-title': 'atari-kung-fu-master',
'env-variant': 'Human start',
'score': 11875.00,
},
{
'env-title': 'atari-montezuma-revenge',
'env-variant': 'Human start',
'score': 50.00,
},
{
'env-title': 'atari-ms-pacman',
'env-variant': 'Human start',
'score': 763.50,
},
{
'env-title': 'atari-name-this-game',
'env-variant': 'Human start',
'score': 5439.90,
},
{
'env-title': 'atari-pong',
'env-variant': 'Human start',
'score': 16.20,
},
{
'env-title': 'atari-private-eye',
'env-variant': 'Human start',
'score': 298.20,
},
{
'env-title': 'atari-qbert',
'env-variant': 'Human start',
'score': 4589.80,
},
{
'env-title': 'atari-riverraid',
'env-variant': 'Human start',
'score': 4065.30,
},
{
'env-title': 'atari-road-runner',
'env-variant': 'Human start',
'score': 9264.00,
},
{
'env-title': 'atari-robotank',
'env-variant': 'Human start',
'score': 58.50,
},
{
'env-title': 'atari-seaquest',
'env-variant': 'Human start',
'score': 2793.90,
},
{
'env-title': 'atari-space-invaders',
'env-variant': 'Human start',
'score': 1449.70,
},
{
'env-title': 'atari-star-gunner',
'env-variant': 'Human start',
'score': 34081.00,
},
{
'env-title': 'atari-tennis',
'env-variant': 'Human start',
'score': -2.30,
},
{
'env-title': 'atari-time-pilot',
'env-variant': 'Human start',
'score': 5640.00,
},
{
'env-title': 'atari-tutankham',
'env-variant': 'Human start',
'score': 32.40,
},
{
'env-title': 'atari-up-n-down',
'env-variant': 'Human start',
'score': 3311.30,
},
{
'env-title': 'atari-venture',
'env-variant': 'Human start',
'score': 54.00,
},
{
'env-title': 'atari-video-pinball',
'env-variant': 'Human start',
'score': 20228.10,
},
{
'env-title': 'atari-wizard-of-wor',
'env-variant': 'Human start',
'score': 246.00,
},
{
'env-title': 'atari-zaxxon',
'env-variant': 'Human start',
'score': 831.00,
},
]
| 1.515625 | 2 |
experiments/distributed.py | DeNeutoy/flowseq | 256 | 12771399 | <reponame>DeNeutoy/flowseq
import sys
import os
current_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(root_path)
import json
import signal
import threading
import torch
from flownmt.data import NMTDataSet
import experiments.options as options
from experiments.nmt import main as single_process_main
def create_dataset(args):
model_path = args.model_path
if not os.path.exists(model_path):
os.makedirs(model_path)
result_path = os.path.join(model_path, 'translations')
if not os.path.exists(result_path):
os.makedirs(result_path)
vocab_path = os.path.join(model_path, 'vocab')
if not os.path.exists(vocab_path):
os.makedirs(vocab_path)
data_path = args.data_path
src_lang = args.src
tgt_lang = args.tgt
src_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(src_lang))
tgt_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(tgt_lang))
params = json.load(open(args.config, 'r'))
src_max_vocab = params['{}_vocab_size'.format(src_lang)]
tgt_max_vocab = params['{}_vocab_size'.format(tgt_lang)]
NMTDataSet(data_path, src_lang, tgt_lang, src_vocab_path, tgt_vocab_path, src_max_vocab, tgt_max_vocab,
subword=args.subword, create_vocab=True)
def main():
args = options.parse_distributed_args()
args_dict = vars(args)
nproc_per_node = args_dict.pop('nproc_per_node')
nnodes = args_dict.pop('nnodes')
node_rank = args_dict.pop('node_rank')
# world size in terms of number of processes
dist_world_size = nproc_per_node * nnodes
# set PyTorch distributed related environmental variables
current_env = os.environ
current_env["MASTER_ADDR"] = args_dict.pop('master_addr')
current_env["MASTER_PORT"] = str(args_dict.pop('master_port'))
current_env["WORLD_SIZE"] = str(dist_world_size)
create_vocab = args_dict.pop('create_vocab')
if create_vocab:
create_dataset(args)
args.create_vocab = False
batch_size = args.batch_size // dist_world_size
args.batch_size = batch_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
processes = []
for local_rank in range(0, nproc_per_node):
# each process's rank
dist_rank = nproc_per_node * node_rank + local_rank
args.rank = dist_rank
args.local_rank = local_rank
process = mp.Process(target=run, args=(args, error_queue, ), daemon=True)
process.start()
error_handler.add_child(process.pid)
processes.append(process)
for process in processes:
process.join()
def run(args, error_queue):
try:
single_process_main(args)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.rank, traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
self.children_pids.append(pid)
def error_listener(self):
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = "\n\n-- Tracebacks above this line can probably be ignored --\n\n"
msg += original_trace
raise Exception(msg)
if __name__ == "__main__":
main()
| 1.984375 | 2 |
config.py | superiorkid/todo | 0 | 12771400 | import os
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY') or os.urandom(32)
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or 'postgresql://superiorkid:root@localhost/todo'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
 | 1.882813 | 2 |
examples/insertion_sort.py | kelsheyk/coverageRanker | 25 | 12771401 | from pyllist import dllist
#Classical implementation; requires index manipulation
def ins_sort(array):
for i in range(1, len(array)):
for k in range(i, 0, -1):
if array[k] < array[k - 1]:
array[k], array[k - 1] = array[k - 1], array[k]
return array
#Linked-list implementation, demonstrates iteration starting from a given node
def ins_sort_llist(data):
for card in data.first.next.iternext(): # Start iterating from the second!
for left_card in card.iterprev():
if left_card.prev is not None and left_card.value < left_card.prev.value:
left_card.value, left_card.prev.value = left_card.prev.value, left_card.value
return data
#Linked-list implementation, demonstrates other types of iteration
#and moves nodes instead of their values, which isn't really efficient
def ins_sort_llist2(data):
for card in data.first.next.iternext():
for left_card in data.iternodes(to=card):
if left_card.value > card.value:
data.remove(card)
data.insert(card, before=left_card)
break
return data
data = [6, 5, 32, 8, 234, 5, 1, 9, 0, 33]
print(ins_sort(data))
data_llist = dllist([6, 5, 32, 8, 234, 5, 1, 9, 0, 33])
print(ins_sort_llist(data_llist))
data_llist = dllist([6, 5, 32, 8, 234, 5, 1, 9, 0, 33])
print(ins_sort_llist2(data_llist))
| 3.90625 | 4 |
tests/test_cuckoofilter/generate_testfile.py | crownstone/crownstone-lib-python-core | 0 | 12771402 | """
Script for generating a .csv.cuck file with a few parameters.
"""
import sys, os, random
def getPathFromFileName(fname):
    if os.path.sep not in fname:
        # bare file name: resolve it relative to this script's directory
        fname = os.path.join(os.path.dirname(__file__), fname)
return fname
if __name__ == "__main__":
### arg parsing
if len(sys.argv) < 5 + 1:
print("This script will generate a .csv.cuck file that describes a cuckoo filter with the given amount of added keys of random length between a minimum and a maximum")
print("Usage: python3 generate_testfile.py cuckootestfile 50 4 100 6 20")
print("arg 0: outputfilename (.csv.cuck will be appended")
print("arg 1: cuckoo num buckets log 2")
print("arg 2: cuckoo num items per bucket")
print("arg 3: number of items to generate")
print("arg 4: min length of an item")
print("arg 5: max length of an item")
quit()
out_fname = sys.argv[0 + 1] + ".csv.cuck"
num_bucks = int(sys.argv[1 + 1])
num_nests = int(sys.argv[2 + 1])
num_items = int(sys.argv[3 + 1])
min_len = int(sys.argv[4 + 1])
max_len = int(sys.argv[5 + 1])
F_out = open(getPathFromFileName(out_fname), "w+")
print(f"# testparameters num_items:{num_items}, min_len:{min_len}, max_len:{max_len}", file=F_out)
print(",".join(["cuckoofilter"]+[f"{num_bucks:#0{4}x}",f"{num_nests:#0{4}x}"]), file=F_out)
for i in range(num_items):
print(",".join(
["add"]+
[f"{random.getrandbits(8):#0{4}x}" for i in range(random.randint(min_len,max_len))] ),
file=F_out)
| 3.453125 | 3 |
arclines/misc/low_redux.py | pypeit/arclines | 2 | 12771403 | <reponame>pypeit/arclines<filename>arclines/misc/low_redux.py
""" Generate hdf5 files from LowRedux save files
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import os
import numpy as np
import pdb
import h5py
from scipy.io.idl import readsav
from astropy.table import Table
from astropy import units as u
import arclines
out_path = arclines.__path__[0]+'/data/test_arcs/'
try:
basestring
except NameError: # For Python 3
basestring = str
def fcheby(xnrm,order):
leg = np.zeros((len(xnrm),order))
leg[:,0] = 1.
if order >= 2:
leg[:,1] = xnrm
# For loop
for j in range(2,order):
leg[:,j] = 2.0 * xnrm * leg[:,j-1] - leg[:,j-2]
# Return
return leg
def cheby_val(coeff, x, nrm, order):
#
xnrm = 2. * (x - nrm[0])/nrm[1]
# Matrix first
leg = fcheby(xnrm, order)
# Dot
return np.dot(leg, coeff)
def poly_val(coeff, x, nrm):
#
xnrm = 2. * (x - nrm[0])/nrm[1]
#
n = len(coeff)-1
y = coeff[n]
#for i=n-1,0,-1 do y = TEMPORARY(y) * x + c[i]
for ii in range(n-1,-1,-1):
y = y*xnrm + coeff[ii]
return y
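# Small self-check sketch for the evaluators above: fcheby/cheby_val implement the Chebyshev
# T_n recurrence, so on the normalised axis they should agree with numpy's chebval (the
# coefficients and nrm values below are made up, not taken from a real LowRedux calib).
def _check_cheby_val():
    from numpy.polynomial import chebyshev as npcheb
    coeff = np.array([5000., 800., -20., 3.])
    x = np.arange(100, dtype=float)
    nrm = [50., 100.]  # centre and scale of the pixel axis
    xnrm = 2. * (x - nrm[0]) / nrm[1]
    assert np.allclose(cheby_val(coeff, x, nrm, len(coeff)), npcheb.chebval(xnrm, coeff))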
def generate_hdf(sav_file, instr, lamps, outfil, dtoler=0.6):
""" Given an input LR IDL save file, generate an hdf5
IDs arc lines too
Parameters
----------
    sav_file : str
        Root name of the IDL save file from LowRedux, e.g. lris_blue_600.sav
    instr : str
        Instrument label written into the output meta data
    lamps : list
        Lamp names passed to load_line_lists for line identification
    outfil : str
        Name of the output hdf5 file (written into the test_arcs data folder)
    dtoler : float, optional
        Maximum wavelength difference (Ang) for matching a detected line to the line list
Returns
-------
"""
from pypit import pyputils
msgs = pyputils.get_dummy_logger()
from pypit import arwave
from pypit import arutils
arutils.dummy_settings()
#
from arclines.pypit_utils import find_peaks
from arclines.io import load_line_lists
#
# Read IDL save file
sav_file = os.getenv('LONGSLIT_DIR')+'calib/linelists/'+sav_file
s = readsav(sav_file)
ctbl = Table(s['calib']) # For writing later
# Line list
alist = load_line_lists(lamps)
# One spectrum?
ashape = s['archive_arc'].shape
if len(ashape) == 1:
nspec = 1
npix = ashape[0]
else:
nspec = s['archive_arc'].shape[0]
npix = ashape[1]
# Meta data
mdict = dict(npix=npix, instr=instr,
lamps=[str(ilamp) for ilamp in lamps], # For writing to hdf5
nspec=nspec, infil=sav_file, IDairvac='vac')
print("Processing {:d} spectra in {:s}".format(mdict['nspec'], sav_file))
# Start output
outh5 = h5py.File(out_path+outfil, 'w')
outh5.create_group('arcs')
# Loop on spectra
for ss in range(mdict['nspec']):
sss = str(ss)
# Parse
if nspec == 1:
spec = s['archive_arc']
else:
spec = s['archive_arc'][ss]
calib = s['calib'][ss]
# Peaks
tampl, tcent, twid, w, yprep = find_peaks(spec)
pixpk = tcent[w]
pixampl = tampl[w]
# Wavelength solution
try:
cfunc = calib['func'].decode('UTF-8')
except:
cfunc = calib['func']
if cfunc == 'CHEBY':
wv_air = cheby_val(calib['ffit'], np.arange(mdict['npix']),
calib['nrm'], calib['nord'])
elif cfunc == 'POLY':
wv_air = poly_val(calib['ffit'], np.arange(mdict['npix']),
calib['nrm'])
else:
pdb.set_trace()
raise ValueError("Bad calib")
# Check blue->red or vice-versa
if ss == 0:
if wv_air[0] > wv_air[-1]:
mdict['bluered'] = False
else:
mdict['bluered'] = True
# Peak waves
if calib['func'] == 'CHEBY':
twave_air = cheby_val(calib['ffit'], pixpk,
calib['nrm'], calib['nord'])
else:
twave_air = poly_val(calib['ffit'], pixpk, calib['nrm'])
# Air to Vac
twave_vac = arwave.airtovac(twave_air*u.AA)
wave_vac = arwave.airtovac(wv_air*u.AA)
if ss == 0:
disp = np.median(np.abs(wave_vac-np.roll(wave_vac,1)))
print("Average dispersion = {:g}".format(disp))
# IDs
idwv = np.zeros_like(pixpk)
idsion = np.array([str('12345')]*len(pixpk))
for kk,twv in enumerate(twave_vac.value):
# diff
diff = np.abs(twv-alist['wave'])
if np.min(diff) < dtoler:
imin = np.argmin(diff)
idwv[kk] = alist['wave'][imin]
#idsion[kk] = alist['Ion'][imin] NIST
idsion[kk] = alist['ion'][imin]
# Red to blue?
if mdict['bluered'] is False:
pixpk = mdict['npix']-1 - pixpk
# Re-sort
asrt = np.argsort(pixpk)
pixpk = pixpk[asrt]
idwv = idwv[asrt]
# Reverse
spec = spec[::-1]
wave_vac = wave_vac[::-1]
# Output
outh5['arcs'].create_group(sss)
# Datasets
outh5['arcs'][sss]['wave'] = wave_vac
outh5['arcs'][sss]['wave'].attrs['airvac'] = 'vac'
outh5['arcs'][sss]['spec'] = spec
outh5['arcs'][sss]['spec'].attrs['flux'] = 'counts'
outh5['arcs'][sss]['pixpk'] = pixpk
outh5['arcs'][sss]['ID'] = idwv
outh5['arcs'][sss]['ID'].attrs['airvac'] = 'vac'
outh5['arcs'][sss]['Ion'] = str(idsion)
# LR wavelengths
outh5['arcs'][sss]['LR_wave'] = wv_air
outh5['arcs'][sss]['LR_wave'].attrs['airvac'] = 'air'
# LR Fit
outh5['arcs'][sss].create_group('LR_fit')
for key in ctbl.keys():
outh5['arcs'][sss]['LR_fit'][key] = ctbl[ss][key]
# Meta data
outh5.create_group('meta')
for key in mdict.keys():
try:
outh5['meta'][key] = mdict[key]
except TypeError: # Probably a unicode thing
if isinstance(mdict[key], list):
if isinstance(mdict[key][0], basestring):
tmp = [bytes(item, 'utf-8') for item in mdict[key]]
else:
tmp = mdict[key]
elif isinstance(mdict[key], basestring):
tmp = str(mdict[key])
try:
outh5['meta'][key] = tmp
except TypeError:
pdb.set_trace()
# Close
outh5.close()
print('Wrote {:s}'.format(out_path+outfil))
# Command line execution
def main(flg_tst):
# LRISb 600
if (flg_tst % 2**1) >= 2**0:
generate_hdf('lris_blue_600.sav', 'LRISb_600',
['ZnI', 'CdI', 'HgI', 'NeI', 'ArI'],
'LRISb_600_LRX.hdf5')
# LRISr 600
if (flg_tst % 2**2) >= 2**1:
generate_hdf('lris_red_600_7500.sav', 'LRISr_600', ['ArI', 'HgI', 'KrI', 'NeI', 'XeI'], 'LRISr_600_7500.hdf5')
# Kastb 600
if (flg_tst % 2**3) >= 2**2:
generate_hdf('kast_600_4310.sav', 'Kastb_600', ['ArI', 'HgI', 'KrI', 'NeI', 'XeI'], 'LRISr_600_7500.hdf5')
# MMT RCS
if (flg_tst % 2**4) >= 2**3:
generate_hdf('mmt_rcs_600_6310.sav', 'MMT_RCS', ['ArI', 'NeI'], 'MMT_RCS_600_6310.hdf5')
# MODS
if (flg_tst % 2**5) >= 2**4:
generate_hdf('mods_blue_400ms.sav', 'MODSb', ['XeI', 'KrI'], 'MODS_blue_400.hdf5')
generate_hdf('mods_red_670.sav', 'MODSr', ['NeI', 'ArI'], 'MODS_red_670.hdf5')
# Test
if __name__ == '__main__':
flg_tst = 0
flg_tst += 2**0 # LRISb 600
flg_tst += 2**1 # LRISr 600
flg_tst += 2**2 # Kastb 600
flg_tst += 2**3 # MMT RCS 600_6310
flg_tst += 2**4 # MODS
main(flg_tst)
| 2.328125 | 2 |
problems/que-shi-de-shu-zi-lcof/solution.py | MleMoe/LeetCode-1 | 2 | 12771404 | <filename>problems/que-shi-de-shu-zi-lcof/solution.py
from typing import List
class Solution:
def missingNumber(self, nums: List[int]) -> int:
length = len(nums)
for i in range(length):
if i != nums[i]:
return i
return length
if __name__ == '__main__':
test_cases = [[0, 1, 3], [0, 1, 2, 3, 4, 5, 6, 7, 9], [0]]
for case in test_cases:
ans = Solution().missingNumber(case)
print(ans)
| 3.78125 | 4 |
zerver/webhooks/trello/view/templates.py | Supermanu/zulip | 1 | 12771405 | TRELLO_SUBJECT_TEMPLATE = u'{board_name}.'
TRELLO_MESSAGE_TEMPLATE = u'{full_name} {rest}.'
| 0.910156 | 1 |
discrete_fuzzy_operators/decision_making/yager_aggregation.py | mmunar97/discrete-fuzzy-operators | 0 | 12771406 | <filename>discrete_fuzzy_operators/decision_making/yager_aggregation.py<gh_stars>0
from typing import Callable, List, Tuple
def yager_aggregation_decision_making(assessments: List[List[int]], aggregation_function: Callable[[List[int]], int]) -> Tuple[int, int]:
"""
Computes the best alternative from a matrix of assessments. The matrix contains, in each row, the assessment made
in a certain alternative for all the experts.
    The method computes, for each row, the value of the aggregation function; it then selects the
    alternative whose aggregated value is greatest.
References:
<NAME>. (1995). An approach to ordinal decision making.
International Journal of Approximate Reasoning, 12(3), 237–261.
https://doi.org/https://doi.org/10.1016/0888-613X(94)00035-2
Args:
assessments: A list of list of integers, containing in each row the assessments made of that alternative by
different experts. The number of rows of the matrix must agree with the number of alternatives,
and the number of columns with the number of experts.
aggregation_function: A callable method which receives as a parameter the list of values to be aggregated.
Since the aggregation must be an ordinal value, the output of the function must be an
integer.
Returns:
A tuple of two values: the first, representing the index of the alternative that has the best value; the second,
the value of the aggregation function that reaches the maximum.
"""
aggregation_values = [aggregation_function(alternative_assessment) for alternative_assessment in assessments]
return aggregation_values.index(max(aggregation_values)), max(aggregation_values)
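# Illustrative example (the 3x4 assessment matrix and the ordinal "median" aggregator are
# made up, not taken from the referenced paper); alternative 1 wins with aggregated value 5.
if __name__ == "__main__":
    def ordinal_median(values: List[int]) -> int:
        ordered = sorted(values)
        return ordered[len(ordered) // 2]  # stays on the original ordinal scale
    assessments = [
        [3, 4, 2, 5],  # alternative 0, rated by four experts on a 0-6 ordinal scale
        [5, 5, 1, 2],  # alternative 1
        [4, 4, 4, 3],  # alternative 2
    ]
    print(yager_aggregation_decision_making(assessments, ordinal_median))  # prints (1, 5)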
| 3.828125 | 4 |
buoi7/bai3.py | Viet7501/viet1 | 0 | 12771407 | '''
Cho một list chứa nhiều phần tử mang nhiều kiểu dữ liệu khác nhau,
trong đó có một phần tử kiểu tuple. Viết chương trình đếm số lượng
các phần tử trong một list đó, đến khi gặp một phần tử kiểu tuple.
'''
items = [1, 2, 'Vietnam', (99, 'hello', 2 + 3j)]
count = 0
print(type(items[3]))
for element in items:
    if type(element) is tuple:
        break
    count += 1
print(f'Number of elements in the list before the first tuple element: {count}')
 | 4.03125 | 4 |
__init__.py | gszy/kicad-boardview | 116 | 12771408 | import pcbnew
import os
from .pcbnew2boardview import convert
class Pcbnew2Boardview(pcbnew.ActionPlugin):
def defaults(self):
self.name = "Pcbnew to Boardview"
self.category = "Read PCB"
self.description = "Generate Boardview file from KiCad pcb."
def Run(self):
kicad_pcb = pcbnew.GetBoard()
with open(kicad_pcb.GetFileName().replace('.kicad_pcb', '.brd'), 'wt') as brd_file:
convert(kicad_pcb, brd_file)
plugin = Pcbnew2Boardview()
plugin.register()
| 2.625 | 3 |
monitor/migrations/0004_auto_20170912_1037.py | gserv/mining_monitor | 0 | 12771409 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-12 02:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('monitor', '0003_auto_20170912_1018'),
]
operations = [
migrations.AddField(
model_name='exchangehistory',
name='btc_cny',
field=models.DecimalField(decimal_places=20, default=0, max_digits=30),
preserve_default=False,
),
migrations.AddField(
model_name='exchangehistory',
name='xmr_btc',
field=models.DecimalField(decimal_places=20, default=0, max_digits=30),
preserve_default=False,
),
]
| 1.664063 | 2 |
pexels_cli/pexels_cli.py | amirzenoozi/pexels-crawler-cli | 1 | 12771410 | <reponame>amirzenoozi/pexels-crawler-cli<gh_stars>1-10
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "0.1.0"
__proxy__ = False
__doc__ = """
Pexels CLI Crawler
Usage:
    pexels search [--show-browser] [--folder-name=<name>] [--load-time=<seconds>] [--page-count=<count>] <keyword>
pexels -h | --help
pexels -v | --version
------------------------------------------------------------------
Options:
    --folder-name=<name>            Destination Folder name, Default: downloads
--show-browser Showing Browser if You Need, Default: False
--load-time=<seconds> Infinite Scroll Time Out, Default: 5
--page-count=<count> Page Counter, Default: 0 (All Pages)
-h --help Show this screen.
-v --version Show version.
"""
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support import expected_conditions as EC
from random import randint, random
from colorama import Fore, Style
from docopt import docopt
from time import sleep
import urllib
import urllib.request as req
import urllib.parse as parse
import os
import requests
import shutil
import time
import re
import sqlite3
import json
class PexelsCrawler:
def __init__(self, **kwargs):
# Browser UserAgent
userAgent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.50 Safari/537.36'
# DataBase Connection Config
self.dataBaseConnection = sqlite3.connect('pexels.db')
self.folderName = kwargs.get('folderName', 'downloads')
self.showBrowser = kwargs.get('showBrowser', True)
self.ScrollTimeout = kwargs.get('ScrollTimeout', 5)
self.ScrollCounte = kwargs.get('ScrollCounte', 0)
        # Create New Folder Based on Keyword Search
self.currentPath = os.path.abspath(os.getcwd())
self.downloadPath = os.path.join( self.currentPath, self.folderName)
if not os.path.exists( self.downloadPath ):
os.makedirs( self.downloadPath )
try:
self.dataBaseConnection.execute('''CREATE TABLE pexels
(id INTEGER PRIMARY KEY AUTOINCREMENT,
link_slug TEXT NOT NULL);''')
except sqlite3.Error as error:
print(error)
pass
# Selenium Driver Options
self.driverOption = webdriver.ChromeOptions()
self.driverOption.add_argument('log-level=3')
self.driverOption.add_argument(f'user-agent={userAgent}')
if( not self.showBrowser ):
self.driverOption.add_argument('headless')
self.driver = webdriver.Chrome( options = self.driverOption )
def infiniteScroll(self, timeout, counte):
scrollPauseTime = timeout
# Get scroll height
lastHeight = self.driver.execute_script("return document.body.scrollHeight")
loopIndex = 0
while ( loopIndex <= counte ):
# Scroll down to bottom
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load page
time.sleep( scrollPauseTime )
# Calculate new scroll height and compare with last scroll height
newHeight = self.driver.execute_script("return document.body.scrollHeight")
if newHeight == lastHeight:
# If heights are the same it will exit the function
break
lastHeight = newHeight
# Make Infinite Loop
if ( counte != 0 ):
loopIndex += 1
def saveImage( self, imageUrl, targetName ):
try:
sleep( random() * 5 )
opener = req.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.0; WOW64; rv:24.0) Gecko/20100101 Firefox/24.0')]
req.install_opener(opener)
splitedUrl = imageUrl.split("/")
splitedUrl[-1] = parse.quote( splitedUrl[-1] )
imageUrl = '/'.join(splitedUrl)
req.urlretrieve( imageUrl, targetName )
except urllib.error.HTTPError as err:
print('\n')
print('========================')
print(f'Error Code: {err.getcode()}')
print(f'Target URL: {imageUrl}' )
print('========================')
print('\n')
return False
except UnicodeEncodeError as unierror:
print( imageUrl )
print('\n')
print('========================')
print(f'Error Type: URL Parse Error')
print(f'Target URL: {imageUrl}' )
print('========================')
print('\n')
return False
except ConnectionResetError as ConnectionError:
print('\n')
print('========================')
print(f'Error Type: Connection Error')
print(f'Target URL: {imageUrl}' )
print('========================')
print('\n')
return False
return True
def getImageByTags( self, keyword ):
processedKeyword = keyword.lower().replace(' ', '%20')
folderName = keyword.lower().replace(' ', '-')
searchUrl = f'https://pexels.com/search/{processedKeyword}/'
self.driver.get( searchUrl )
self.infiniteScroll( self.ScrollTimeout, self.ScrollCounte )
# Create Download Folder if no Exist
if not os.path.exists( f'{self.downloadPath}/{folderName}' ):
os.makedirs( f'{self.downloadPath}/{folderName}' )
imagesList = self.driver.find_elements_by_css_selector('div.search__grid .photos article.photo-item a.photo-item__link > img')
for index, image in enumerate( imagesList ):
processedLinkSlug = image.find_element_by_xpath("..").get_attribute('href').split('/')[-2]
# Check Image is Processed or Not
if ( self.isImageProcessed( processedLinkSlug ) ):
cleanImageUrl = image.get_attribute('data-big-src').split('?')[0]
# Insert Item to DB After Download
if self.saveImage( cleanImageUrl, f'{self.downloadPath}/{folderName}/{folderName}-{ str(index + 1) }.jpeg' ):
self.insertItemtoDatabase( processedLinkSlug )
def isImageProcessed( self, linkSlug ):
database_record = self.dataBaseConnection.execute("""SELECT link_slug FROM pexels WHERE link_slug = (?) LIMIT 1""", (linkSlug,)).fetchone()
return not database_record
def insertItemtoDatabase( self, linkSlug ):
try:
self.dataBaseConnection.execute("""INSERT INTO pexels (link_slug) VALUES (?)""", (linkSlug,))
self.dataBaseConnection.commit()
except sqlite3.Error as error:
print(error)
pass
def closeDriver( self ):
self.dataBaseConnection.close()
self.driver.close()
self.driver.quit()
def main():
arguments = docopt(__doc__, version='v1.0')
    folderName = str( arguments['--folder-name'] ) if arguments['--folder-name'] else 'downloads'
ScrollTimeout = int( arguments['--load-time'] ) if arguments['--load-time'] else 5
ScrollCounte = int( arguments['--page-count'] ) if arguments['--page-count'] else 0
showBrowser = arguments['--show-browser']
keyword = arguments['<keyword>']
Pexels = PexelsCrawler( showBrowser=showBrowser, ScrollTimeout=ScrollTimeout, ScrollCounte=ScrollCounte, folderName=folderName )
if ( arguments['search'] ):
Pexels.getImageByTags( keyword=keyword )
else:
print(Fore.RED + "You Should Enter The Search Keyword!")
print(Style.RESET_ALL)
Pexels.closeDriver()
# if __name__ == "__main__":
# main() | 2.21875 | 2 |
learning/md2hugo.py | trib0r3/scripts | 0 | 12771411 | import argparse
import datetime
DESCRIPTION = "Convert markdown to hugo post page"
HUGO_POST_FORMAT = """---
title: "{}"
date: {}
---
<!--more-->
"""
def convert(source, dest, img_dest, time):
    assert hasattr(source, 'read')
    assert hasattr(dest, 'write')
# create heading
title = source.readline().split("# ")[1].replace('\n', '')
if time is None:
time_now = datetime.datetime.now()
time = "{}-{}-{}".format(time_now.year, time_now.month, time_now.day)
head = HUGO_POST_FORMAT.format(title, time)
dest.write(head)
data = ""
for l in source.readlines():
line = l.replace("img/", "/img/" + img_dest)
data += line
dest.write(data)
def main():
parser = argparse.ArgumentParser(description=DESCRIPTION)
# arguments list
parser.add_argument(dest='src',
metavar="src_file",
type=argparse.FileType("r"),
help="Markdown file")
parser.add_argument(dest='dst',
metavar="dst_file",
type=argparse.FileType("w"),
help="Hugo-converted file")
parser.add_argument('-d', dest='date', metavar='DATE', type=str,
help="Post creation date in format YYYY-MM-DD", default=None)
parser.add_argument('-i', dest='img', metavar='SUBDIR', type=str,
help="Subdirectory of '/img'", default="")
args = parser.parse_args()
convert(args.src, args.dst, '{}/'.format(args.img), args.date)
if __name__ == "__main__":
main()
| 3.0625 | 3 |
tests/unit/test_regmap.py | crzdg/acconeer-python-exploration | 0 | 12771412 | import inspect
import pytest
import acconeer.exptool.structs.configbase as cb
from acconeer.exptool import configs
from acconeer.exptool.clients.reg import regmap
from acconeer.exptool.modes import Mode
BO = regmap.BYTEORDER
def test_full_names_unique():
unique_names = set([r.full_name for r in regmap.REGISTERS])
assert len(regmap.REGISTERS) == len(unique_names)
def test_get_reg_status():
reg = regmap.get_reg("status")
assert reg.full_name == "status"
assert reg == regmap.STATUS_REG
assert reg.bitset_flags == regmap.STATUS_FLAGS
assert reg.bitset_masks == regmap.STATUS_MASKS
assert regmap.get_reg(reg) == reg
assert regmap.get_reg(reg.addr) == reg
def test_get_reg():
with pytest.raises(ValueError):
regmap.get_reg("does-not-exist")
assert regmap.get_reg("iq_sampling_mode").full_name == "iq_sampling_mode"
assert regmap.get_reg("iq_sampling_mode", "iq").full_name == "iq_sampling_mode"
assert regmap.get_reg("sampling_mode", "iq").full_name == "iq_sampling_mode"
with pytest.raises(ValueError):
regmap.get_reg("iq_sampling_mode", "sparse")
with pytest.raises(ValueError):
regmap.get_reg("sampling_mode") # ambiguous
reg = regmap.get_reg("sp_start")
with pytest.raises(ValueError):
regmap.get_reg(reg.addr) # ambiguous
assert regmap.get_reg(reg.addr, reg.modes[0]) == reg
def test_config_to_reg_map_completeness():
m = regmap.CONFIG_TO_STRIPPED_REG_NAME_MAP
assert len(m) == len(set(m))
all_config_attrs = set()
for mode, config_class in configs.MODE_TO_CONFIG_CLASS_MAP.items():
attrs = [k for k, v in inspect.getmembers(config_class) if isinstance(v, cb.Parameter)]
all_config_attrs.update(attrs)
for attr in attrs:
reg_name = m[attr]
if reg_name is None:
continue
reg = regmap.get_reg(reg_name, mode)
assert reg.category in [regmap.Category.CONFIG, regmap.Category.GENERAL]
assert all_config_attrs == set(m.keys())
def test_encode_bitset():
reg = regmap.STATUS_REG
assert reg.data_type == regmap.DataType.BITSET
created = regmap.STATUS_FLAGS.CREATED
activated = regmap.STATUS_FLAGS.ACTIVATED
truth = int(activated).to_bytes(4, BO)
assert reg.encode(activated) == truth
assert reg.encode(int(activated)) == truth
assert reg.encode("activated") == truth
assert reg.encode("ACTIVATED") == truth
assert reg.encode(["activated"]) == truth
truth = int(0).to_bytes(4, BO)
assert reg.encode([]) == truth
assert reg.encode(0) == truth
truth = int(created | activated).to_bytes(4, BO)
assert reg.encode(created | activated) == truth
assert reg.encode(["created", "activated"]) == truth
def test_decode_bitset():
reg = regmap.STATUS_REG
created = regmap.STATUS_FLAGS.CREATED
activated = regmap.STATUS_FLAGS.ACTIVATED
assert reg.decode(reg.encode(created)) == created
assert reg.decode(reg.encode(created | activated)) == created | activated
def test_encode_enum():
reg = regmap.get_reg("mode_selection")
assert reg.data_type == regmap.DataType.ENUM
envelope = reg.enum.ENVELOPE
truth = int(envelope).to_bytes(4, BO)
assert reg.encode(envelope) == truth
assert reg.encode(int(envelope)) == truth
assert reg.encode("envelope") == truth
assert reg.encode("ENVELOPE") == truth
# Implicit remapping
assert reg.encode(Mode.ENVELOPE) == truth
# Explicit remapping
reg = regmap.get_reg("repetition_mode")
truth = int(reg.enum.STREAMING).to_bytes(4, BO)
assert reg.encode(configs.BaseServiceConfig.RepetitionMode.SENSOR_DRIVEN) == truth
def test_decode_enum():
reg = regmap.get_reg("mode_selection")
envelope = reg.enum.ENVELOPE
assert reg.decode(reg.encode(envelope)) == envelope
def test_encode_bool():
reg = regmap.get_reg("tx_disable")
assert reg.data_type == regmap.DataType.BOOL
assert reg.encode(False) == int(0).to_bytes(4, BO)
assert reg.encode(True) == int(1).to_bytes(4, BO)
assert reg.encode(0) == int(0).to_bytes(4, BO)
assert reg.encode(1) == int(1).to_bytes(4, BO)
assert reg.encode(123) == int(1).to_bytes(4, BO)
def test_decode_bool():
reg = regmap.get_reg("tx_disable")
assert reg.decode(reg.encode(True)) is True
def test_encode_int():
pass # tested in float
def test_decode_int():
pass
def test_encode_uint():
reg = regmap.get_reg("downsampling_factor")
assert reg.data_type == regmap.DataType.UINT32
assert reg.encode(0) == int(0).to_bytes(4, BO, signed=True)
assert reg.encode(1234) == int(1234).to_bytes(4, BO, signed=True)
with pytest.raises(ValueError):
reg.encode(-123)
def test_decode_uint():
reg = regmap.get_reg("downsampling_factor")
assert reg.decode(reg.encode(1234)) == 1234
def test_encode_float():
reg = regmap.get_reg("range_start")
assert reg.full_name == "range_start"
assert reg.float_scale == pytest.approx(1000)
assert reg.data_type == regmap.DataType.INT32
assert reg.encode(0) == int(0).to_bytes(4, BO, signed=True)
assert reg.encode(0.123) == int(123).to_bytes(4, BO, signed=True)
assert reg.encode(-0.123) == int(-123).to_bytes(4, BO, signed=True)
def test_decode_float():
reg = regmap.get_reg("range_start")
assert reg.decode(reg.encode(0.123)) == pytest.approx(0.123)
| 1.9375 | 2 |
align/detect_align.py | taan02991/WarpGAN | 0 | 12771413 | """Align face images given landmarks."""
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sys
import os
import warnings
import argparse
import random
import cv2
from align.mtcnntf import detector
from align.matlab_cp2tform import get_similarity_transform_for_cv2
def align(src_img, src_pts, ref_pts, image_size, scale=1.0, transpose_input=False):
w, h = image_size = tuple(image_size)
# Actual offset = new center - old center (scaled)
scale_ = max(w,h) * scale
cx_ref = cy_ref = 0.
offset_x = 0.5 * w - cx_ref * scale_
offset_y = 0.5 * h - cy_ref * scale_
s = np.array(src_pts).astype(np.float32).reshape([-1,2])
r = np.array(ref_pts).astype(np.float32) * scale_ + np.array([[offset_x, offset_y]])
if transpose_input:
s = s.reshape([2,-1]).T
tfm = get_similarity_transform_for_cv2(s, r)
dst_img = cv2.warpAffine(src_img, tfm, image_size)
s_new = np.concatenate([s.reshape([2,-1]), np.ones((1, s.shape[0]))])
s_new = np.matmul(tfm, s_new)
s_new = s_new.reshape([-1]) if transpose_input else s_new.T.reshape([-1])
# tfm = tfm.reshape([-1])
return dst_img, s_new, tfm
def detect_align(image, image_size=(256,256), scale=0.7, transpose_input=False):
bboxes, landmarks = detector.detect(image)
if len(bboxes) == 0 : return None
elif len(bboxes) > 1:
        img_size = np.asarray(image.shape[:2])
bbox_size = bboxes[:,2] * bboxes[:,3]
img_center = img_size / 2
offsets = np.vstack([ bboxes[:,0]+0.5*bboxes[:,2]-img_center[1], bboxes[:,1]+0.5*bboxes[:,3]-img_center[0] ])
offset_dist_squared = np.sum(np.power(offsets,2.0),0)
        index = np.argmax(bbox_size - offset_dist_squared*2.0) # keep the largest face, with some extra weight on the centering
bboxes = bboxes[index][None]
landmarks = landmarks[index][None]
src_pts = landmarks[0]
ref_pts = np.array( [[ -1.58083929e-01, -3.84258929e-02],
[ 1.56533929e-01, -4.01660714e-02],
[ 2.25000000e-04, 1.40505357e-01],
[ -1.29024107e-01, 3.24691964e-01],
[ 1.31516964e-01, 3.23250893e-01]])
img_new, new_pts, tfm = align(image, src_pts, ref_pts, image_size, scale, transpose_input)
return img_new, tfm
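# Minimal usage sketch for the pipeline above (the input and output file names are placeholders):
def _detect_align_demo(image_path='face.jpg'):
    image = cv2.imread(image_path)
    result = detect_align(image, image_size=(256, 256), scale=0.7)
    if result is None:
        return None
    aligned, tfm = result
    cv2.imwrite('aligned.jpg', aligned)
    return aligned, tfm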
| 2.21875 | 2 |
getprices_api.py | real-poker/Polygon-FarmTools | 0 | 12771414 | <gh_stars>0
# -*- coding: utf-8 -*-
import requests
import time
import json
import os
if os.path.isfile('tokendict.json'):
    with open('tokendict.json', 'r') as f:
        token_contract = json.loads(f.read())
else:
    token_contract = {}  # fall back to an empty mapping when the token dictionary is missing
token_ids = []
for token in token_contract:
response = requests.get('https://api.coingecko.com/api/v3/coins/polygon-pos/contract/' + token_contract[token])
token_ids.append(response.json()['id'])
prices = {x:[] for x in token_contract}
response = requests.get('https://api.coingecko.com/api/v3/coins/ball-token/history?date=12-08-2021&localization=false')
response.json()['market_data']['current_price']['usd']
 | 2.921875 | 3 |
classes/Speak.py | Sunuba/roc | 23 | 12771415 | import pyttsx3
class Speak:
def __init__(self):
self.speaker = pyttsx3.init(driverName='sapi5')
# self.speaker.setProperty('voice', 'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0')
def speak(self, text):
self.speaker.say(text)
self.speaker.runAndWait()
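# Minimal usage sketch (the phrase is arbitrary; available voices depend on the host's SAPI5 setup):
if __name__ == '__main__':
    Speak().speak('Text to speech is working')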
| 2.390625 | 2 |
backend/banner/urls/oj.py | skku-npc/SKKU_Coding_Platform | 1 | 12771416 | <gh_stars>1-10
from django.urls import path
from ..views.oj import BannerAPI
urlpatterns = [
path("banner/", BannerAPI.as_view(), name="banner_api")
]
| 1.492188 | 1 |
src/estimate_start_times/estimator.py | AutomatedProcessImprovement/start-time-estimator | 0 | 12771417 | <filename>src/estimate_start_times/estimator.py
import math
from datetime import timedelta
from statistics import mode
import numpy as np
import pandas as pd
from estimate_start_times.concurrency_oracle import NoConcurrencyOracle, AlphaConcurrencyOracle, \
HeuristicsConcurrencyOracle, DeactivatedConcurrencyOracle
from estimate_start_times.config import ConcurrencyOracleType, ReEstimationMethod, ResourceAvailabilityType, OutlierStatistic
from estimate_start_times.resource_availability import SimpleResourceAvailability
class StartTimeEstimator:
def __init__(self, event_log, config):
# Set event log
self.event_log = event_log.copy()
# Set configuration
self.config = config
# Set log IDs to ease access within class
self.log_ids = config.log_ids
# Set concurrency oracle
if self.config.concurrency_oracle_type == ConcurrencyOracleType.DEACTIVATED:
self.concurrency_oracle = DeactivatedConcurrencyOracle(self.config)
elif self.config.concurrency_oracle_type == ConcurrencyOracleType.NONE:
self.concurrency_oracle = NoConcurrencyOracle(self.event_log, self.config)
elif self.config.concurrency_oracle_type == ConcurrencyOracleType.ALPHA:
self.concurrency_oracle = AlphaConcurrencyOracle(self.event_log, self.config)
elif self.config.concurrency_oracle_type == ConcurrencyOracleType.HEURISTICS:
self.concurrency_oracle = HeuristicsConcurrencyOracle(self.event_log, self.config)
else:
raise ValueError("No concurrency oracle defined!")
# Set resource availability
if self.config.resource_availability_type == ResourceAvailabilityType.SIMPLE:
self.resource_availability = SimpleResourceAvailability(self.event_log, self.config)
else:
raise ValueError("No resource availability defined!")
def estimate(self) -> pd.DataFrame:
# If there is no column for start timestamp, create it
if self.config.reuse_current_start_times:
self.event_log[self.log_ids.estimated_start_time] = self.event_log[self.log_ids.start_time]
else:
self.event_log[self.log_ids.estimated_start_time] = pd.NaT
# Process instant activities
self.event_log[self.log_ids.enabled_time] = np.where(
self.event_log[self.log_ids.activity].isin(self.config.instant_activities),
self.event_log[self.log_ids.end_time],
self.event_log[self.log_ids.estimated_start_time]
)
self.event_log[self.log_ids.available_time] = np.where(
self.event_log[self.log_ids.activity].isin(self.config.instant_activities),
self.event_log[self.log_ids.end_time],
self.event_log[self.log_ids.estimated_start_time]
)
self.event_log[self.log_ids.estimated_start_time] = np.where(
self.event_log[self.log_ids.activity].isin(self.config.instant_activities),
self.event_log[self.log_ids.end_time],
self.event_log[self.log_ids.estimated_start_time]
)
# Assign start timestamps
for (key, trace) in self.event_log.groupby([self.log_ids.case]):
indexes, enabled_times, available_times = [], [], []
for index, event in trace[pd.isnull(trace[self.log_ids.estimated_start_time])].iterrows():
indexes += [index]
enabled_times += [self.concurrency_oracle.enabled_since(trace, event)]
available_times += [
self.resource_availability.available_since(event[self.log_ids.resource], event)
]
if len(indexes) > 0:
self.event_log.loc[indexes, self.log_ids.enabled_time] = enabled_times
self.event_log.loc[indexes, self.log_ids.available_time] = available_times
self.event_log.loc[indexes, self.log_ids.estimated_start_time] = [
max(times) for times in zip(enabled_times, available_times)
]
# Re-estimate start time of those events with an estimated duration over the threshold
if not math.isnan(self.config.outlier_threshold):
self._re_estimate_durations_over_threshold()
# Fix start time of those events for which it could not be estimated (with [config.non_estimated_time])
if self.config.re_estimation_method == ReEstimationMethod.SET_INSTANT:
self._set_instant_non_estimated_start_times()
else:
self._re_estimate_non_estimated_start_times()
# If replacement to true, set estimated as start times
if self.config.replace_recorded_start_times:
self.event_log[self.log_ids.start_time] = self.event_log[self.log_ids.estimated_start_time]
self.event_log.drop([self.log_ids.estimated_start_time], axis=1, inplace=True)
# Return estimated event log
return self.event_log
def _re_estimate_durations_over_threshold(self):
# Take all the estimated durations of each activity and store the specified statistic of each distribution
statistic_durations = \
self.event_log[self.event_log[self.log_ids.estimated_start_time] != self.config.non_estimated_time] \
.groupby([self.log_ids.activity]) \
.apply(lambda row: row[self.log_ids.end_time] - row[self.log_ids.estimated_start_time]) \
.groupby(level=0) \
.apply(lambda row: self._apply_statistic(row))
# For each event, if the duration is over the threshold, set the defined statistic
for index, event in self.event_log.iterrows():
duration_limit = self.config.outlier_threshold * statistic_durations[event[self.log_ids.activity]]
if (event[self.log_ids.estimated_start_time] != self.config.non_estimated_time and
(event[self.log_ids.end_time] - event[self.log_ids.estimated_start_time]) > duration_limit):
self.event_log.loc[index, self.log_ids.estimated_start_time] = event[self.log_ids.end_time] - duration_limit
def _set_instant_non_estimated_start_times(self):
# Identify events with non_estimated as start time
# and set their duration to instant
self.event_log.loc[
self.event_log[self.log_ids.estimated_start_time] == self.config.non_estimated_time,
self.log_ids.estimated_start_time
] = self.event_log[self.log_ids.end_time]
def _re_estimate_non_estimated_start_times(self):
# Store the durations of the estimated ones
activity_durations = (self.event_log[self.event_log[self.log_ids.estimated_start_time] != self.config.non_estimated_time]
.groupby([self.log_ids.activity])
.apply(lambda row: row[self.log_ids.end_time] - row[self.log_ids.estimated_start_time]))
# Identify events with non_estimated as start time
non_estimated_events = self.event_log[self.event_log[self.log_ids.estimated_start_time] == self.config.non_estimated_time]
for index, non_estimated_event in non_estimated_events.iterrows():
activity = non_estimated_event[self.log_ids.activity]
# Re-estimate
duration = self._get_activity_duration(activity_durations, activity)
self.event_log.loc[index, self.log_ids.estimated_start_time] = non_estimated_event[self.log_ids.end_time] - duration
def _get_activity_duration(self, activity_durations, activity):
if activity in activity_durations:
            # Other durations have been measured for this activity, take the specified statistic
if self.config.re_estimation_method == ReEstimationMethod.MODE:
return mode(activity_durations[activity])
elif self.config.re_estimation_method == ReEstimationMethod.MEDIAN:
return np.median(activity_durations[activity])
elif self.config.re_estimation_method == ReEstimationMethod.MEAN:
return np.mean(activity_durations[activity])
else:
raise ValueError("Unselected re-estimation method for events with non-estimated start time!")
else:
            # There are no other duration measurements for this activity, set instant (duration = 0)
return timedelta(0)
def _apply_statistic(self, durations):
if self.config.outlier_statistic == OutlierStatistic.MODE:
return mode(durations)
elif self.config.outlier_statistic == OutlierStatistic.MEDIAN:
return np.median(durations)
elif self.config.outlier_statistic == OutlierStatistic.MEAN:
return np.mean(durations)
else:
raise ValueError("Unselected outlier statistic for events with estimated duration over the established!")
| 2.21875 | 2 |
__init__.py | dx1983/qgis-server-font-loader | 2 | 12771418 | # -*- coding: utf-8 -*-
def serverClassFactory(serverIface):
from .FontLoader import FontLoaderServer
return FontLoaderServer(serverIface)
| 1.546875 | 2 |
examples/ComputeAreaWithConsoleInput.py | Ellis0817/Introduction-to-Programming-Using-Python | 0 | 12771419 | # Prompt the user to enter a radius
radius = eval(input("Enter a number for radius: "))
# Compute area
area = radius * radius * 3.14159
# Display results
print("The area for the circle of radius", radius, "is", area)
| 4.34375 | 4 |
deliverables/deliverables_3/dashflaskapp/flaskapp/macros_plotly/dashboard.py | NicholasGoh/real_estate_app | 0 | 12771420 | <reponame>NicholasGoh/real_estate_app
import dash
from dash.dependencies import Input, Output
import plotly.express as px
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import pandas as pd
# self packages
from .data_generator import load_gdp, load_hpi, load_ir, load_flat_demand, load_transactions
from .nav_bar import nav_bar_template
# inits dash app for macros, linked to flask app
def init_macros(server):
'''Create a Plotly Dash dashboard.'''
dashApp = dash.Dash(
server=server,
routes_pathname_prefix='/macros/',
external_stylesheets=[dbc.themes.BOOTSTRAP]
)
dashApp.index_string = nav_bar_template
# Create Layout
dashApp.layout = html.Div([
html.H1("Macro Dash Board", style={'text-align': 'center'}),
html.Div([
html.H2(children='Housing Price Index', style={'text-align': 'left'}),
html.Div(children=''' The House Price Index (HPI) is a broad measure of the movement of single-family house prices'''),
html.Br(),
dcc.Loading(
children = [dcc.Graph(id='Housing Price Index', figure={}),
dcc.Slider(id='my-slider', min=0, max=184,
marks={
0: {'label': '1975Q1'},
61: {'label': '1990Q1'},
122: {'label': '2005Q1'},
181: {'label': '2020Q1'}
})],
type = 'default'
),
]),
html.Div([
html.H2(children='Gross Domestic Product',
style={'text-align': 'left'}),
html.Div(children=''' Gross domestic product (GDP) is the total monetary or market value of all the finished goods and services produced within a country's borders in a specific time period.'''),
html.Br(),
dcc.Loading(
children = [dcc.Checklist(id="select_industry",
options=[
{"label": "Total GDP", "value": "Total_GDP"},
{"label": "Goods Producing Industries",
"value": "Goods_Producing_Industries"},
{"label": "Construction", "value": "Construction"},
{"label": "Utilities", "value": "Utilities"},
{"label": "Wholsale Retail Trade",
"value": "Wholesale_Retail_Trade"},
{"label": "Transportation Storage",
"value": "Transportation_Storage"},
{"label": "Accomodation Food Services",
"value": "Accommodation_Food_Services"},
{"label": "Information Communication",
"value": "Information_Communications"},
{"label": "Finance Insurance",
"value": "Finance_Insurance"},
{"label": "Real Estate", "value": "Real_Estate"}],
value=["Quarter", "Total_GDP"],
),
dcc.Graph(id='GDP', figure={}),
dcc.Slider(id='gdp_slider', min=0, max=184,
marks={
0: {'label': '1975Q1'},
61: {'label': '1990Q1'},
122: {'label': '2005Q1'},
181: {'label': '2020Q1'}
})],
type = 'default'
),
]),
html.Div([
html.H2(children='Interest Rates', style={'text-align': 'left'}),
html.Div(children='''An interest rate is the amount of interest due per period, as a proportion of the amount lent, deposited or borrowed. '''),
html.Br(),
dcc.Loading(
children = [dcc.Checklist(id="select_ir",
options=[
{"label": "Prime Lending Rate",
"value": "Prime Lending Rate"},
{"label": "3-month Fixed Deposit Rate",
"value": "3-month Fixed Deposit Rate"},
{"label": "6-month Fixed Deposit Rate",
"value": "6-month Fixed Deposit Rate"},
{"label": "12-month Fixed Deposit Rate",
"value": "12-month Fixed Deposit Rate"},
{"label": "Savings Deposit Rate", "value": "Savings Deposit Rate"}, ],
value=["Date", "Prime Lending Rate"],
),
dcc.Graph(id='IR', figure={}),
dcc.Slider(id='ir_slider', min=0, max=457,
marks={
0: {'label': '1983-Jan'},
147: {'label': '1995-Jan'},
304: {'label': '2008-Jan'},
457: {'label': '2021-Jan'}
})],
type = 'default'
),
]),
html.Div([
html.H2(children='Flat Demand', style={'text-align': 'left'}),
html.Div(
                children=''' Demand for rental and purchase of flats in a period of time '''),
html.Br(),
dcc.Loading(
children = dcc.Graph(id='demand', figure={}),
type = 'default'
)
]),
html.Div([
html.H2(children='Transaction', style={'text-align': 'left'}),
html.Div(children=''' URA Transacted housing price trend '''),
html.Br(),
html.Div(children=''' Property Type: '''),
dcc.Loading(
children = [dcc.RadioItems(id="propertyType",
options=[
{'label': 'Condominium', 'value': 'Condominium'},
{'label': 'Detached', 'value': 'Detached'},
{'label': 'Apartment', 'value': 'Apartment'},
{'label': 'Terrace', 'value': 'Terrace'},
{'label': 'Semi-detached', 'value': 'Semi-detached'},
{'label': 'Strata Terrace', 'value': 'Strata Terrace'},
{'label': 'Strata Detached', 'value': 'Strata Detached'},
{'label': 'Strata Semi-detached',
'value': 'Strata Semi-detached'},
], value='Condominium'),
html.Br(),
html.Div(children=''' Tenure: '''),
dcc.RadioItems(id="tenure",
options=[
{'label': '99-years', 'value': '99-years'},
{'label': 'Freehold', 'value': 'Freehold'},
{'label': '999-years', 'value': '999-years'},
{'label': '9999-years', 'value': '9999-years'},
], value='99-years'),
html.Br(),
html.Div(children=''' District: '''),
dcc.Checklist(id="district",
options=[
{"label": "1", "value": 1},
{"label": "2", "value": 2},
{"label": "3", "value": 3},
{"label": "4", "value": 4},
{"label": "5", "value": 5},
{"label": "6", "value": 6},
{"label": "7", "value": 7},
], value=[1]),
dcc.Graph(id='transac', figure={})],
type = 'default'
),
])
], className = 'container')
@dashApp.callback(
Output(component_id='Housing Price Index', component_property='figure'),
Input(component_id='my-slider', component_property='value'),
)
def hpi_graph(value):
dff = load_hpi()
dff_filtered = dff[:value]
fig = px.line(dff_filtered, x='quarter', y=load_hpi().columns)
return fig
@dashApp.callback(
Output(component_id='GDP', component_property='figure'),
[Input(component_id='select_industry', component_property='value'),
Input('gdp_slider', 'value')]
)
def gpd(value, gdp_slider):
dff = load_gdp()
filtered_industry = dff[value]
filtered = filtered_industry[:gdp_slider]
fig = px.line(filtered, x='Quarter', y=filtered.columns)
return fig
@dashApp.callback(
Output(component_id='IR', component_property='figure'),
[Input(component_id='select_ir', component_property='value'),
Input('ir_slider', 'value')]
)
def interest_rate(value, ir_slider):
dff = load_ir()
filtered_ir = dff[value]
filtered = filtered_ir[:ir_slider]
fig = px.line(filtered, x='Date', y=filtered.columns)
return fig
@dashApp.callback(
Output(component_id='demand', component_property='figure'),
Input(component_id='demand', component_property='figure'),
)
def demand(figure):
fig = px.line(load_flat_demand(), x='period', y=load_flat_demand().columns)
return fig
@dashApp.callback(
Output(component_id='transac', component_property='figure'),
[Input(component_id='propertyType', component_property='value'),
Input(component_id='tenure', component_property='value'),
Input(component_id='district', component_property='value'),
Input(component_id='transac', component_property='figure')]
)
def transac(propType, tenure, dist, figure):
dff = load_transactions()
fpt = dff[dff['propertyType'] == propType]
ft = fpt[fpt["tenure"] == tenure]
fd = ft[ft["district"].isin(dist)]
new_df = fd.groupby(by=["contractDate"], as_index=False).mean()
new_df['contractDate'] = pd.to_datetime(new_df['contractDate'])
new_df_sort = new_df.sort_values(by='contractDate')
fig = px.line(new_df_sort, x="contractDate", y='per_meter_square')
fig.update_layout(
xaxis_title="Date",
yaxis_title="Mean Property Price Per Meter Squared")
return fig
return dashApp.server
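# Minimal usage sketch (illustrative; the Flask app below is an assumption, not part
# of this module). init_macros mounts the Dash dashboard under /macros/ on the server.
#
#   from flask import Flask
#   server = Flask(__name__)
#   init_macros(server)
#   server.run()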
| 2.875 | 3 |
tests/test_utils.py | localuser2/pre-commit-hooks | 164 | 12771421 | <reponame>localuser2/pre-commit-hooks<filename>tests/test_utils.py
#!/usr/bin/env python3
import difflib
import os
import re
import shutil
import subprocess as sp
import sys
import pytest
test_file_strs = {
"ok.c": '// Copyright 2021 <NAME>\n#include <stdio.h>\n\nint main() {\n printf("Hello World!\\n");\n return 0;\n}\n',
"ok.cpp": '// Copyright 2021 <NAME>\n#include <iostream>\n\nint main() {\n std::cout << "Hello World!\\n";\n return 0;\n}\n',
"err.c": "#include <stdio.h>\nint main(){int i;return;}",
"err.cpp": "#include <string>\nint main(){int i;return;}",
}
def assert_equal(expected: bytes, actual: bytes):
"""Stand in for Python's assert which is annoying to work with."""
actual = actual.replace(b"\r", b"") # ignore windows file ending differences
if expected != actual:
print(f"\n\nExpected:`{expected}`")
print(f"\n\nActual__:`{actual}`")
if isinstance(expected, bytes) and isinstance(actual, bytes):
expected_str = expected.decode()
actual_str = actual.decode()
print("String comparison:", expected_str == actual_str)
diff_lines_gen = difflib.context_diff(expected_str, actual_str, "Expected", "Actual")
diff_lines = "".join(list(diff_lines_gen))
print(f"\n\nDifference:\n{diff_lines}")
else:
print(f"Expected is type {type(expected)}\nActual is type {type(actual)}")
pytest.fail("Test failed!")
def get_versions():
"""Returns a dict of commands and their versions."""
commands = ["clang-format", "clang-tidy", "uncrustify", "cppcheck", "cpplint"]
if os.name != "nt": # oclint doesn't work on windows, iwyu needs to be compiled on windows
commands += ["oclint", "include-what-you-use"]
# Regex for all versions. Unit tests: https://regex101.com/r/rzJE0I/1
regex = r"[- ]((?:\d+\.)+\d+[_+\-a-z\d]*)(?![\s\S]*OCLint version)"
versions = {}
for cmd in commands:
if not shutil.which(cmd):
sys.exit("Command " + cmd + " not found.")
cmds = [cmd, "--version"]
child = sp.run(cmds, stdout=sp.PIPE, stderr=sp.PIPE)
if len(child.stderr) > 0:
print(f"Received error when running {cmds}:\n{child.stderr}")
sys.exit(1)
output = child.stdout.decode("utf-8")
try:
versions[cmd] = re.search(regex, output).group(1)
except AttributeError:
print(f"Received `{output}`. Version regexes have broken.")
print("Please file a bug (github.com/pocc/pre-commit-hooks).")
sys.exit(1)
return versions
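# Example of the dict shape returned by get_versions() (version numbers below are
# illustrative placeholders, not actual output):
#   {'clang-format': '13.0.0', 'clang-tidy': '13.0.0', 'uncrustify': '0.72.0',
#    'cppcheck': '2.5', 'cpplint': '1.5.5'}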
# Required for testing with clang-tidy and oclint
def set_compilation_db(filenames):
"""Create a compilation database for clang static analyzers."""
cdb = "["
clang_location = shutil.which("clang")
file_dir = os.path.dirname(os.path.abspath(filenames[0]))
for f in filenames:
file_base = os.path.basename(f)
clang_suffix = ""
if f.endswith("cpp"):
clang_suffix = "++"
cdb += """\n{{
"directory": "{0}",
"command": "{1}{2} {3} -o {3}.o",
"file": "{3}"
}},""".format(
file_dir, clang_location, clang_suffix, os.path.join(file_dir, file_base)
)
cdb = cdb[:-1] + "]" # Subtract extra comma and end json
# Required for clang-tidy
if os.name == "nt":
cdb = cdb.replace("\\", "\\\\").replace("Program Files", 'Program\\" \\"Files')
with open(os.path.join(file_dir, "compile_commands.json"), "w") as f:
f.write(cdb)
def set_git_identity():
"""Set a git identity if one does not exist."""
sp_child = sp.run(["git", "config", "--list"], stdout=sp.PIPE)
if "user.name" not in sp_child.stdout.decode() or "user.email" not in sp_child.stdout.decode():
sp.run(["git", "config", "--global", "user.name", "Test Runner"])
sp.run(["git", "config", "--global", "user.email", "<EMAIL>"])
sp.run(["git", "config", "--global", "init.defaultbranch", "master"])
def run_in(commands, tmpdir):
sp_child = sp.run(commands, cwd=tmpdir, stdout=sp.PIPE, stderr=sp.PIPE)
if sp_child.returncode != 0:
err_msg = (
f"commands {commands} failed with\nstdout: {sp_child.stdout.decode()}stderr: {sp_child.stderr.decode()}\n"
)
pytest.fail(err_msg)
def integration_test(cmd_name, files, args, test_dir):
for test_file in files:
test_file_base = os.path.split(test_file)[-1]
if test_file_base in test_file_strs:
with open(test_file, "w") as fd:
fd.write(test_file_strs[test_file_base])
# Add only the files we are testing
run_in(["git", "reset"], test_dir)
run_in(["git", "add"] + files, test_dir)
args = list(args) # redeclare so there's no memory weirdness
pre_commit_config_path = os.path.join(test_dir, ".pre-commit-config.yaml")
pre_commit_config = f"""\
repos:
- repo: https://github.com/pocc/pre-commit-hooks
rev: v1.3.4
hooks:
- id: {cmd_name}
args: {args}
"""
with open(pre_commit_config_path, "w") as f:
f.write(pre_commit_config)
# Pre-commit run will only work on staged files, which is what we want to test
# Using git commit can cause hangs if pre-commit passes
sp_child = sp.run(["pre-commit", "run"], cwd=test_dir, stdout=sp.PIPE, stderr=sp.PIPE)
output_actual = sp_child.stderr + sp_child.stdout
# Get rid of pre-commit first run info lines
output_actual = re.sub(rb"\[(?:INFO|WARNING)\].*\n", b"", output_actual)
# Output is unpredictable and platform/version dependent
if any([f.endswith("err.cpp") for f in files]) and "-std=c++20" in args:
output_actual = re.sub(rb"[\d,]+ warnings and ", b"", output_actual)
return output_actual, sp_child.returncode
| 2.5 | 2 |
views/__init__.py | darshanshewale/facerecognition | 66 | 12771422 | from flask import render_template, request, redirect, url_for, session
from app import app
from model import *
@app.route('/', methods=["GET"])
def home():
if "username" in session:
return render_template('index.html')
else:
return render_template('login.html')
# Register new user
@app.route('/register', methods=["GET", "POST"])
def register():
if request.method == "GET":
return render_template("register.html")
elif request.method == "POST":
registerUser()
return redirect(url_for("login"))
#Check if the email already exists on the registration page
@app.route('/checkusername', methods=["POST"])
def check():
return checkusername()
# Everything login (routes to render the login page, check if the username exists and verify the password through jQuery AJAX requests)
@app.route('/login', methods=["GET"])
def login():
if request.method == "GET":
if "username" not in session:
return render_template("login.html")
else:
return redirect(url_for("home"))
@app.route('/checkloginusername', methods=["POST"])
def checkUserlogin():
return checkloginusername()
@app.route('/checkloginpassword', methods=["POST"])
def checkUserpassword():
return checkloginpassword()
#The admin logout
@app.route('/logout', methods=["GET"]) # URL for logout
def logout(): # logout function
session.pop('username', None) # remove user session
return redirect(url_for("home")) # redirect to home page with message
#Forgot Password
@app.route('/forgot-password', methods=["GET"])
def forgotpassword():
return render_template('forgot-password.html')
#404 Page
@app.route('/404', methods=["GET"])
def errorpage():
return render_template("404.html")
#Blank Page
@app.route('/blank', methods=["GET"])
def blank():
return render_template('blank.html')
#Buttons Page
@app.route('/buttons', methods=["GET"])
def buttons():
return render_template("buttons.html")
#Cards Page
@app.route('/cards', methods=["GET"])
def cards():
return render_template('cards.html')
#Charts Page
@app.route('/charts', methods=["GET"])
def charts():
return render_template("charts.html")
#Tables Page
@app.route('/tables', methods=["GET"])
def tables():
return render_template("tables.html")
#Utilities-animation
@app.route('/utilities-animation', methods=["GET"])
def utilitiesanimation():
return render_template("utilities-animation.html")
#Utilities-border
@app.route('/utilities-border', methods=["GET"])
def utilitiesborder():
return render_template("utilities-border.html")
#Utilities-color
@app.route('/utilities-color', methods=["GET"])
def utilitiescolor():
return render_template("utilities-color.html")
#utilities-other
@app.route('/utilities-other', methods=["GET"])
def utilitiesother():
return render_template("utilities-other.html")
| 2.890625 | 3 |
sparseConv/multitask/finetune/data_loader.py | ShengyuH/Scene-Recognition-in-3D | 48 | 12771423 | import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp
import math
import MinkowskiEngine as ME
from torch.utils.data.sampler import Sampler
import os,sys
MAX_POINTS=3000000
SEM_COLOR_MAP = {
0: (0., 0., 0.),
1: (174., 199., 232.),
2: (152., 223., 138.),
3: (31., 119., 180.),
4: (255., 187., 120.),
5: (188., 189., 34.),
6: (140., 86., 75.),
7: (255., 152., 150.),
8: (214., 39., 40.),
9: (197., 176., 213.),
10: (148., 103., 189.),
11: (196., 156., 148.),
12: (23., 190., 207.),
14: (247., 182., 210.),
15: (66., 188., 102.),
16: (219., 219., 141.),
17: (140., 57., 197.),
18: (202., 185., 52.),
19: (51., 176., 203.),
20: (200., 54., 131.),
21: (92., 193., 61.),
22: (78., 71., 183.),
23: (172., 114., 82.),
24: (255., 127., 14.),
25: (91., 163., 138.),
26: (153., 98., 156.),
27: (140., 153., 101.),
28: (158., 218., 229.),
29: (100., 125., 154.),
30: (178., 127., 135.),
32: (146., 111., 194.),
33: (44., 160., 44.),
34: (112., 128., 144.),
35: (96., 207., 209.),
36: (227., 119., 194.),
37: (213., 92., 176.),
38: (94., 106., 211.),
39: (82., 84., 163.),
40: (100., 85., 144.),
}
# semantic label remapper
SEM_CLASS_LABELS = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator',
'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture')
SEM_VALID_CLASS_IDS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]
SEM_REMAPPER=np.ones(150)*(20)
for i,x in enumerate(SEM_VALID_CLASS_IDS):
SEM_REMAPPER[x]=i
# scene type remapper
TYPE_CLASS_LABELS=('aparment','bathroom','bedroom','conference room','copy','hallway','kitchen','laundry room','living room','office','storage','misc')
TYPE_VALID_CLASS_IDS=[1,2,3,4,8,9,13,14,15,16,18,20,21]
TYPE_REMAPPER=np.ones(22)*(12)
for i,x in enumerate(TYPE_VALID_CLASS_IDS):
TYPE_REMAPPER[x]=i
'''
ScanNet dataset
'''
class ScanNetDataset(torch.utils.data.Dataset):
def __init__(self,path,augment=False,voxel_size=0.02,leave_rate=None,
crop_rate=None,skip_rate=1,ind_remove=None):
torch.utils.data.Dataset.__init__(self)
self.voxel_size=voxel_size
self.augment=augment
self.leave_rate=leave_rate
self.crop_rate=crop_rate
self.skip_rate=skip_rate
self.ind_remove=ind_remove
# load data
self.data=[]
for x in torch.utils.data.DataLoader(
glob.glob(path), collate_fn=lambda x: torch.load(x[0]),num_workers=mp.cpu_count()):
self.data.append(x)
# preprocess data on train/val/test data
for i in range(len(self.data)):
# normalize colors
self.data[i]['feats']/=255
self.data[i]['feats']-=0.5
# scene type label
# self.data[i]['scene_label']=TYPE_REMAPPER[self.data[i]['scene_label']]
self.data[i]['scene_label']-=1
# semantic label
self.data[i]['sem_label']=SEM_REMAPPER[self.data[i]['sem_label'].astype('int')]
def __getitem__(self,n):
crn_sample=self.data[n]
xyz=crn_sample['coords']
feats=crn_sample['feats']
sem_labels=crn_sample['sem_label']
scene_type=crn_sample['scene_label']
scene_name=crn_sample['scene_name']
# filter by semantic index
ind_left=sem_labels!=self.ind_remove
xyz,feats,sem_labels=xyz[ind_left],feats[ind_left],sem_labels[ind_left]
# voxelization
sel = ME.utils.sparse_quantize(xyz / self.voxel_size, return_index=True)
down_xyz, down_feat,down_labels = xyz[sel],feats[sel],sem_labels[sel]
# Get coords, shift to center
coords = np.floor(down_xyz / self.voxel_size)
coords-=coords.min(0)
return (coords,down_feat,down_labels,scene_type,scene_name)
def __len__(self):
return len(self.data)
'''
collate data for each batch
'''
def collate_fn(list_data):
new_list_data = []
num_removed = 0
for data in list_data:
if data is not None:
new_list_data.append(data)
else:
num_removed += 1
list_data = new_list_data
if len(list_data) == 0:
raise ValueError('No data in the batch')
coords, feats, labels,scene_types,scene_names = list(zip(*list_data))
eff_num_batch = len(coords)
assert len(labels) == eff_num_batch
lens = [len(c) for c in coords]
# filter samples
cum_len=np.cumsum(lens)
n_samples=(cum_len<MAX_POINTS).sum()
feats=feats[:n_samples]
labels=labels[:n_samples]
coords=coords[:n_samples]
scene_types=scene_types[:n_samples]
scene_names=scene_names[:n_samples]
# Concatenate all lists
curr_ptr = 0
num_tot_pts = sum(lens[:n_samples])
coords_batch = torch.zeros(num_tot_pts, 4)
feats_batch = torch.from_numpy(np.vstack(feats)).float()
labels_batch=torch.from_numpy(np.hstack(labels)).long()
scene_types_batch=torch.from_numpy(np.hstack(scene_types)).long()
for batch_id in range(n_samples):
coords_batch[curr_ptr:curr_ptr + lens[batch_id], :3] = torch.from_numpy(
coords[batch_id])
coords_batch[curr_ptr:curr_ptr + lens[batch_id], 3] = batch_id
curr_ptr += len(coords[batch_id])
return {
'coords': coords_batch,
'feats': feats_batch,
'sem_labels': labels_batch,
'clf_labels':scene_types_batch,
'scene_names':scene_names
}
class InfSampler(Sampler):
"""Samples elements randomly, without replacement.
Arguments:
data_source (Dataset): dataset to sample from
"""
def __init__(self, data_source, shuffle=True):
self.data_source = data_source
self.shuffle = shuffle
self.reset_permutation()
def reset_permutation(self):
perm = len(self.data_source)
if self.shuffle:
perm = torch.randperm(perm)
self._perm = perm.tolist()
def __iter__(self):
return self
def __next__(self):
if len(self._perm) == 0:
self.reset_permutation()
return self._perm.pop()
def __len__(self):
return len(self.data_source)
def get_iterators(path_train,path_val,config):
# train loader
train_set=ScanNetDataset(path_train,augment=True,voxel_size=config['voxel_size'])
train_args = {
'batch_size': config['train_batch_size'],
'num_workers': config['num_workers'],
'collate_fn': collate_fn,
'sampler':InfSampler(train_set),
'pin_memory': False,
'drop_last': False
}
train_loader = torch.utils.data.DataLoader(train_set, **train_args)
# val loader
val_set=ScanNetDataset(path_val,augment=False,voxel_size=config['voxel_size'])
val_args = {
'batch_size': config['val_batch_size'],
'num_workers': config['num_workers'],
'collate_fn': collate_fn,
'pin_memory': False,
'drop_last': False
}
val_loader = torch.utils.data.DataLoader(val_set,**val_args)
return {
'train': train_loader,
'val': val_loader
}
def get_testdataset(path_test,config):
test_set=ScanNetDataset(path_test,augment=False,voxel_size=config['voxel_size'])
val_args = {
'batch_size': config['test_batch_size'],
'num_workers': config['num_workers'],
'collate_fn': collate_fn,
'pin_memory': False,
'drop_last': False
}
test_loader = torch.utils.data.DataLoader(test_set,**val_args)
return test_loader
def get_valdataset(path_val,config):
# val loader
val_set=ScanNetDataset(path_val,augment=False,voxel_size=config['voxel_size'],
leave_rate=config['leave_rate'],crop_rate=config['crop_rate'],
skip_rate=config['skip_rate'],ind_remove=config['ind_remove'])
val_args = {
'batch_size': config['val_batch_size'],
'num_workers': config['num_workers'],
'collate_fn': collate_fn,
'pin_memory': False,
'drop_last': False
}
val_loader = torch.utils.data.DataLoader(val_set,**val_args)
return val_loader
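# Illustrative usage sketch (not part of the original module); the config keys are the
# ones read above, and the glob paths / values are assumptions for demonstration only.
#
#   config = {'voxel_size': 0.02, 'train_batch_size': 8, 'val_batch_size': 8,
#             'num_workers': 4}
#   loaders = get_iterators('scans/train/*.pth', 'scans/val/*.pth', config)
#   batch = next(iter(loaders['train']))  # dict with 'coords', 'feats', 'sem_labels', ...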
| 1.757813 | 2 |
google-dinosaur/google_dinosaur.py | cameronweibel/python-game-automation | 5 | 12771424 | <gh_stars>1-10
import mss
import cv2
import numpy as np
import keyboard
import pyautogui
import time
pyautogui.PAUSE = 0.01
def take_screenshot():
with mss.mss() as sct:
filename = sct.shot(output='fullscreen.png')
return filename
def get_frame(region):
with mss.mss() as sct:
screen = np.asarray(sct.grab(region))
screen_grayscale = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
#print(screen_grayscale.shape)
#cv2.imwrite("region.png",screen_grayscale)
return screen_grayscale
def paint_lines(region):
with mss.mss() as sct:
full_screen = {"top": 0, "left": 0,"width": 3840, "height": 2160}
screen = np.asarray(sct.grab(full_screen))
screen_grayscale = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
for i in [region['top'], region['top'] + 70]:
for j in range(region['left'], region['left'] + region['width']):
screen_grayscale[i,j] = 130
cv2.imwrite("region_on_screen.png", screen_grayscale)
def collision_detected(frame):
for x in [0, 70]:
if len(set(frame[x])) > 2:
return True
return False
region = {"top": 1150, "left": 300, "width": 350, "height": 85}
prev_time = time.time()
while True:
if keyboard.is_pressed('q'):
break
start_time = time.time()
if start_time - prev_time >= 2:
if region['width'] < 1600: region['width'] += 13
prev_time = start_time
frame = get_frame(region)
if collision_detected(frame):
pyautogui.keyDown('space')
print("%d FPS" % (1/(time.time() - start_time))) | 2.671875 | 3 |
optimization/shared/fed_dual_avg_schedule.py | diptanshumittal/FCO-ICML21 | 4 | 12771425 | # Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of the FedAvg algorithm with learning rate schedules.
This is intended to be a somewhat minimal implementation of Federated
Averaging that allows for client and server learning rate scheduling.
The original FedAvg is based on the paper:
Communication-Efficient Learning of Deep Networks from Decentralized Data
H. <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>. AISTATS 2017.
https://arxiv.org/abs/1602.05629
"""
import collections
from typing import Callable, Optional, Union
import attr
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_federated.python.tensorflow_libs import tensor_utils
# Convenience type aliases.
ModelBuilder = Callable[[], tff.learning.Model]
OptimizerBuilder = Callable[[float], tf.keras.optimizers.Optimizer]
ClientWeightFn = Callable[..., float]
LRScheduleFn = Callable[[Union[int, tf.Tensor]], Union[tf.Tensor, float]]
def _initialize_optimizer_vars(model: tff.learning.Model,
optimizer: tf.keras.optimizers.Optimizer):
"""Ensures variables holding the state of `optimizer` are created."""
delta = tf.nest.map_structure(tf.zeros_like, _get_weights(model).trainable)
model_weights = _get_weights(model)
grads_and_vars = tf.nest.map_structure(lambda x, v: (x, v), delta,
model_weights.trainable)
optimizer.apply_gradients(grads_and_vars, name='server_update')
assert optimizer.variables()
def _get_weights(model: tff.learning.Model) -> tff.learning.ModelWeights:
return tff.learning.ModelWeights.from_model(model)
@attr.s(eq=False, order=False, frozen=True)
class ServerState(object):
"""Structure for state on the server.
Fields:
  - `model`: the primal model weights, a dictionary of the model's trainable and
      non-trainable weights (consistent with FedAvg).
  - `dual_model_weights`: the dual model weights, a dictionary of the model's
      trainable and non-trainable weights.
  - `optimizer_state`: The server optimizer variables.
  - `elapsed_lr`: The cumulative learning rate used by the mirror/projection step.
  - `round_num`: The current training round, as a float.
"""
model = attr.ib()
dual_model_weights = attr.ib() # actually model_weights
optimizer_state = attr.ib()
elapsed_lr = attr.ib()
round_num = attr.ib()
# This is a float to avoid type incompatibility when calculating learning rate
# schedules.
@tf.function
def server_update(primal_model, dual_model, server_optimizer, server_mirror,
server_state, weights_delta, elapsed_lr_delta):
"""Updates `server_state` based on `weights_delta`, increase the round number.
Args:
model: A `tff.learning.Model`.
dual_model: A `tff.learning.Model` for dual weights.
server_optimizer: A `tf.keras.optimizers.Optimizer`.
server_state: A `ServerState`, the state to be updated.
weights_delta: An update to the trainable variables of the model.
Returns:
An updated `ServerState`.
"""
dual_model_weights = _get_weights(dual_model)
# server state hold dual model
tff.utils.assign(dual_model_weights, server_state.dual_model_weights)
# Server optimizer variables must be initialized prior to invoking this
tff.utils.assign(server_optimizer.variables(), server_state.optimizer_state)
weights_delta, has_non_finite_weight = (
tensor_utils.zero_all_if_any_non_finite(weights_delta))
if has_non_finite_weight > 0:
return server_state
# Apply the update to the model. We must multiply weights_delta by -1.0 to
# view it as a gradient that should be applied to the server_optimizer.
grads_and_vars = [
(-1.0 * x, v) for x, v in zip(weights_delta, dual_model_weights.trainable)
]
server_optimizer.apply_gradients(grads_and_vars)
elapsed_lr = server_state.elapsed_lr + elapsed_lr_delta * server_optimizer.lr
primal_model_weights = _get_weights(primal_model)
tff.utils.assign(primal_model_weights, dual_model_weights)
server_mirror(primal_model_weights.trainable, lr=elapsed_lr)
# Create a new state based on the updated model.
return tff.utils.update_state(
server_state,
model=primal_model_weights,
dual_model_weights=dual_model_weights,
optimizer_state=server_optimizer.variables(),
elapsed_lr = elapsed_lr,
round_num=server_state.round_num + 1.0)
@attr.s(eq=False, order=False, frozen=True)
class ClientOutput(object):
"""Structure for outputs returned from clients during federated optimization.
Fields:
- `weights_delta`: A dictionary of updates to the model's trainable
variables.
- `client_weight`: Weight to be used in a weighted mean when
aggregating `weights_delta`.
- `model_output`: A structure matching
`tff.learning.Model.report_local_outputs`, reflecting the results of
training on the input dataset.
- `optimizer_output`: Additional metrics or other outputs defined by the
optimizer.
"""
weights_delta = attr.ib()
client_weight = attr.ib()
elapsed_lr_delta = attr.ib() # necessary for dual averaging
model_output = attr.ib()
optimizer_output = attr.ib()
def create_client_update_fn():
"""Returns a tf.function for the client_update.
This "create" fn is necesessary to prevent
"ValueError: Creating variables on a non-first call to a function decorated
with tf.function" errors due to the client optimizer creating variables. This
is really only needed because we test the client_update function directly.
"""
@tf.function
def client_update(primal_model,
dual_model,
dataset,
dual_initial_weights,
client_optimizer,
client_mirror,
elapsed_lr,
client_weight_fn=None,
client_weight_pow=1):
"""Updates client model.
Args:
      primal_model: A `tff.learning.Model` holding the primal weights.
      dual_model: A `tff.learning.Model` holding the dual weights.
      dataset: A `tf.data.Dataset`.
      dual_initial_weights: Dual model weights received from the server.
      client_optimizer: A `tf.keras.optimizers.Optimizer` object.
      client_mirror: A callable applying the in-place mirror/projection step to the
        primal weights.
      elapsed_lr: The cumulative learning rate accumulated so far (used by the
        mirror/projection step).
      client_weight_fn: Optional function that takes the output of
        `model.report_local_outputs` and returns a tensor that provides the
        weight in the federated average of model deltas. If not provided, the
        default is the total number of examples processed on device.
      client_weight_pow: Power applied to the example count when computing the
        default client weight.
Returns:
      A `ClientOutput`.
"""
primal_model_weights = _get_weights(primal_model)
dual_model_weights = _get_weights(dual_model)
new_elapsed_lr = elapsed_lr
tff.utils.assign(dual_model_weights, dual_initial_weights)
num_examples = tf.constant(0, dtype=tf.int32)
for batch in dataset:
# assign dual to primal
tff.utils.assign(primal_model_weights, dual_model_weights)
# apply (in place) projector to primal model
client_mirror(primal_model_weights.trainable, lr=new_elapsed_lr)
# tape gradients
with tf.GradientTape() as tape:
output = primal_model.forward_pass(batch)
grads = tape.gradient(output.loss, primal_model_weights.trainable)
# zip gradient with DUAL trainable
grads_and_vars = zip(grads, dual_model_weights.trainable)
# apply gradients (to dual)
client_optimizer.apply_gradients(grads_and_vars)
num_examples += tf.shape(output.predictions)[0]
new_elapsed_lr += client_optimizer.lr
aggregated_outputs = primal_model.report_local_outputs()
weights_delta = tf.nest.map_structure(lambda a, b: a - b,
dual_model_weights.trainable,
dual_initial_weights.trainable)
weights_delta, has_non_finite_weight = (
tensor_utils.zero_all_if_any_non_finite(weights_delta))
if has_non_finite_weight > 0:
client_weight = tf.constant(0, dtype=tf.float32)
elif client_weight_fn is None:
client_weight = tf.cast(float(num_examples) ** float(client_weight_pow), tf.float32)
else:
client_weight = client_weight_fn(aggregated_outputs)
return ClientOutput(
weights_delta, client_weight,
new_elapsed_lr - elapsed_lr, aggregated_outputs,
collections.OrderedDict([('num_examples', num_examples)]))
return client_update
def build_server_init_fn(
model_fn: ModelBuilder,
server_optimizer_fn: Callable[[], tf.keras.optimizers.Optimizer]):
"""Builds a `tff.tf_computation` that returns the initial `ServerState`.
  The attributes `ServerState.model`, `ServerState.dual_model_weights` and
  `ServerState.optimizer_state` are initialized via their constructor functions.
  The attributes `ServerState.elapsed_lr` and `ServerState.round_num` are set to 0.0.
Args:
model_fn: A no-arg function that returns a `tff.learning.Model`.
server_optimizer_fn: A no-arg function that returns a
`tf.keras.optimizers.Optimizer`.
Returns:
A `tff.tf_computation` that returns initial `ServerState`.
"""
@tff.tf_computation
def server_init_tf():
server_optimizer = server_optimizer_fn()
primal_model = model_fn()
dual_model = model_fn()
_initialize_optimizer_vars(dual_model, server_optimizer)
return ServerState(
model=_get_weights(primal_model),
dual_model_weights=_get_weights(dual_model),
optimizer_state=server_optimizer.variables(),
elapsed_lr=0.0,
round_num=0.0)
return server_init_tf
def build_fed_dual_avg_process(
model_fn: ModelBuilder,
client_optimizer_fn: OptimizerBuilder,
client_lr: Union[float, LRScheduleFn] = 0.1,
client_mirror=(lambda _: None),
server_optimizer_fn: OptimizerBuilder = tf.keras.optimizers.SGD,
server_lr: Union[float, LRScheduleFn] = 1.0,
server_mirror=(lambda _: None),
client_weight_fn: Optional[ClientWeightFn] = None,
client_weight_pow=1,
) -> tff.templates.IterativeProcess:
"""Builds the TFF computations for optimization using federated averaging.
Args:
model_fn: A no-arg function that returns a `tff.learning.Model`.
client_optimizer_fn: A function that accepts a `learning_rate` keyword
argument and returns a `tf.keras.optimizers.Optimizer` instance.
client_lr: A scalar learning rate or a function that accepts a float
`round_num` argument and returns a learning rate.
server_optimizer_fn: A function that accepts a `learning_rate` argument and
returns a `tf.keras.optimizers.Optimizer` instance.
server_lr: A scalar learning rate or a function that accepts a float
`round_num` argument and returns a learning rate.
client_weight_fn: Optional function that takes the output of
`model.report_local_outputs` and returns a tensor that provides the weight
in the federated average of model deltas. If not provided, the default is
the total number of examples processed on device.
Returns:
A `tff.templates.IterativeProcess`.
"""
client_lr_schedule = client_lr
if not callable(client_lr_schedule):
client_lr_schedule = lambda round_num: client_lr
server_lr_schedule = server_lr
if not callable(server_lr_schedule):
server_lr_schedule = lambda round_num: server_lr
dummy_model = model_fn()
server_init_tf = build_server_init_fn(
model_fn,
# Initialize with the learning rate for round zero.
lambda: server_optimizer_fn(server_lr_schedule(0)))
server_state_type = server_init_tf.type_signature.result
model_weights_type = server_state_type.model
round_num_type = server_state_type.round_num
elapsed_lr_type = server_state_type.elapsed_lr
tf_dataset_type = tff.SequenceType(dummy_model.input_spec)
model_input_type = tff.SequenceType(dummy_model.input_spec)
@tff.tf_computation(model_input_type, model_weights_type, round_num_type, elapsed_lr_type)
def client_update_fn(tf_dataset, initial_model_weights, round_num, elapsed_lr):
client_lr = client_lr_schedule(round_num)
client_optimizer = client_optimizer_fn(client_lr)
client_update = create_client_update_fn()
# client_update consumes two dummy model
return client_update(model_fn(), model_fn(), tf_dataset, initial_model_weights,
client_optimizer, client_mirror, elapsed_lr,
client_weight_fn, client_weight_pow)
@tff.tf_computation(server_state_type, model_weights_type.trainable, elapsed_lr_type)
def server_update_fn(server_state, model_delta, elapsed_lr_delta):
primal_model = model_fn()
dual_model = model_fn()
server_lr = server_lr_schedule(server_state.round_num)
server_optimizer = server_optimizer_fn(server_lr)
# We initialize the server optimizer variables to avoid creating them
# within the scope of the tf.function server_update.
_initialize_optimizer_vars(primal_model, server_optimizer)
_initialize_optimizer_vars(dual_model, server_optimizer)
return server_update(primal_model, dual_model, server_optimizer,
server_mirror, server_state,
model_delta, elapsed_lr_delta)
@tff.federated_computation(
tff.FederatedType(server_state_type, tff.SERVER),
tff.FederatedType(tf_dataset_type, tff.CLIENTS))
def run_one_round(server_state, federated_dataset):
"""Orchestration logic for one round of computation.
Args:
server_state: A `ServerState`.
federated_dataset: A federated `tf.Dataset` with placement `tff.CLIENTS`.
Returns:
A tuple of updated `ServerState` and the result of
`tff.learning.Model.federated_output_computation`.
"""
client_dual_model_weights = tff.federated_broadcast(server_state.dual_model_weights)
client_round_num = tff.federated_broadcast(server_state.round_num)
client_elapsed_lr = tff.federated_broadcast(server_state.elapsed_lr)
client_outputs = tff.federated_map(
client_update_fn,
(federated_dataset, client_dual_model_weights,
client_round_num, client_elapsed_lr))
client_weight = client_outputs.client_weight
model_delta = tff.federated_mean(
client_outputs.weights_delta, weight=client_weight)
elapsed_lr_delta = tff.federated_mean(
client_outputs.elapsed_lr_delta, weight=client_weight)
server_state = tff.federated_map(server_update_fn,
(server_state, model_delta, elapsed_lr_delta))
aggregated_outputs = dummy_model.federated_output_computation(
client_outputs.model_output)
if aggregated_outputs.type_signature.is_struct():
aggregated_outputs = tff.federated_zip(aggregated_outputs)
return server_state, aggregated_outputs
@tff.federated_computation
def initialize_fn():
return tff.federated_value(server_init_tf(), tff.SERVER)
return tff.templates.IterativeProcess(
initialize_fn=initialize_fn, next_fn=run_one_round)
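# Illustrative usage sketch (model_fn and federated_train_data are assumptions, not
# defined in this module); it follows the usual TFF iterative-process loop.
#
#   iterative_process = build_fed_dual_avg_process(
#       model_fn, client_optimizer_fn=tf.keras.optimizers.SGD, client_lr=0.1)
#   state = iterative_process.initialize()
#   for _ in range(num_rounds):
#       state, metrics = iterative_process.next(state, federated_train_data)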
| 2.109375 | 2 |
day04.py | joelgrus/advent2021 | 13 | 12771426 | <reponame>joelgrus/advent2021
from typing import List
RAW = """7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1
22 13 17 11 0
8 2 23 4 24
21 9 14 16 7
6 10 3 18 5
1 12 20 15 19
3 15 0 2 22
9 18 13 17 5
19 8 7 25 23
20 11 10 24 4
14 21 16 12 6
14 21 17 24 4
10 16 15 9 19
18 8 23 26 20
22 11 13 6 5
2 0 12 3 7"""
class Board:
def __init__(self, grid: List[List[int]]):
self.grid = grid
self.nr = len(grid)
self.nc = len(grid[0])
self.row_counts = [0 for _ in range(self.nr)]
self.col_counts = [0 for _ in range(self.nc)]
def mark(self, number: int) -> None:
for i in range(self.nr):
for j in range(self.nc):
if self.grid[i][j] == number:
self.row_counts[i] += 1
self.col_counts[j] += 1
self.grid[i][j] = -1 # bad, bad, bad
def is_winner(self) -> bool:
if any(rc == self.nc for rc in self.row_counts):
return True
if any(cc == self.nr for cc in self.col_counts):
return True
else:
return False
def score(self, number: int) -> int:
"""Return the score of the board"""
return number * sum(entry
for row in self.grid
for entry in row
if entry != -1)
@staticmethod
def parse(raw: str) -> 'Board':
grid = [
[int(n) for n in row.split()]
for row in raw.split('\n')
]
return Board(grid)
class Game:
def __init__(self, numbers: List[int], boards: List[Board]) -> None:
self.numbers = numbers
self.boards = boards
def play(self) -> int:
"""Play the game and return the final score of the winning board"""
for number in self.numbers:
for board in self.boards:
board.mark(number)
if board.is_winner():
return board.score(number)
raise ValueError("No winner")
def play_last(self) -> int:
"""
Play the game until only one board is left
Return its score when it wins
"""
for number in self.numbers:
to_remove = []
for board in self.boards:
board.mark(number)
if board.is_winner() and len(self.boards) == 1:
return board.score(number)
elif board.is_winner():
to_remove.append(board)
continue
for board in to_remove:
self.boards.remove(board)
raise ValueError("No winner")
@staticmethod
def parse(raw: str) -> 'Game':
"""Parse the input into a Game object"""
paragraphs = raw.split("\n\n")
numbers = [int(n) for n in paragraphs[0].split(",")]
paragraphs = paragraphs[1:]
boards = [Board.parse(paragraph) for paragraph in paragraphs]
return Game(numbers, boards)
GAME = Game.parse(RAW)
SCORE = GAME.play()
assert SCORE == 4512
GAME = Game.parse(RAW)
SCORE = GAME.play_last()
assert SCORE == 1924
if __name__ == "__main__":
raw = open('data/day04.txt').read()
game = Game.parse(raw)
print(game.play())
game = Game.parse(raw)
print(game.play_last()) | 3.421875 | 3 |
setup.py | arsham/django-cachedpaginator | 5 | 12771427 | <filename>setup.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
def read_file(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
setup(
name='django-cachedpaginator',
version=__import__('django_cachedpaginator').__version__,
description="Paginator that caches pages automatically.",
long_description=read_file('README.md'),
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Framework :: Django',
'Environment :: Web Environment',
],
keywords=['paginator', 'django', 'cache'],
include_package_data=True,
    platforms=['OS Independent'],
author='<NAME>',
author_email='<EMAIL>',
url='http://github.com/arsham/django-cachedpaginator',
license='BSD',
packages=find_packages(),
zip_safe=False,
)
| 1.53125 | 2 |
python/tasks/task11.py | eiseazei/core | 0 | 12771428 | #! /usr/bin/python3.7
from common import test_input_integer, \
handle_age_input, \
num_denum, \
week_day
# test user input and raise an exception if it is bad
test_input_integer()
# test age value
handle_age_input()
# test two input values and divide
num_denum()
# print week day
week_day()
| 3.515625 | 4 |
tester.py | abr-98/Chatbot_modified | 10 | 12771429 | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import webbrowser
import requests
import time
import datetime
from bs4 import BeautifulSoup
import re
from googlesearch import *
import requests, json
#import pafy
def scrape(phrase):
flag=0
ext="https://www.google.com/search?q="
links=search(phrase, num=5, stop=5, pause=2)
msg=phrase.replace(" ","+")
url=ext+msg
i=0
for link in links:
i+=1
if 'wikipedia' in link:
flag=1
l=link
break
if flag==1:
wiki = requests.get(l)
wiki_c = wiki.content
soup = BeautifulSoup(wiki_c, 'html.parser')
data=soup.find_all('p')
print("Source:wikipedia")
print(data[0].get_text())
print(data[1].get_text())
print(data[2].get_text())
print(data[3].get_text())
else:
print("wikipedia Source not available")
print("Providing search results")
webbrowser.open(url,new=1)
time.sleep(3)
scrape("What is internet") | 3.140625 | 3 |
08_Paquetes/8.1_uso-paquetes.py | alemr214/curso-python-desde-cero | 0 | 12771430 | <filename>08_Paquetes/8.1_uso-paquetes.py<gh_stars>0
# A module from the "paquetito" package is imported, using all of its functionality
from paquetito.funcionesMatematicas import *
def main():
print(sumar(5, 4))
if __name__ == '__main__':
main()
| 2.171875 | 2 |
src/py21cmmc/prior.py | dprelogo/21CMMC | 0 | 12771431 | <gh_stars>0
"""Module containing additional priors to be used in 21CMMC."""
import logging
import numpy as np
from functools import partial
from sklearn.neighbors import KernelDensity
from . import likelihood
logger = logging.getLogger("21cmFAST")
class PriorBase(likelihood.LikelihoodBase):
"""Base prior class."""
def computePrior(self, arg_values):
"""Calculate the log-prior of the instance data given the model.
Parameters
----------
arg_values : list
A list containing all model-dependent quantities required to calculate
the prior. Explicitly, matches the output of :meth:`~reduce_data`.
Returns
-------
lnP : float
Logarithm of the prior.
"""
raise NotImplementedError("The Base prior should never be used directly!")
def computeLikelihood(self, arg_values):
"""Wrapping prior to likelihood computiation.
This is needed for the compatibility with the rest of the code.
"""
return self.computePrior(arg_values)
class PriorFunction(PriorBase):
"""Wrapper class for an arbitrary prior function.
Parameters
----------
arg_names : list
List of argument names to be extracted from the context and from which
`arg_values` list is constructed.
f : callable
        log-prior function. It is assumed to receive a list of `arg_values` as
a parameter, i.e. it is called as `f(arg_values)`.
"""
def __init__(self, arg_names, f=None):
super().__init__()
self.arg_names = arg_names
self.f = f
def computePrior(self, arg_values):
"""Calling the prior function."""
if self.f is None:
raise ValueError("Prior function is not defined.")
else:
return self.f(arg_values)
def reduce_data(self, ctx):
"""Extracting argument values from the context."""
params = ctx.getParams()
arg_values = [v for k, v in params.items() if k in self.arg_names]
return arg_values
class PriorGaussianKDE(PriorFunction):
"""Gaussian Kernel Density Estimation prior function.
    Given the list of `arg_names` and chain data to fit the KDE with,
    it returns a KDE approximation of the underlying prior.
Parameters
----------
chain : array
Array containing data to be fit by KDE, of shape `(n_samples, n_dim)`.
arg_names : list
List of argument names, in the same order as in `chain`. Number of arguments
should be equal to `n_dim`.
bandwidth : float, optional
Gaussian bandwidth to use, by default calculates optimal bandwidth.
whiten : bool, optional
Either to use whitener with KDE fit or not. Defaults to `True`.
whitening_algorithm : str, optional
        Whitening algorithm to use, one of `["PCA", "ZCA", "rescale"]`,
defaults to "rescale".
"""
def __init__(
self,
chain,
arg_names,
bandwidth=None,
whiten=True,
whitening_algorithm="rescale",
):
super().__init__(arg_names)
if bandwidth is None:
n_samples, n_dim = chain.shape
bandwidth = 10 ** self.log_scott(n_samples, n_dim)
if whiten is False:
whitening_algorithm = None
self.dw = DataWhitener(algorithm=whitening_algorithm)
self.dw.fit(chain)
self.kde = KernelDensity(bandwidth=bandwidth)
self.kde.fit(self.dw.whiten(chain))
def computePrior(self, arg_values):
"""Computing KDE prior.
        First extracts the argument values, then computes lnP from the fitted KDE,
        applying the whitening step when configured.
"""
arg_values = np.array(arg_values).reshape(1, -1)
arg_values = self.dw.whiten(arg_values)
lnP = np.squeeze(self.kde.score_samples(arg_values))
return lnP
@staticmethod
def log_scott(n_samples, n_dim):
"""Optimal bandwidth, i.e. Scott's parameter."""
return -1 / (n_dim + 4) * np.log10(n_samples)
class DataWhitener:
"""Whitening of the data.
Implements several algorithms, depending on the desired whitening properties.
Parameters
----------
algorithm : str
One of `[None, "PCA", "ZCA", "rescale"]`.
`None`: does nothing.
"PCA": data is transformed into its PCA space and divided by
the standard deviation of each dimension
"ZCA": equivalent to the "PCA", with additional step of rotating
back to original space. In this case, the final data still
outputs 'in the same direction'.
"rescale": calculates mean and standard deviation in each dimension
and rescales it to zero-mean, unit-variance. In the absence
of high correlations between dimensions, this is often sufficient.
"""
def __init__(self, algorithm="rescale"):
self.algorithm = algorithm
def fit(self, X, save_data=False):
"""Fitting the whitener on the data X.
Parameters
----------
X : array
Of shape `(n_samples, n_dim)`.
save_data : bool
If `True`, saves the data and whitened data as `self.data`, `self.whitened_data`.
"""
if self.algorithm is not None:
self.μ = np.mean(X, axis=0, dtype=np.float128).astype(np.float32)
Σ = np.cov(X.T)
evals, evecs = np.linalg.eigh(Σ)
if self.algorithm == "PCA":
self.W = np.einsum("ij,kj->ik", np.diag(evals ** (-1 / 2)), evecs)
self.WI = np.einsum("ij,jk->ik", evecs, np.diag(evals ** (1 / 2)))
elif self.algorithm == "ZCA":
self.W = np.einsum(
"ij,jk,lk->il", evecs, np.diag(evals ** (-1 / 2)), evecs
)
self.WI = np.einsum(
"ij,jk,lk->il", evecs, np.diag(evals ** (1 / 2)), evecs
)
elif self.algorithm == "rescale":
self.W = np.identity(len(Σ)) * np.diag(Σ) ** (-1 / 2)
self.WI = np.identity(len(Σ)) * np.diag(Σ) ** (1 / 2)
else:
raise ValueError(
"`algorithm` should be either `None`, PCA, ZCA or rescale."
)
if save_data:
self.data = X
self.whitened_data = self.whiten(X)
def whiten(self, X):
"""Whiten the data by making it unit covariance.
Parameters
----------
X : array
Data to whiten, of shape `(n_samples, n_dims)`.
`n_dims` has to be the same as self.data.
Returns
-------
whitened_data : array
Whitened data, of shape `(n_samples, n_dims)`.
"""
if self.algorithm is None:
return X
if len(X.shape) == 1:
X = np.expand_dims(X, axis=0)
squeeze = True
else:
squeeze = False
X_whitened = np.einsum("ij,kj->ki", self.W, X - self.μ)
return np.squeeze(X_whitened) if squeeze else X_whitened
def unwhiten(self, X_whitened):
"""Un-whiten the sample with whitening parameters from the data.
Parameters
----------
X_whitened : array
Sample of the data to un-whiten, of shape `(n_samples, n_dims)`.
`n_dims` has to be the same as `self.data`.
Returns
-------
X : array
Whitened data, of shape `(n_samples, n_dims)`.
"""
if self.algorithm is None:
return X_whitened
if len(X_whitened.shape) == 1:
X_whitened = np.expand_dims(X_whitened, axis=0)
squeeze = True
else:
squeeze = False
X = np.einsum("ij,kj->ki", self.WI, X_whitened) + self.μ
return np.squeeze(X) if squeeze else X
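# Illustrative usage sketch (not part of the original module); the chain and the
# parameter names below are assumptions chosen purely for demonstration.
#
#   chain = np.random.multivariate_normal([30.0, 4.7], 0.1 * np.eye(2), size=10000)
#   prior = PriorGaussianKDE(chain, ["HII_EFF_FACTOR", "ION_Tvir_MIN"])
#   lnP = prior.computePrior([30.0, 4.7])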
| 2.40625 | 2 |
strats/always_come.py | crschmidt/crapssim | 0 | 12771432 | <filename>strats/always_come.py
#!/usr/bin/python
# Copyright 2022 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sim import Sim, Player, TABLE_MIN
def pass_come_strat(player, point):
if not player.current_bets.get('pass', 0) and not point:
player.bet("pass", 15)
if not player.current_bets.get('come') and point:
player.bet("come", 15)
def test_pass_come_strat():
s = Sim()
p = Player(0, "Pass Come")
p.set_strategy(pass_come_strat)
s.players.append(p)
s.rolls = [
[3,3],
[3,3],
[5,6],
[3,2],
[3,4],
[3,3],
[3,4],
[2,2],
[5,5],
[3,4],
]
s.runsim()
if __name__ == "__main__":
test_pass_come_strat()
| 2.359375 | 2 |
clubadmin/config/memadmin.py | rpmoseley/clubadmin | 0 | 12771433 | <reponame>rpmoseley/clubadmin
'''
This module provides the configuration information for the database used by the
memadmin sub-package of the ClubAdmin package.
'''
from .options import DefaultOptions
| 1.179688 | 1 |
notebooks/vbm/spm.py | tobiashepp/brainage | 1 | 12771434 | import time
from pathlib import Path
import scipy.io
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
from nilearn import datasets
from nilearn.input_data import MultiNiftiMasker
from nilearn.image import get_data
from nilearn.mass_univariate import permuted_ols
from sklearn.feature_selection import VarianceThreshold
n_subjects = 2136
var_threshold = 0.001
smoothness = 12
permutations = 5000
jobs = 15
t0 = time.perf_counter()
print('loading and preprocessing data ...')
dir = Path('/mnt/qdata/raheppt1/data/brainage/nako/interim/vbm/test4')
files = sorted(list(dir.glob('*nii')))
keys = [f.stem[:6] for f in files]
tiv = Path('/mnt/qdata/raheppt1/data/brainage/nako/interim/vbm/test4/report/TIV_test4.txt')
tiv = np.array([float(l.split('\t')[0]) for l in tiv.open('r').readlines()])
info = pd.read_csv('/mnt/qdata/raheppt1/data/brainage/nako/interim/nako_age_labels.csv').astype({'key': str, 'age': np.float64})
info = info.set_index('key')
metadata = pd.merge(info.loc[keys]['age'], pd.DataFrame.from_dict({'key': keys, 'tiv': tiv}), how='inner', on='key')
metadata = metadata.set_index('key')
dir = Path('/mnt/qdata/raheppt1/data/brainage/nako/interim/vbm/test4/mri')
proc_files = sorted(list(dir.glob('mwp1*nii')))
proc_keys = [f.stem[4:10] for f in proc_files]
proc_metadata = metadata.loc[proc_keys]
print(len(proc_metadata))
gray_matter_map_filenames = proc_files
gray_matter_map_filenames = sorted([str(f) for f in gray_matter_map_filenames])[:n_subjects]
age = np.array(proc_metadata['age'].tolist())[:n_subjects]
tiv = np.array(proc_metadata['tiv'].tolist())[:n_subjects]
tiv[np.isnan(tiv)] = 0
tiv = tiv[:, np.newaxis]
nifti_masker = MultiNiftiMasker(standardize=False, smoothing_fwhm=smoothness, memory=None, n_jobs=jobs, verbose=1) #, cache options
gm_maps_masked = nifti_masker.fit_transform(gray_matter_map_filenames)
gm_maps_masked = np.concatenate(gm_maps_masked, axis=0)
n_samples, n_features = gm_maps_masked.shape
print('%d samples, %d features' % (n_samples, n_features))
print(f'{time.perf_counter() - t0} s')
### Inference with massively univariate model ###
print("Massively univariate model")
# Remove features with too low between-subject variance
variance_threshold = VarianceThreshold(threshold=var_threshold)
# Statistical inference
data = variance_threshold.fit_transform(gm_maps_masked)
#data = gm_maps_masked
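# permuted_ols fits a mass-univariate model: age is regressed against every voxel's
# gray-matter value at once, with total intracranial volume (tiv) as a confound, and a
# permutation test yields the neg_log_pvals (-log10 p-values) and t-scores used below.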
neg_log_pvals, t_scores_original_data, _ = permuted_ols(
age, data, # + intercept as a covariate by default
confounding_vars=tiv,
    n_perm=permutations,  # permutation count set above (5,000 here; more gives more precise p-values)
n_jobs=jobs) # CPUs
signed_neg_log_pvals = neg_log_pvals * np.sign(t_scores_original_data)
signed_neg_log_pvals_unmasked = nifti_masker.inverse_transform(
variance_threshold.inverse_transform(signed_neg_log_pvals))
print(f'{time.perf_counter() - t0} s')
nib.save(signed_neg_log_pvals_unmasked, 'test.nii.gz') | 1.90625 | 2 |
delete.py | cemitchell07/sgmatrix_to_swe | 1 | 12771435 | <gh_stars>1-10
##########################################################################################
## Delete Functions for Identity Services Engine and Stealthwatch SMC
##
## This script contains all functions used to delete events from SMC for
## sgmatrix_to_swe.py
##########################################################################################
## System Requirements:
## Stealthwatch Version: 7.0.0 or higher
## Identity Services Engine: 2.3 or higher (pxGrid 2.0 Websocket)
##########################################################################################
## Author: <NAME>
## License: BSD 3-Clause
## Version: 1.0
## Email: <EMAIL>
##########################################################################################
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
## FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
## DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
## SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
## CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
## OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##########################################################################################
import json
import requests
def deleteevent(config, event_id):
try:
requests.packages.urllib3.disable_warnings()
except:
pass
url = "https://" + config.get_swe_host() + "/token/v2/authenticate"
# Create the login request data
login_request_data = {"username": config.get_swe_user(), "password": config.get_swe_pass()}
# Initialize the Requests session
api_session = requests.Session()
# Perform the POST request to login
response = api_session.request("POST", url, verify=False, data=login_request_data)
# If the login was successful
if(response.status_code == 200):
        # Delete the event from the SMC
url = 'https://' + config.get_swe_host() + '/smc-configuration/rest/v1/tenants/' + config.get_swe_tenant() + '/policy/customEvents/' + str(event_id)
response = api_session.request("DELETE", url, verify=False)
        # If the deletion was successful
        if (response.status_code == 200):
            deleteresponse = "DELETED"
# If unable to delete
else:
deleteresponse = "FAILED"
uri = 'https://' + config.get_swe_host() + '/token'
response = api_session.delete(uri, timeout=30, verify=False)
    # If the login was unsuccessful
    else:
        print("An error has occurred while logging in, with the following code {}".format(response.status_code))
        deleteresponse = "FAILED"
    return deleteresponse
| 1.429688 | 1 |
heartrate.py | oldnapalm/vpower | 14 | 12771436 | <filename>heartrate.py
#!/usr/bin/env python
import os, sys
import time
import csv
import platform
from ant.core import driver
from ant.core import node
from ant.plus.heartrate import *
from usb.core import find
from PowerMeterTx import PowerMeterTx
from config import DEBUG, LOG, NETKEY, POWER_SENSOR_ID
from functions import interp
if getattr(sys, 'frozen', False):
# If we're running as a pyinstaller bundle
SCRIPT_DIR = os.path.dirname(sys.executable)
else:
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
antnode = None
hr_monitor = None
power_meter = None
last = 0
stopped = True
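# Heart-rate -> power mapping: xp holds heart-rate breakpoints and yp the matching
# power values. They come from an optional zones.csv (header row skipped; columns:
# heart rate, power) or from the defaults below, and interp() (presumably a linear
# interpolation helper from functions.py) maps each measured heart rate to a power.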
xp = [0]
yp = [0]
zones_file = '%s/zones.csv' % SCRIPT_DIR
if os.path.isfile(zones_file):
with open(zones_file, 'r') as fd:
reader = csv.reader(fd)
next(reader, None)
for line in reader:
xp.append(int(line[0]))
yp.append(int(line[1]))
else:
xp.extend([80, 100, 120, 140, 160, 180])
yp.extend([0, 110, 140, 170, 200, 230])
def stop_ant():
if hr_monitor:
print("Closing heart rate monitor")
hr_monitor.close()
if power_meter:
print("Closing power meter")
power_meter.close()
power_meter.unassign()
if antnode:
print("Stopping ANT node")
antnode.stop()
pywin32 = False
if platform.system() == 'Windows':
def on_exit(sig, func=None):
stop_ant()
try:
import win32api
win32api.SetConsoleCtrlHandler(on_exit, True)
pywin32 = True
except ImportError:
print("Warning: pywin32 is not installed, use Ctrl+C to stop")
def heart_rate_data(computed_heartrate, event_time_ms, rr_interval_ms):
global last
global stopped
t = int(time.time())
if t >= last + 1:
power = int(interp(xp, yp, computed_heartrate))
if power:
power_meter.update(power)
stopped = False
elif not stopped:
power_meter.update(power)
stopped = True
last = t
try:
devs = find(find_all=True, idVendor=0x0fcf)
for dev in devs:
if dev.idProduct in [0x1008, 0x1009]:
stick = driver.USB2Driver(log=LOG, debug=DEBUG, idProduct=dev.idProduct, bus=dev.bus, address=dev.address)
try:
stick.open()
except:
continue
stick.close()
break
else:
print("No ANT devices available")
if getattr(sys, 'frozen', False):
input()
sys.exit()
antnode = node.Node(stick)
print("Starting ANT node")
antnode.start()
network = node.Network(NETKEY, 'N:ANT+')
antnode.setNetworkKey(0, network)
print("Starting power meter with ANT+ ID " + repr(POWER_SENSOR_ID))
try:
# Create the power meter object and open it
power_meter = PowerMeterTx(antnode, POWER_SENSOR_ID)
power_meter.open()
except Exception as e:
print("power_meter error: " + repr(e))
power_meter = None
print("Starting heart rate monitor")
try:
# Create the heart rate monitor object and open it
hr_monitor = HeartRate(antnode, network, {'onHeartRateData': heart_rate_data})
hr_monitor.open()
except Exception as e:
print("hr_monitor error: " + repr(e))
hr_monitor = None
print("Main wait loop")
while True:
try:
time.sleep(1)
except (KeyboardInterrupt, SystemExit):
break
except Exception as e:
print("Exception: " + repr(e))
if getattr(sys, 'frozen', False):
input()
finally:
if not pywin32:
stop_ant()
| 2.546875 | 3 |
parser/parse.py | raymond-s/data-visualizer | 0 | 12771437 | <filename>parser/parse.py
#!/usr/bin/env python2.7
import json
import csv
import sys
import os
import keen
import itertools
import urllib3
urllib3.disable_warnings()
csv.field_size_limit(sys.maxsize)
data_rows = []
#helper function to check if incoming data is a decimal number
def isfloat(value):
try:
float(value)
return True
except:
return False
#helper function to check if incoming data is an integer
def isint(value):
try:
int(value)
return True
except:
return False
def main():
keen.project_id = "<insert-your-credentials-here"
keen.write_key = "<insert-your-credentials-here"
keen.read_key = "<insert-your-credentials-here"
keen.master_key = "<insert-your-credentials-here"
with open ('LoanStats3d.csv') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
x = 0
for row in reader:
if (len(row) == 52):
for x in range(0,len(row)):
if (isint(row[x])):
row[x] = int(row[x])
elif (isfloat(row[x])):
row[x] = float(row[x])
                try: # fields stored with a trailing '%' need it stripped before casting to float
row[6] = float(row[6][0:-1])
row[33] = float(row[33][0:-1])
except:
pass
data_rows.append(row)
columns = data_rows.pop(0) #pop first row, which are the name of the columns
loanee = {};
for x in range(0,1000):
loanee = dict(zip(columns, data_rows[x]))
#print(loanee["term"])
keen.add_event("Loanee", loanee)
print("Event #" + str(x) + " loaded")
main() | 3.359375 | 3 |
deformable_conv.py | kennis-coder/multimodal_generative_fusion_framework | 1 | 12771438 | <gh_stars>1-10
# -*- encoding: utf-8 -*-
# @Date : 6/27/19
# @Author : <NAME>
from keras import activations
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.backend import (arange,
reshape,
flatten,
repeat_elements,
concatenate,
conv2d,
spatial_2d_padding,
transpose, expand_dims)
from keras.engine.base_layer import Layer, InputSpec
from keras.utils import conv_utils
from tensorflow import meshgrid, floor, ceil, gather_nd, clip_by_value
class DeformConv2d(Layer):
def __init__(self,
kernel_size=3,
strides=1,
padding='valid',
dilation_rate=1,
activation=None,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
"""
        :param kernel_size: an odd integer, or a tuple/list of two odd integers
:param strides:
:param padding:
:param dilation_rate:
:param activation:
:param kernel_initializer:
:param bias_initializer:
:param kernel_regularizer:
:param bias_regularizer:
:param kernel_constraint:
:param bias_constraint:
:param kwargs:
"""
super(DeformConv2d, self).__init__(**kwargs)
# H_kernel = kernel_size[0]
# W_kernel = kernel_size[1]
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2,
'kernel_size')
        assert len(self.kernel_size) == 2, u'kernel_size must have length 2'
        assert self.kernel_size[0] & 1 == 1 and self.kernel_size[1] & 1 == 1, u"kernel height and width must be odd"
        # N = H_kernel * W_kernel
        # the offset convolution outputs 2 * N channels: an x offset and a y offset for each kernel position
self.filters = 2 * self.kernel_size[0] * self.kernel_size[1]
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,
'dilation_rate')
self.activation = activations.get(activation)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(ndim=4)
self.kernel = None
self.bias = None
def build(self, input_shape):
"""
        :param input_shape: shape of the input feature map
:return: None
"""
in_channel = input_shape[-1]
out_channel = self.filters
kernel_shape = self.kernel_size + (in_channel, out_channel)
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.bias = self.add_weight(shape=(out_channel,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
self.input_spec = InputSpec(ndim=4,
axes={-1: in_channel})
self.built = True
@staticmethod
def _calc_p0(out_height, out_width, N):
"""
        x and y coordinates of every pixel of the convolution's output feature map.
        :param out_height: height of the output feature map of the convolution
        :param out_width: width of the output feature map of the convolution
        :param N: half of the number of output channels, i.e. H_kernel * W_kernel
        :return: a tensor of shape [1, height, width, 2N]; the first N channels are x coordinates, the last N are y coordinates
"""
x = arange(1, out_height + 1)
y = arange(1, out_width + 1)
p0_x, p0_y = meshgrid(x, y)
flattened_x = flatten(p0_x)
reshaped_x = reshape(flattened_x, (1, out_height, out_width, 1))
repeated_x = repeat_elements(reshaped_x, rep=N, axis=-1)
flattened_y = flatten(p0_y)
reshaped_y = reshape(flattened_y, (1, out_height, out_width, 1))
repeated_y = repeat_elements(reshaped_y, rep=N, axis=-1)
p0 = concatenate((repeated_x, repeated_y), axis=-1)
return p0
def _calc_pn(self, N):
"""
        Pixel offsets covered by the kernel; e.g. a 3×3 kernel covers
        [[(-1, -1), (0, -1), (1, -1)],
         [(-1,  0), (0,  0), (1,  0)],
         [(-1,  1), (0,  1), (1,  1)]]
        There are height × width offsets for each of the x and y axes.
        :param N: kernel height × width, i.e. H_kernel * W_kernel
        :return: the first N channels hold the x-axis offsets, the last N channels the y-axis offsets
"""
pn_x, pn_y = meshgrid(arange(-(self.kernel_size[0] - 1) // 2, (self.kernel_size[0] - 1) // 2 + 1),
arange(-(self.kernel_size[1] - 1) // 2, (self.kernel_size[1] - 1) // 2 + 1))
pn = concatenate((flatten(pn_x), flatten(pn_y)), axis=0)
pn = reshape(pn, shape=(1, 1, 1, 2 * N))
return pn
def _calc_p(self, offset_field, out_height, out_width, N):
"""
        Regular grid coordinates plus the learned offsets.
        :param offset_field: the learned coordinate offsets
        :param out_height: height of the output feature map of the convolution
        :param out_width: width of the output feature map of the convolution
        :param N: kernel height × width, i.e. H_kernel * W_kernel
        :return: the (floating point) sampling coordinates of the deformable kernel
"""
p0 = DeformConv2d._calc_p0(out_height, out_width, N)
pn = self._calc_pn(N)
# [1, H_out, W_out, 2 * H_kernel * W_kernel] + [1, 1, 1, 2 * H_kernel * W_kernel] +
# [batch_size, H_out, W_out, 2 * H_kernel * W_kernel] =>
# [batch_size, H_out, W_out, 2 * H_kernel * W_kernel]
p = p0 + pn + offset_field
return p
@staticmethod
def _meshgrid2indexpair(x, y):
"""
        Convert meshgrid coordinates into (batch, x, y) index tuples for gather_nd.
:param x: [batch_size, H_out, W_out, H_kernel * W_kernel]
:param y: [batch_size, H_out, W_out, H_kernel * W_kernel]
:return:
"""
batch_size, out_height, out_width, N = x.get_shape()
batch_axis = repeat_elements(arange(batch_size), rep=out_height * out_width * N, axis=0)
batch_axis = reshape(batch_axis, (1, -1))
flattened_x = reshape(x, (1, -1))
flattened_y = reshape(y, (1, -1))
# [b*h*w*N, 3]
index = concatenate((batch_axis, flattened_x, flattened_y), axis=0)
index = transpose(index)
# [b, h*w*N, 3]
index = reshape(index, shape=(batch_size, -1, 3))
return index
@staticmethod
def _calc_xq(inputs, q, N):
"""
        Gather pixel values from the input feature map at the given coordinates.
        :param inputs: input feature map of shape [batch_size, H_in, W_in, channel]
        :param q: pixel coordinates of shape [batch_size, H_out, W_out, 2 * H_kernel * W_kernel]
        :return: the gathered pixel values
"""
x = q[..., :N]
y = q[..., N:]
batch_size, out_height, out_width, _ = x.get_shape()
        # the shape of tf.gather_nd(params, indices) is
        # indices.shape[:-1] + params.shape[indices.shape[-1]:]
        # so the gathered output has shape [batch_size, H_out * W_out * N, channel]
index = DeformConv2d._meshgrid2indexpair(x, y)
x_offset = gather_nd(inputs, indices=index)
# [batch_size, H_out, W_out, N, channel]
x_offset = reshape(x_offset, (batch_size, out_height, out_width, N, -1))
return x_offset
def _bilinear_interpolation(self, p, N, inputs):
"""
        Use bilinear interpolation to turn the floating point coordinates into actual pixel values.
y
lt| rt
+---+
| . |
+---+-->x
lb rb
        :param p: (floating point) sampling coordinates of the deformable kernel
        :param N: kernel H_kernel * W_kernel
        :param inputs: the input feature map
:return:
"""
_, in_height, in_width, channel = inputs.get_shape()
in_height = float(in_height.value)
in_width = float(in_width.value)
        # clip_by_value p
        # restrict the deformable sampling coordinates to the bounds of the input feature map
p = concatenate([clip_by_value(p[..., :N], 0, in_height - 1),
clip_by_value(p[..., N:], 0, in_width - 1)],
axis=-1)
        # round down, e.g. (1.2, 3.5) => (1, 3)
        q_lb = floor(p)  # left bottom
        # clip the left-bottom corner to the bounds of the input feature map
q_lb = concatenate([clip_by_value(q_lb[..., :N], 0, in_height - 1),
clip_by_value(q_lb[..., N:], 0, in_width - 1)],
axis=-1)
        # round up, e.g. (1.2, 3.5) => (2, 4)
        q_rt = ceil(p)  # right top
        # clip the right-top corner to the bounds of the input feature map
q_rt = concatenate([clip_by_value(q_rt[..., :N], 0, in_height - 1),
clip_by_value(q_rt[..., N:], 0, in_width - 1)],
axis=-1)
        # left-top corner, i.e. (1, 4) in the example above
        q_lt = concatenate([q_lb[..., :N], q_rt[..., N:]], axis=-1)
        # right-bottom corner, i.e. (2, 3) in the example above
        q_rb = concatenate([q_rt[..., :N], q_lb[..., N:]], axis=-1)
# bilinear kernel (b, h, w, N)
        # map coordinates to pixel values with the bilinear formula:
        # (x1-x)×(y1-y)×Pixel(x0,y0) +
        # (x-x0)×(y1-y)×Pixel(x1,y0) +
        # (x1-x)×(y-y0)×Pixel(x0,y1) +
        # (x-x0)×(y-y0)×Pixel(x1,y1)
        # (x1-x)×(y1-y), i.e. |rt-p|
        g_rt = (q_rt[..., :N] - p[..., :N]) * (q_rt[..., N:] - p[..., N:])
        # (x-x0)×(y1-y), i.e. |lt-p|
        g_lt = (p[..., :N] - q_lt[..., :N]) * (q_lt[..., N:] - p[..., N:])
        # (x1-x)×(y-y0), i.e. |rb-p|
        g_rb = (q_rb[..., :N] - p[..., :N]) * (p[..., N:] - q_rb[..., N:])
        # (x-x0)×(y-y0), i.e. |lb-p|
        g_lb = (q_lb[..., :N] - p[..., :N]) * (q_lb[..., N:] - p[..., N:])
# Pixel(x0, y0)
x_q_lb = self._calc_xq(inputs, q_lb, N)
# Pixel(x1, y0)
x_q_rb = self._calc_xq(inputs, q_rb, N)
# Pixel(x0, y1)
x_q_lt = self._calc_xq(inputs, q_lt, N)
# Pixel(x1, y1)
x_q_rt = self._calc_xq(inputs, q_rt, N)
        # to take the element-wise (Hadamard) product of g_rt with x_q_lb, expand
        # [batch_size, H_out, W_out, H_kernel * W_kernel] to [batch_size, H_out, W_out, H_kernel * W_kernel, channel]
g_rt = repeat_elements(expand_dims(g_rt, axis=-1), channel, axis=-1)
g_lt = repeat_elements(expand_dims(g_lt, axis=-1), channel, axis=-1)
g_rb = repeat_elements(expand_dims(g_rb, axis=-1), channel, axis=-1)
g_lb = repeat_elements(expand_dims(g_lb, axis=-1), channel, axis=-1)
x_offset = g_rt * x_q_lb + g_lt * x_q_rb + g_rb * x_q_lt + g_lb * x_q_rt
return x_offset
def compute_output_shape(self, input_shape):
"""
        The output shape is [batch_size, H_out*H_kernel, W_out*W_kernel, channel].
:param input_shape: [batch_size, H_in, W_in, channel]
:return:
"""
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
        assert len(new_space) == 2
return (input_shape[0], new_space[0] * self.kernel_size[0],
new_space[1] * self.kernel_size[1], input_shape[-1])
def call(self, inputs, **kwargs):
"""
        Learn the per-position offsets of the convolution kernel from the input feature map.
        :param inputs: input feature map of shape [batch_size, H_in, W_in, C_in]
:param kwargs:
:return:
"""
# inputs shape [batch_size, H_in, W_in, C_in]
inputs = spatial_2d_padding(inputs)
# shape [batch_size, H_out, W_out, 2 * H_kernel * W_kernel]
offset_field = conv2d(
inputs,
self.kernel,
strides=self.strides,
padding=self.padding,
data_format=None,
dilation_rate=self.dilation_rate)
offset_shape = offset_field.get_shape()
batch_size = offset_shape[0].value
# N = H_kernel * W_kernel
N = offset_shape[-1].value // 2
out_height = offset_shape[1].value
out_width = offset_shape[2].value
p = self._calc_p(offset_field, out_height, out_width, N)
x_offset = self._bilinear_interpolation(p, N, inputs)
        # reshape the result to [batch_size, H_out*H_kernel, W_out*W_kernel, channel]
x_offset = reshape(x_offset, (batch_size, out_height*self.kernel_size[0],
out_width*self.kernel_size[1], -1))
return x_offset
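# Rough usage sketch (comments only, since the exact Keras / TF 1.x setup is assumed
# and not shown here; the input must have a static batch size because `call` reads
# `.value` off the shape dimensions):
#
#   from keras.layers import Input
#   from keras.models import Model
#   inp = Input(batch_shape=(4, 32, 32, 3))       # hypothetical sizes
#   sampled = DeformConv2d(kernel_size=3)(inp)    # deformably sampled feature map,
#                                                 # see compute_output_shape for its shape
#   model = Model(inp, sampled)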
| 2.09375 | 2 |
jsonrpc/tests/test_backend_django/settings.py | DMantis/json-rpc | 409 | 12771439 | SECRET_KEY = 'secret'
ROOT_URLCONF = 'jsonrpc.tests.test_backend_django.urls'
ALLOWED_HOSTS = ['testserver']
DATABASE_ENGINE = 'django.db.backends.sqlite3'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
JSONRPC_MAP_VIEW_ENABLED = True
| 1.296875 | 1 |
registry/packages.py | AraOshin/civic-sandbox-backend | 1 | 12771440 | <reponame>AraOshin/civic-sandbox-backend<gh_stars>1-10
packages = {
'Greenspace': {
'description': '',
'foundations' : [
'neighborhood_development_18_027',
'neighborhood_development_18_021'
],
'default_foundation' : 'neighborhood_development_18_027',
'slides' : [
'neighborhood_development_18_003',
'neighborhood_development_18_004',
'neighborhood_development_18_005'
],
'default_slide' : [
'neighborhood_development_18_003',
'neighborhood_development_18_004'
],
},
'Food Access': {
'description': '',
'foundations' : [
'Percent Renter Occupied',
'poverty rate'
],
'default_foundation' : 'poverty rate',
'slides' : [
'neighborhood_development_18_007',
'neighborhood_development_18_008',
'neighborhood_development_18_005',
'neighborhood_development_18_012'
],
'default_slide' : 'neighborhood_development_18_008'
},
'Sweeps': {
'description': '',
'foundations' : [
'neighborhood_development_18_020',
'neighborhood_development_18_021',
'neighborhood_development_18_027',
'neighborhood_development_18_034',
'neighborhood_development_18_028',
'neighborhood_development_18_030'
],
'default_foundation' : 'neighborhood_development_18_020',
'slides' : [
'neighborhood_development_18_010',
'neighborhood_development_18_011',
'neighborhood_development_18_009',
'neighborhood_development_18_003',
'neighborhood_development_18_004',
],
'default_slide' : [
'neighborhood_development_18_010',
],
},
'Bikes': {
'description': '',
'foundations' : [
'neighborhood_development_18_021'
],
'default_foundation' : 'neighborhood_development_18_021',
'slides' : [
'neighborhood_development_18_001',
'neighborhood_development_18_002',
'neighborhood_development_18_006',
'neighborhood_development_18_014',
'neighborhood_development_18_013'
],
'default_slide' : [
'neighborhood_development_18_001',
'neighborhood_development_18_002'
],
},
'Disaster Resilience': {
'description': '',
'foundations' : [
'disaster_resilience_18_002',
'disaster_resilience_18_003',
'disaster_resilience_18_004',
'disaster_resilience_18_005',
],
'default_foundation' : 'disaster_resilience_18_002',
'slides' : [
'disaster_resilience_18_001',
],
'default_slide' : [
'disaster_resilience_18_001',
]
},
'Evictions': {
'description': '',
'foundations' : [
'neighborhood_development_18_022',
'neighborhood_development_18_023',
'neighborhood_development_18_024',
'neighborhood_development_18_025',
'neighborhood_development_18_026',
'neighborhood_development_18_032',
],
'default_foundation' : 'neighborhood_development_18_024',
'slides' : [
'neighborhood_development_18_012',
'neighborhood_development_18_007',
'neighborhood_development_18_009',
'housing_affordability_18_001'
],
'default_slide' : ['neighborhood_development_18_009',]
},
'Voters': {
'description': '',
'foundations' : [
'neighborhood_development_18_015',
'neighborhood_development_18_016',
'neighborhood_development_18_017',
'neighborhood_development_18_018',
'neighborhood_development_18_019'
],
'default_foundation' : 'voters18',
'slides' : [
'neighborhood_development_18_007',
'neighborhood_development_18_004',
'neighborhood_development_18_006',
'neighborhood_development_18_002',
],
'default_slide' : []
},
'Transportation': {
'description': '',
'foundations' : [
'transportation_systems_18_005',
],
'default_foundation' : 'transportation_systems_18_005',
'slides' : [
'transportation_systems_18_001',
'transportation_systems_18_003',
'transportation_systems_18_004',
],
'default_slide' : [
'transportation_systems_18_001',
'transportation_systems_18_003',
'transportation_systems_18_004',
],
}
} | 1.210938 | 1 |
instabot/user/user.py | SOUFIANEZAZA/instapro | 84 | 12771441 | import hashlib
import json
import os
import pickle
import uuid
import requests
import logging
from .. import config
from ..api.request import Request
users_folder_path = config.PROJECT_FOLDER_PATH + config.USERS_FOLDER_NAME
class Dotdict(dict):
    """dict subclass whose items can also be read and written as attributes (d.key)."""
    __getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
__repr__ = dict.__repr__
def __str__(self):
s = ""
for key, value in self.items():
s += "%s: %s\n" % (str(key), str(value))
return s
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__.update(d)
class User(object):
def __init__(self, username, password):
self.name = username
self.password = password
self.device_uuid = str(uuid.uuid4())
self.guid = str(uuid.uuid4())
self.device_id = 'android-' + \
hashlib.md5(username.encode('utf-8')).hexdigest()[:16]
self.session = requests.Session()
self.id = None
        self.logged_in = False
self.counters = Dotdict({})
self.limits = Dotdict({})
self.delays = Dotdict({})
self.filters = Dotdict({})
if not self.login():
return None
def login(self):
self.session.headers.update({
'Connection': 'close',
'Accept': '*/*',
'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Cookie2': '$Version=1',
'Accept-Language': 'en-US',
'User-Agent': config.USER_AGENT})
data = {
'phone_id': self.device_uuid,
'username': self.name,
'guid': self.guid,
'device_id': self.device_id,
            'password': self.password,
'login_attempt_count': '0'}
message = Request.send(
self.session, 'accounts/login/', json.dumps(data))
if message is None:
logging.getLogger('main').warning(self.name + ' login failed')
self.logged_in = False
self.save()
return False
self.id = str(message["logged_in_user"]["pk"])
self.rank_token = "%s_%s" % (self.id, self.guid)
self.logged_in = True
logging.getLogger('main').info(self.name + ' successful authorization')
self.save()
return True
def save(self):
if not os.path.exists(users_folder_path):
if not os.path.exists(config.PROJECT_FOLDER_PATH):
os.makedirs(config.PROJECT_FOLDER_PATH)
os.makedirs(users_folder_path)
output_path = users_folder_path + "%s.user" % self.name
with open(output_path, 'wb') as foutput:
pickle.dump(self, foutput)
def delete(self):
input_path = users_folder_path + "%s.user" % self.name
if os.path.exists(input_path):
os.remove(input_path)
def dump(self):
items = self.__dict__.copy()
# del items["counters"]
return json.dumps(items, indent=2)
| 2.171875 | 2 |
setup.py | frague59/django-yamlfield | 0 | 12771442 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from distutils.core import Command
class TestCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
from django.conf import settings
settings.configure(
DATABASES={
'default': {
'NAME': ':memory:',
'ENGINE': 'django.db.backends.sqlite3'
}
},
MIDDLEWARE_CLASSES=(),
INSTALLED_APPS=('yamlfield',)
)
from django.core.management import call_command
import django
django.setup()
call_command('test', 'yamlfield')
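# TestCommand is registered below via cmdclass, so `python setup.py test` configures a
# throwaway in-memory SQLite database and runs the yamlfield test suite.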
setup(
name='django-yamlfield',
version='1.0.3',
description='A Django database field for storing YAML data',
author='The Los Angeles Times Data Desk',
author_email='<EMAIL>',
url="http://django-yamlfield.readthedocs.io/",
packages=find_packages(),
include_package_data=True,
license="MIT",
install_requires=(
'PyYAML>=3.10',
'six>=1.4.1'
),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'License :: OSI Approved :: MIT License',
],
cmdclass={'test': TestCommand,}
)
| 1.945313 | 2 |
2020/Day 21/advent.py | Urgau/advent_of_code | 0 | 12771443 | #!/usr/bin/env python3
import sys
def main(args):
with open(args[1], "r") as file:
ingredients = []
allergens = {}
repices = []
for line in file:
line = line.strip().replace(",", " ")[:-1]
sIngredients, sAllergens = line.split("(contains")
lIngredients, lAllergens = sIngredients.split(), sAllergens.split()
repices.append(lIngredients)
for ingredient in lIngredients:
if ingredient not in ingredients:
ingredients.append(ingredient)
for allergen in lAllergens:
if allergen not in allergens:
allergens[allergen] = lIngredients
else:
allergens[allergen] = list(set(allergens[allergen]).intersection(lIngredients))
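        # Each allergen can only come from an ingredient shared by every recipe that
        # lists it, so the intersections above narrow each allergen's candidates; the
        # elimination loop below pins allergens down to a single ingredient and removes
        # resolved ingredients from the remaining candidate lists.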
while True:
exclusives = []
for allergen in allergens:
if len(allergens[allergen]) == 1:
exclusives.extend(allergens[allergen])
if len(exclusives) == len(allergens):
break
for allergen in allergens:
if len(allergens[allergen]) == 1:
continue
allergens[allergen] = list(set(allergens[allergen]).difference(exclusives))
if len(allergens[allergen]) == 1:
exclusives.extend(allergens[allergen])
ingredientsWithAllergen = []
for allergen in allergens:
ingredientsWithAllergen.extend(allergens[allergen])
ingredientsWithoutAllergen = list(set(ingredients).difference(ingredientsWithAllergen))
somme = 0
for repice in repices:
for ingredient in ingredientsWithoutAllergen:
somme += repice.count(ingredient)
print("How many times do ingredients without allergens appear? {}".format(somme))
allergensInversed = dict((v[0], k) for k, v in allergens.items())
ingredientsWithAllergenSorted = sorted(ingredientsWithAllergen, key=lambda a: allergensInversed[a])
print("What is your canonical dangerous ingredient list? {}"
.format(','.join(ingredientsWithAllergenSorted)))
if __name__ == "__main__":
main(sys.argv)
| 3.4375 | 3 |
13Dec2016.py | andela-dmigwi/project-euler-solutions | 0 | 12771444 | <reponame>andela-dmigwi/project-euler-solutions
# Solution 2
# def fib(num, l=[]):
# a=b=1
# while(True):
# a+=b
# if a % 2 == 0:
# l.append(a)
# a,b=b,a
# if a >= num:
# break
# return l
# print(sum(fib(4000000)))
# Solution 3
# import math
# def factors(num):
# factors_list = []
# for value in range(2, math.ceil(math.sqrt(num))):
# if not num % value:
# factors_list.append(value)
# factors_list.append(num / value)
# return factors_list
# def prime_factors():
# facts = factors(600851475143)
# primes = []
# for num in facts:
# for val in range(2, math.ceil(math.sqrt(num))):
# if not num % val:
# break
# else:
# primes.append(num)
# return primes
# print(prime_factors())
# Solution 4
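# Largest palindrome that is a product of two factors below 1000: collect every
# palindromic product (a number whose digit string equals its reverse) and print the maximum.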
palindrome = []
for val in range(2, 1000)[::-1]:
for value in range(2, val)[::-1]:
str_int = str(val * value)
if list(str_int) == list(str_int)[::-1]:
palindrome.append((val * value))
print (max(palindrome))
| 3.203125 | 3 |
tests/test_flaskapp.py | ricardochaves/flask_template | 1 | 12771445 | import pytest
from app import app
@pytest.fixture
def client():
client = app.test_client()
yield client
def test_empty_db(client):
"""Start with a blank database."""
rv = client.get("/")
assert b"Hello World!" in rv.data
| 2.34375 | 2 |
tests/test_projection.py | SnowEx/snowexsql | 5 | 12771446 | import shutil
from os import mkdir, remove
from os.path import dirname, isdir, isfile, join
import pytest
from geoalchemy2.shape import to_shape
from geoalchemy2.types import WKTElement
from numpy.testing import assert_almost_equal
from rasterio.crs import CRS
from snowexsql.projection import *
@pytest.mark.parametrize('info, expected', [
# Test we add UTM info when its not provided
({'latitude': 39.039, 'longitude': -108.003}, {'easting': 759397.644, 'northing': 4325379.675, 'utm_zone': 12}),
# Test we add lat long when its not provided
({'easting': 759397.644, 'northing': 4325379.675, 'utm_zone': 12}, {'latitude': 39.039, 'longitude': -108.003}),
# Test ignoring easting in another projection
({'latitude': 39.008078, 'longitude': -108.184794, 'utm_wgs84_easting': 743766.4795, 'utm_wgs84_northing': 4321444.155},
{'easting': 743766.480, 'northing': 4321444.155}),
# Confirm we force the zone to zone 12
({'latitude':39.097464, 'longitude':-107.862476}, {'northing':4332280.1658, 'easting':771338.607})
])
def test_reproject_point_in_dict(info, expected):
"""
Test adding point projection information
"""
result = reproject_point_in_dict(info)
for k, v in expected.items():
assert k in result
if type(v) == float:
assert_almost_equal(v, result[k], 3)
else:
assert v == result[k]
def test_add_geom():
"""
Test add_geom adds a WKB element to a dictionary containing easting/northing info
"""
info = {'easting': 759397.644, 'northing': 4325379.675, 'utm_zone': 12}
result = add_geom(info, 26912)
# Ensure we added a geom key and value that is WKTE
assert 'geom' in result.keys()
assert type(result['geom']) == WKTElement
# Convert it to pyshapely for testing/ data integrity
p = to_shape(result['geom'])
assert p.x == info['easting']
assert p.y == info['northing']
assert result['geom'].srid == 26912
class TestReprojectRasterByEPSG():
output_f = join(dirname(__file__), 'test.tif')
# def teardown_method(self):
# '''
# Remove our output file
# '''
# if isfile(self.output_f):
# remove(self.output_f)
@classmethod
def teardown_method(self):
remove(self.output_f)
@pytest.mark.parametrize("input_f, epsg, bounds", [
('uavsar_latlon.amp1.real.tif', 26912,
(748446.1945536422, 4325651.650770078, 751909.2857505103, 4328702.971977075)),
])
def test_reproject(self, input_f, epsg, bounds):
"""
test reprojecting a raster from EPSG to another
"""
d = dirname(__file__)
f = join(d, 'data', input_f)
reproject_raster_by_epsg(f, self.output_f, epsg)
with rasterio.open(self.output_f) as dataset:
dbounds = dataset.bounds
dcrs = dataset.crs
# Test our epsg was assigned
assert CRS.from_epsg(epsg) == dataset.crs
# Assert bounds
for i, v in enumerate(bounds):
assert_almost_equal(v, dataset.bounds[i], 3)
| 1.976563 | 2 |
vqa_experiments/s_mac/s_write_unit.py | Bidur-Khanal/REMIND | 67 | 12771447 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
#
# Copyright (C) IBM Corporation 2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
s_write_unit.py:
- Implementation of the :py:class:`WriteUnit` for the ``S-MAC`` network (simplified MAC).
- Cf https://arxiv.org/abs/1803.03067 for the reference MAC paper (Hudson and Manning, ICLR 2018).
"""
__author__ = "<NAME> & <NAME>"
from torch.nn import Module
from vqa_experiments.s_mac.utils_mac import linear
class WriteUnit(Module):
"""
Implementation of the :py:class:`WriteUnit` for the ``S-MAC`` model.
.. note::
This implementation is part of a simplified version of the MAC network, where modifications regarding \
the different units have been done to reduce the number of linear layers (and thus number of parameters).
This is part of a submission to the ViGIL workshop for NIPS 2018. Feel free to use this model and refer to it \
with the following BibTex:
::
@article{marois2018transfer,
title={On transfer learning using a MAC model variant},
author={<NAME> <NAME> <NAME> <NAME>},
journal={arXiv preprint arXiv:1811.06529},
year={2018}
}
"""
def __init__(self, dim):
"""
Constructor for the :py:class:`WriteUnit` of the ``S-MAC`` model.
:param dim: global 'd' hidden dimension.
:type dim: int
"""
# call base constructor
super(WriteUnit, self).__init__()
# linear layer to create the new memory state from the current read vector (coming from the read unit)
self.concat_layer = linear(dim, dim, bias=True)
def forward(self, read_vector):
"""
Forward pass of the :py:class:`WriteUnit` for the ``S-MAC`` model.
:param read_vector: current read vector (output of the :py:class:`ReadUnit`), shape `[batch_size x dim]`.
:type read_vector: :py:class:`torch.Tensor`
:return: current memory state, shape [batch_size x mem_dim] (:py:class:`torch.Tensor`).
"""
return self.concat_layer(read_vector)
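# Minimal sketch of exercising the unit in isolation (illustrative; `linear` from
# utils_mac is assumed to behave like torch.nn.Linear with the given in/out sizes):
#
#   import torch
#   unit = WriteUnit(dim=512)
#   read_vec = torch.randn(8, 512)    # [batch_size x dim]
#   new_memory = unit(read_vec)       # [batch_size x dim]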
| 1.28125 | 1 |
bot/__init__.py | SmartManoj/run-py-bot | 0 | 12771448 | '''Run Python Code on Telegram [BOT]'''
| 1.085938 | 1 |
zen.py | plovefish/python-sdprac | 0 | 12771449 | import urllib.request
from bs4 import BeautifulSoup
from random import randint
# fetch the full html
fp = urllib.request.urlopen("https://www.python.org/dev/peps/pep-0020/")
mybytes = fp.read()
mystr = mybytes.decode("utf8")
fp.close()
# fetch the zen of python
soup = BeautifulSoup(mystr, 'html.parser')
txt = soup.pre.string
list_lines = txt.splitlines()
index_one = randint(1, 19)
print(list_lines[index_one])
| 3.140625 | 3 |
mmdet2trt/core/__init__.py | tehkillerbee/mmdetection-to-tensorrt | 496 | 12771450 | <reponame>tehkillerbee/mmdetection-to-tensorrt
from .anchor import * # noqa: F401,F403
from .bbox import * # noqa: F401,F403
from .post_processing import * # noqa: F401,F403
| 1.125 | 1 |