max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
video_processing/video_constants.py | UrosOgrizovic/CelebCaption | 2 | 12772051 | <filename>video_processing/video_constants.py
# Drawing colors, OpenCV-style (B, G, R) tuples.
RECTANGLE_COLOR = (255, 200, 0)  # face-rectangle color
LABEL_COLOR = (255, 0, 0)  # label-text color
elephant/__init__.py | Qkley/elephant | 0 | 12772052 | <filename>elephant/__init__.py
# -*- coding: utf-8 -*-
"""
Elephant is a package for the analysis of neurophysiology data, based on Neo.
:copyright: Copyright 2014-2019 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from . import (statistics,
spike_train_generation,
spike_train_correlation,
unitary_event_analysis,
cubic,
spectral,
kernels,
spike_train_dissimilarity,
spike_train_surrogates,
signal_processing,
current_source_density,
change_point_detection,
phase_analysis,
sta,
conversion,
neo_tools,
spade,
cell_assembly_detection,
waveform_features)
try:
from . import pandas_bridge
from . import asset
except ImportError:
# requirements-extras are missing
pass
def _get_version():
    """Return the package version read from the bundled VERSION file."""
    import os
    version_path = os.path.join(os.path.dirname(__file__), 'VERSION')
    with open(version_path) as version_file:
        return version_file.read().strip()
__version__ = _get_version()
| 1.734375 | 2 |
features/environment.py | dandeliondeathray/metamorph | 0 | 12772053 | <reponame>dandeliondeathray/metamorph<filename>features/environment.py
import pymetamorph.metamorph as morph
from confluent_kafka import Producer, Consumer
class Service:
    """Thin wrapper around a Kafka producer/consumer pair used by the BDD steps."""

    def __init__(self):
        """Connect a producer and a consumer to the local Kafka broker."""
        self._producer = Producer({'bootstrap.servers': 'localhost:9092'})
        self._consumer = Consumer({'bootstrap.servers': 'localhost:9092', 'group.id': 'mygroup',
                                   'default.topic.config': {'auto.offset.reset': 'smallest'}})
        # Non-matching messages seen while waiting (kept for debugging).
        self._received = []

    def terminate(self):
        """Close the consumer and drop both client references."""
        self._consumer.close()
        self._consumer = None
        self._producer = None

    def subscribe_to(self, *topics):
        """Subscribe the consumer to the given topic names."""
        print("Topics: '{}'".format(list(topics)))
        self._consumer.subscribe(list(topics))

    def send_message(self, key, value, topic):
        """Produce a single key/value message to *topic* and flush immediately."""
        print("Sending {}, {} to topic {}".format(key, value, topic))
        self._producer.produce(topic, value=value, key=key)
        self._producer.flush()

    def await_message(self, value, topic):
        """Poll up to ten times (1s each) for *value* on *topic*.

        Returns the matching message, or raises RuntimeError if it never
        arrives.  Non-matching messages are stashed in self._received.
        """
        for _ in range(10):
            message = self._consumer.poll(timeout=1.0)
            print("Polled:", message)
            if message is None:
                continue
            if message.error():
                # Broker-level errors are skipped, same as the happy path miss.
                continue
            print("Topic: '{}', Value: '{}'".format(message.topic(), message.value()))
            decoded = message.value().decode('UTF-8')
            if message.topic() == topic and decoded == value:
                return message
            self._received.append(message)
        raise RuntimeError("No message {} in topic {} was received".format(value, topic))
def before_feature(context, feature):
    """Behave hook: create and connect a Metamorph controller for the feature."""
    context.metamorph = morph.Metamorph()
    context.metamorph.connect()
def before_scenario(context, scenario):
    """Behave hook: reset the Kafka topics and attach a fresh Service per scenario."""
    context.metamorph.request_kafka_reset(["test_topic", "events"])
    context.metamorph.await_reset_complete()
    context.service = Service()
def after_scenario(context, scenario):
    """Behave hook: tear down the scenario's Kafka clients."""
    context.service.terminate()
| 2.5 | 2 |
llvm/utils/lit/tests/Inputs/shtest-not/fail.py | medismailben/llvm-project | 2,338 | 12772054 | #!/usr/bin/env python
# Lit test helper: dump the environment, then exit non-zero so the
# surrounding "not" shtest can assert on the failure.
import print_environment
import sys
print_environment.execute()
sys.exit(1)
| 1.367188 | 1 |
test/samplesheet/test_initialize.py | iosonofabio/singlet | 11 | 12772055 | <reponame>iosonofabio/singlet
#!/usr/bin/env python
# vim: fdm=indent
'''
author: <NAME>
date: 15/08/17
content: Test SampleSheet class.
'''
def test_initialize():
    """Smoke test: loading the example TSV sheet by name must not raise."""
    from singlet.samplesheet import SampleSheet
    sheet = SampleSheet.from_sheetname('example_sheet_tsv')
def test_initialize_fromdataset():
    """Smoke test: loading a sheet from a dataset name must not raise."""
    from singlet.samplesheet import SampleSheet
    sheet = SampleSheet.from_datasetname('example_dataset')
| 1.6875 | 2 |
Swati Singh/volume_of_sphere_with_radius_6.py | asumit499/Python-BootCamp | 4 | 12772056 | # 18. Write a language program to get the volume of a sphere with radius 6
import math


def sphere_volume(radius):
    """Return the volume of a sphere of the given radius: (4/3)*pi*r**3."""
    return (4 / 3) * math.pi * radius ** 3


# Fix: use math.pi instead of the hard-coded 3.14 for an accurate result,
# and expose the formula as a reusable function.
radius = 6
volume = sphere_volume(radius)
print("Volume of sphere with radius 6= ", volume)
| 4.15625 | 4 |
caffe2/python/layers/sampling_train.py | KevinKecc/caffe2 | 585 | 12772057 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package sampling_train
# Module caffe2.python.layers.sampling_train
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer, get_layer_class
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
class SamplingTrain(ModelLayer):
    """Wrap a prediction layer so it trains on a sampled subset of outputs.

    At serving time the wrapped layer runs unchanged; at training time its
    parameter blobs are gathered down to ``input_record.indices`` and,
    optionally, ``log(sampling_prob)`` is subtracted from the output to
    correct the bias introduced by sampling.
    """
    def __init__(
        self,
        model,
        input_record,
        prediction_layer,
        output_dims,
        subtract_log_odd=True,
        name='sampling_train',
        **kwargs
    ):
        super(SamplingTrain, self).__init__(
            model, name, input_record, **kwargs
        )
        layer_class = get_layer_class(prediction_layer)
        # The wrapped layer must opt into sampled training.
        assert issubclass(layer_class, SamplingTrainableMixin)
        assert 'indices' in input_record
        assert isinstance(input_record.indices, schema.Scalar),\
            "input_record.indices is expected to be a schema.Scalar"
        assert 'input' in input_record
        self.subtract_log_odd = subtract_log_odd
        if self.subtract_log_odd:
            # Needed below to de-bias the sampled outputs.
            assert 'sampling_prob' in input_record
        self._prediction_layer = layer_class(
            model,
            input_record.input,
            output_dims=output_dims,
            **kwargs
        )
        # One "<param>_sampled" blob per full parameter blob; these are
        # filled by the Gather ops emitted in add_train_ops().
        self._prediction_layer.train_param_blobs = [
            model.net.NextBlob(str(blob) + '_sampled')
            for blob in self._prediction_layer.param_blobs
        ]
        self.params = self._prediction_layer.params
        self.output_schema = self._prediction_layer.output_schema
    def add_ops(self, net):
        """Serving path: delegate straight to the wrapped prediction layer."""
        self._prediction_layer.add_ops(net)
    def add_train_ops(self, net):
        """Training path: gather sampled params, run the layer, de-bias output."""
        for full_blob, sampled_blob in zip(
            self._prediction_layer.param_blobs,
            self._prediction_layer.train_param_blobs
        ):
            net.Gather([full_blob, self.input_record.indices()], sampled_blob)
        self._prediction_layer.add_train_ops(net)
        if not self.subtract_log_odd:
            return
        # output -= log(sampling_prob), broadcast over the batch.
        log_q = net.Log(self.input_record.sampling_prob(),
                        net.NextScopedBlob("log_q"))
        net.Sub([self.output_schema(), log_q], self.output_schema(),
                broadcast=1, use_grad_hack=1)
| 1.945313 | 2 |
SchoolApp/apps/classes/models.py | Ceres445/Team-10-Python-Project | 4 | 12772058 | from datetime import timedelta
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.crypto import get_random_string
from invitations.adapters import get_invitations_adapter
from invitations.base_invitation import AbstractBaseInvitation
from apps.home.models import Classes
class Assignment(models.Model):
    """An assignment posted to a class, with an optional question file and deadline."""
    # Owning class; cascading delete removes its assignments.
    key_class = models.ForeignKey(
        Classes, on_delete=models.CASCADE, related_name="assignments"
    )
    title = models.TextField(max_length=400, default="Title")
    # Optional uploaded question sheet.
    questions = models.FileField(blank=True, upload_to="questions/")
    created_at = models.DateTimeField(auto_now_add=True)
    # Optional deadline; when unset the assignment auto-expires (see is_active).
    ends_at = models.DateTimeField(blank=True, null=True)
    def is_active(self):
        """Return True while the assignment accepts submissions.

        With no explicit deadline, it stays active for 120 days after creation.
        """
        if self.ends_at is None:
            if self.created_at < (timezone.now() - timedelta(days=120)):
                return False
            return True
        if self.ends_at > timezone.now():
            return True
        return False
    def __str__(self):
        return f"{self.key_class.class_name} - {self.title}"
class Upload(models.Model):
    """A user's file submission for an assignment."""
    assignment = models.ForeignKey(
        Assignment, on_delete=models.CASCADE, related_name="upload"
    )
    author = models.ForeignKey(User, on_delete=models.CASCADE, related_name="uploads")
    file = models.FileField(upload_to="uploads", null=False)
    created_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return f"{self.author.username} - {self.assignment}"
class ClassInvitation(AbstractBaseInvitation):
    """Email invitation to join a specific class (django-invitations based)."""
    # unique=False: the same address may be invited to several classes.
    email = models.EmailField(unique=False, verbose_name='email_address',
                              max_length=420)
    created = models.DateTimeField(verbose_name='created',
                                   default=timezone.now)
    invited_class = models.ForeignKey(Classes, on_delete=models.CASCADE, related_name='invitees')
    @classmethod
    def create(cls, email, inviter=None, **kwargs):
        """Create an invitation with a fresh 64-char random key.

        'invited_class' is popped from kwargs; remaining kwargs pass through
        to the manager's create().
        """
        key = get_random_string(64).lower()
        instance = cls._default_manager.create(
            email=email,
            invited_class=kwargs.pop('invited_class', None),
            key=key,
            inviter=inviter,
            **kwargs)
        return instance
    def key_expired(self):
        """Return True once the invite is more than 3 days past its send time.

        NOTE(review): assumes self.sent is set; if called before
        send_invitation() this raises on a None 'sent' — confirm callers
        only check after sending.
        """
        expiration_date = (
            self.sent + timedelta(days=3))
        return expiration_date <= timezone.now()
    def send_invitation(self, request, **kwargs):
        """Email the accept/register links to self.email, then record 'sent'."""
        invite_url = reverse('AcceptInvite',
                             args=[self.key])
        ctx = kwargs
        ctx.update({
            'invite_url': request.build_absolute_uri(invite_url),
            'register_url': request.build_absolute_uri(reverse("register")),
            'site_name': self.invited_class.class_name,
            'email': self.email,
            'key': self.key,
            'inviter': self.inviter,
        })
        email_template = 'emails/email_invite'
        get_invitations_adapter().send_mail(
            email_template,
            self.email,
            ctx)
        self.sent = timezone.now()
        self.save()
    def __str__(self):
        return "Invite: {0}".format(self.email)
| 2.125 | 2 |
src/gym_checkers/envs/__init__.py | qorrect/boardgames | 0 | 12772059 | <reponame>qorrect/boardgames
from gym.envs.registration import register
from .checkers import CheckersEnv
# NOTE(review): import-time print is debug noise; consider logging instead.
print('NOW REGISTERING checkers-v0')
# Register the checkers environment with gym under the id 'checkers-v0'.
register(
    id='checkers-v0',
    entry_point='gym_checkers.envs:CheckersEnv',
)
| 1.429688 | 1 |
color_tools.py | SheffieldCao/BoYuanRodeLaneDetection | 0 | 12772060 | <reponame>SheffieldCao/BoYuanRodeLaneDetection<gh_stars>0
import cv2
import numpy as np
def select_yellow(image):
    '''
    Build a binary mask selecting yellow pixels in an RGB image.

    Attributes:
        Input:
            - image: RGB image
    '''
    # Threshold in HSV space, which separates hue from brightness.
    hsv_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    lower_bound = np.array([20, 60, 60])
    upper_bound = np.array([38, 174, 250])
    return cv2.inRange(hsv_image, lower_bound, upper_bound)
def select_white(image):
    '''
    Generate mask of specified color (white).

    Attributes:
        Input:
            - image: RGB image
    '''
    # Fix: the docstring was copy-pasted from select_yellow and wrongly said
    # "yellow".  White is thresholded directly in RGB space: all channels high.
    lower = np.array([170,170,170])
    upper = np.array([255,255,255])
    mask = cv2.inRange(image, lower, upper)
    return mask
| 3.125 | 3 |
tools/testing/plugin.py | kkallday/envoy | 1 | 12772061 | <gh_stars>1-10
#
# This is pytest plugin providing fixtures for tests.
#
import importlib
from contextlib import contextmanager, ExitStack
from typing import ContextManager, Iterator
from unittest.mock import patch
import pytest
@contextmanager
def nested(*contexts) -> Iterator[list]:
    """Enter every context manager in order; yield the list of entered values."""
    with ExitStack() as outer_stack:
        entered = []
        for cm in contexts:
            entered.append(outer_stack.enter_context(cm))
        yield entered
def _patches(*args, prefix: str = "") -> ContextManager:
    """Takes a list of module/class paths to patch and an optional prefix

    The prefix is used to prefix all of the paths

    The patches are applied in a nested set of context managers.
    The yields (mocks) are yielded as a tuple.
    """
    full_prefix = f"{prefix}." if prefix else ""
    mocks = []
    for target in args:
        if isinstance(target, (list, tuple)):
            # A (path, kwargs) pair configures the patch.
            target_path, patch_kwargs = target
            mocks.append(patch(f"{full_prefix}{target_path}", **patch_kwargs))
        else:
            mocks.append(patch(f"{full_prefix}{target}"))
    return nested(*mocks)
@pytest.fixture
def patches():
    """Expose the _patches helper as a pytest fixture."""
    return _patches
| 2.546875 | 3 |
app/api/atleta/tests.py | gahhhenrikk/gerenciador-de-equipes | 1 | 12772062 | <filename>app/api/atleta/tests.py
from django.urls import reverse
from rest_framework.test import APITestCase, APIClient
from rest_framework.views import status
from .models import Atleta
from .serializers import AtletaSerializer
from django.utils import timezone
class BaseViewTest(APITestCase):
    """Shared fixture helpers for the Atleta API tests."""
    client = APIClient()
    @staticmethod
    def criar_atleta(nome="", email="", nascimento=timezone.now(),peso=0, altura=0, detalhes="", documento="", condicao_fisica="EM CONDICOES", esporte_capacitado="Futebol", treinador=0, equipe=""):
        """Create an Atleta row, but only when every argument is non-default.

        NOTE(review): the guard below requires EVERY argument to differ from
        its default, so calls that pass e.g. condicao_fisica="EM CONDICOES"
        or esporte_capacitado="Futebol" (as several in configuracao do)
        silently create nothing.  Also 'nascimento' is compared against a
        *fresh* timezone.now(), not the default evaluated at import time.
        Confirm this filtering is intentional.
        """
        if nome != "" and email != "" and nascimento != timezone.now() and peso != 0 and altura != 0 and detalhes != "" and documento != "" and condicao_fisica != "EM CONDICOES" and esporte_capacitado != "Futebol" and treinador != 0 and equipe != "":
            Atleta.objects.create(nome=nome, email=email, nascimento=nascimento,peso=peso, altura=altura, detalhes=detalhes, documento=documento, condicao_fisica=condicao_fisica, esporte_capacitado=esporte_capacitado, treinador=treinador, equipe=equipe)
    def configuracao(self):
        """Populate five sample athletes used by the GET tests."""
        self.criar_atleta("<NAME>", "<EMAIL>", timezone.now(), 71, 1.65, "SEM DETALHES", "122345678", "SEM CONDICOES", "Futebol", 1, 'A1')
        self.criar_atleta("<NAME>", "<EMAIL>", timezone.now(),54, 1.62,"SEM DETALHES", "122342671","EM CONDICOES","Futebol", 1, 'A2')
        self.criar_atleta("<NAME>", "<EMAIL>", timezone.now(), 65, 1.74, "SEM DETALHES", "622325678", "EM CONDICOES", "Volei", 2, 'A1')
        self.criar_atleta("<NAME>", "<EMAIL>", timezone.now(),67, 1.61,"SEM DETALHES", "222341678","EM CONDICOES","Futebol", 1, 'A2')
        self.criar_atleta("<NAME>", "<EMAIL>", timezone.now(), 82, 1.65, "SEM DETALHES", "1322345678", "SEM CONDICOES", "Volei", 2, 'A1')
class GetAllAtletasTest(BaseViewTest):
    """GET endpoint tests for the full athlete listing."""
    def teste_recuperar_todos_atletas(self):
        """Ensure every athlete added in the setup method exists in the GET endpoint."""
        response = self.client.get(reverse("todos-atletas"))
        # Compare the API payload with what is actually stored in the db.
        expected = Atleta.objects.all()
        serialized = AtletaSerializer(expected, many=True)
        self.assertEqual(response.data, serialized.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
mysql_service.py | DhirenSc/claimit-flask | 0 | 12772063 | # this is the mysql service used to communicate with the backend
import mysql.connector
from datetime import datetime
from flask import jsonify
import json
# connector method for the spothole db
def connect():
    """Open and return a new MySQL connection.

    NOTE(review): user/passwd/database are blank placeholders — real
    credentials should come from configuration or the environment, not
    source code.
    """
    return mysql.connector.connect(
        host="localhost",
        user="",
        passwd="",
        database=""
    )
# post new claim
def post_claim_data(data):
    """Insert a new claim row built from *data*; return a status string.

    Expects keys: claimId, imageUrls, severity, userId, status, make,
    model, year, phoneNo.
    """
    db = connect()
    try:
        cursor = db.cursor()
        sql = "INSERT INTO __claims__ (claim_id, imageURL, severity, userId, status, make, model, vehicle_year, phone_no, created_date) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
        # NOTE(review): imageUrls is stored as str() of a Python list — confirm
        # readers expect that format.
        val = (data["claimId"], str(data["imageUrls"]), data["severity"], data["userId"], data["status"], data["make"], data["model"], data["year"], data["phoneNo"], datetime.utcnow())
        cursor.execute(sql, val)
        db.commit()
        return str(cursor.rowcount) + " record inserted."
    finally:
        # Fix: the connection was previously leaked on every call.
        db.close()
# get claims for a user
def get_user_claims_data(data):
    """Return all claims for data["userId"], newest first, as a JSON response."""
    db = connect()
    try:
        cursor = db.cursor()
        sql = "SELECT * FROM __claims__ WHERE userId = %s ORDER BY last_updated DESC"
        cursor.execute(sql, (data["userId"],))
        results = cursor.fetchall()
        # Map positional columns to the response schema.
        payload = [
            {'claimId': result[0], 'imageUrl': result[1], 'severity': result[2],
             'userId': result[3], 'status': result[4], 'created_date': result[5],
             'last_updated': result[6], 'make': result[7], 'model': result[8],
             'vehicle_year': result[9], 'phone': result[10]}
            for result in results
        ]
        return jsonify(payload)
    finally:
        # Fix: the connection was previously leaked on every call.
        db.close()
# update profile data
def post_user_profile_data(data):
    """Upsert a user profile row keyed on user_id; return a status string."""
    # Fix: removed the debug print of the raw payload (leaked user PII to
    # stdout) and close the previously-leaked connection.
    db = connect()
    try:
        cursor = db.cursor()
        sql = "INSERT INTO __users__ (user_id, email_id, name, photo_url) VALUES (%s, %s, %s, %s) ON DUPLICATE KEY UPDATE name=%s, photo_url=%s"
        val = (data["userId"], data["emailId"], data["name"], data["photoURL"], data["name"], data["photoURL"])
        cursor.execute(sql, val)
        db.commit()
        return str(cursor.rowcount) + " records affected."
    finally:
        db.close()
tests/ui/data.py | technolotrix/chat | 0 | 12772064 | <reponame>technolotrix/chat
# I usually do not hard-code urls here,
# but there is not much need for complex configuration
BASEURL = 'https://simple-chat-asapp.herokuapp.com/'
# Expected UI strings asserted against the page.
login_button_text = 'Login'
sign_in_message = 'Sign in to Chat'
who_are_you = 'Who are you?'
who_are_you_talking_to = 'Who are you talking to?'
chatting_text = 'Chatting'
# Filled with (own name, partner name) via str.format.
chatting_with_text = 'You\'re {0}, and you\'re chatting with {1}'
say_something_text = 'Say something...'
send_button_text = 'Send'
# If these need to remain a secret, I normally create a separate file
# for secret data and exclude it in .gitignore
username_1 = 'nicole 1'
username_2 = 'nicole 2'
username_3 = 'nicole 3'
# Edge-case usernames: overly long and empty.
long_username = 'hello' * 20
empty_username = ''
# Messages
hello_message = 'Hello chat!'
hello_2_message = 'Hello to you too!'
secret_message = 'We have a secret! Ssh!'
long_message = '1234567890' * 100
comma_message = 'Hello, I have a comma!'
| 1.960938 | 2 |
cpmpy/fancy.py | tias/hakank | 279 | 12772065 | <reponame>tias/hakank
"""
Mr Greenguest puzzle (a.k.a fancy dress problem) in cpmpy.
Problem (and LPL) code in
http://diuflx71.unifr.ch/lpl/GetModel?name=/demo/demo2
'''
Mr. Greenfan wants to give a dress party where the male guests
must wear green dresses. The following rules are given:
1 If someone wears a green tie he has to wear a green shirt.
2 A guest may only wear green socks and a green shirt
if he wears a green tie or a green hat.
3 A guest wearing a green shirt or a green hat or who does
not wear green socks must wear a green tie.
4 A guest who is not dressed according to rules 1-3 must
pay a $11 entrance fee.
Mr Greenguest wants to participate but owns only a green shirt
(otherwise he would have to pay one for $9). He could buy
a green tie for $10, a green hat (used) for $2 and green socks
for $12.
What is the cheapest solution for Mr Greenguest to participate?
'''
Model created by <NAME>, <EMAIL>
See also my cpmpy page: http://www.hakank.org/cpmpy/
"""
import sys
import numpy as np
from cpmpy import *
from cpmpy.solvers import *
from cpmpy_hakank import *
def fancy():
    """Build and solve the Mr Greenguest fancy-dress model; print the result.

    Boolean wear/pay decisions with a linear cost to minimize.
    """
    # variables
    # t: tie
    # h: hat
    # r: shirt
    # s: socks
    # n: entrance fee
    t = boolvar(name="t")
    h = boolvar(name="h")
    r = boolvar(name="r")
    s = boolvar(name="s")
    n = boolvar(name="n")
    cost = intvar(0,100,name="cost")
    model = Model(minimize=cost)
    # constraints
    # This is a straight translation from the LPL code
    # ( (t->r) \/ n)
    model += [ t.implies(r) | n]
    # ( ((s \/ r) -> (t \/ h)) \/ n )
    model += [ ( (s | r).implies(t|h)) | n]
    # ( ((r \/ h \/ not s) -> t) \/ n )
    # (A -> t) \/ n is logically equivalent to A -> (t \/ n), which is the
    # form encoded below.
    model += [(r | h | ~(s)).implies(t | n)]
    # The shirt (r) carries no cost: per the puzzle he already owns it.
    model += [cost == 10*t + 2*h + 12*s + 11*n]
    ss = CPM_ortools(model)
    num_solutions = 0
    if ss.solve():
        num_solutions += 1
        print("cost:",cost.value())
        print("t:",t.value(),"h:",h.value(),"r:",r.value(),"s:",s.value(),"n:",n.value())
    print("num_solutions:", num_solutions)
fancy()
| 3.296875 | 3 |
back-end/www/tests/vision_tests.py | TUD-KInD/COCTEAU | 0 | 12772066 | from basic_tests import BasicTest
from models.model_operations import scenario_operations
from models.model_operations import topic_operations
from models.model_operations import user_operations
from models.model_operations import vision_operations
from models.model import db
import unittest
class VisionTest(BasicTest):
    """Test case for visions."""

    @staticmethod
    def _make_medias():
        """Build the standard media fixture: one VIDEO, GIF, IMAGE and TEXT entry.

        (Extracted: the original repeated this literal in every test.)
        """
        return [
            {
                "url": "http://url_to_image.com",
                "description": "description",
                "type": "VIDEO"
            },
            {
                "url": "http://url_to_image.com",
                "description": "description",
                "type": "GIF"
            },
            {
                "url": "http://url_to_image.com",
                "description": "description",
                "unsplash_image_id": "uid",
                "unsplash_creator_name": "name",
                "unsplash_creator_url": "url",
                "type": "IMAGE"
            },
            {
                "description": "description",
                "type": "TEXT"
            }
        ]

    def setUp(self):
        """Create the topic, two scenarios, one mood and two users shared by all tests."""
        db.create_all()
        self.topic = topic_operations.create_topic("test", "test")
        self.scenario_1 = scenario_operations.create_scenario(
            "t1", "d1", "i1", self.topic.id)
        self.scenario_2 = scenario_operations.create_scenario(
            "t2", "d2", "i2", self.topic.id)
        self.mood = vision_operations.create_mood("happy")
        self.user_1 = user_operations.create_user("user1")
        self.user_2 = user_operations.create_user("user2")

    def test_get_all_moods(self):
        """Only the mood created in setUp should exist."""
        moods = vision_operations.get_all_moods()
        assert len(moods) == 1
        assert moods[0].name == "happy"

    def test_create_vision(self):
        """A created vision and all of its media rows end up in the session."""
        vision = vision_operations.create_vision(
            mood_id=self.mood.id, medias=self._make_medias(),
            user_id=self.user_1.id, scenario_id=self.scenario_1.id)
        assert vision in db.session
        for m in vision.medias:
            assert m in db.session

    def test_get_vision_by_id(self):
        """A vision can be fetched back by id with identical fields and medias."""
        mood_id = self.mood.id
        user_id = self.user_1.id
        scenario_id = self.scenario_1.id
        vision = vision_operations.create_vision(
            mood_id=mood_id, medias=self._make_medias(),
            user_id=user_id, scenario_id=scenario_id)
        retrieved_vision = vision_operations.get_vision_by_id(vision.id)
        assert retrieved_vision.mood_id == mood_id
        assert retrieved_vision.user_id == user_id
        assert retrieved_vision.scenario_id == scenario_id
        assert vision.medias == retrieved_vision.medias

    def test_get_vision_by_user_id(self):
        """Fetching by user returns only (and all of) that user's visions, in order."""
        medias = self._make_medias()
        mood_id = self.mood.id
        user_id = self.user_1.id
        scenario_id = self.scenario_1.id
        vision_1 = vision_operations.create_vision(
            mood_id=mood_id, medias=medias, user_id=user_id, scenario_id=scenario_id)
        vision_2 = vision_operations.create_vision(
            mood_id=mood_id, medias=medias[:-1], user_id=user_id, scenario_id=scenario_id)
        # A vision belonging to another user must not be returned.
        vision_operations.create_vision(
            mood_id=mood_id, medias=self._make_medias(),
            user_id=self.user_2.id, scenario_id=scenario_id)
        retrieved_visions = vision_operations.get_visions_by_user(
            user_id, paginate=False, order=None)
        assert len(retrieved_visions) == 2
        for expected, actual in zip((vision_1, vision_2), retrieved_visions):
            assert actual.medias == expected.medias
            assert actual.mood_id == expected.mood_id
            assert actual.scenario_id == expected.scenario_id

    def test_get_visions_by_scenario(self):
        """Fetching by scenario returns only (and all of) that scenario's visions."""
        medias = self._make_medias()
        mood_id = self.mood.id
        user_id = self.user_1.id
        scenario_id = self.scenario_1.id
        vision_1 = vision_operations.create_vision(
            mood_id=mood_id, medias=medias, user_id=user_id, scenario_id=scenario_id)
        vision_2 = vision_operations.create_vision(
            mood_id=mood_id, medias=medias[:-1], user_id=user_id, scenario_id=scenario_id)
        # A vision in another scenario must not be returned.
        vision_operations.create_vision(
            mood_id=mood_id, medias=self._make_medias(),
            user_id=user_id, scenario_id=self.scenario_2.id)
        # Fix: the original passed user_id here, which only worked because
        # both ids happen to be 1 in a fresh test database.
        retrieved_visions = vision_operations.get_visions_by_scenario(
            scenario_id, paginate=False, order=None)
        assert len(retrieved_visions) == 2
        for expected, actual in zip((vision_1, vision_2), retrieved_visions):
            assert actual.medias == expected.medias
            assert actual.mood_id == expected.mood_id
            assert actual.scenario_id == expected.scenario_id

    def test_update_vision(self):
        """Updating mood and medias replaces them; old media rows are deleted."""
        vision = vision_operations.create_vision(
            mood_id=self.mood.id, medias=self._make_medias(),
            user_id=self.user_1.id, scenario_id=self.scenario_1.id)
        new_mood = vision_operations.create_mood("sad")
        vision_operations.update_vision(vision.id, mood_id=new_mood.id)
        retrieved_vision = vision_operations.get_vision_by_id(vision.id)
        assert retrieved_vision.mood_id == new_mood.id
        old_medias = vision.medias
        new_medias = [
            {
                "url": "http://url_to_image.com_q",
                "description": "description_q",
                "unsplash_image_id": "uid_q",
                "unsplash_creator_name": "name_q",
                "unsplash_creator_url": "url_q",
                "type": "IMAGE"
            }
        ]
        vision_operations.update_vision(vision.id, medias=new_medias)
        retrieved_vision = vision_operations.get_vision_by_id(vision.id)
        assert len(retrieved_vision.medias) == 1
        assert retrieved_vision.medias[0].url == new_medias[0]["url"]
        assert retrieved_vision.medias[0].description == new_medias[0]["description"]
        assert retrieved_vision.medias[0].media_type.name == new_medias[0]["type"]
        # Replaced media rows must be gone from the session.
        for m in old_medias:
            assert m not in db.session

    def test_remove_vision(self):
        """Removing a vision takes it out of the session."""
        vision = vision_operations.create_vision(
            mood_id=self.mood.id, medias=self._make_medias(),
            user_id=self.user_1.id, scenario_id=self.scenario_1.id)
        assert vision in db.session
        vision_operations.remove_vision(vision.id)
        assert vision not in db.session
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 2.421875 | 2 |
src/deep_nlp/embed_cnn/embcnnmodel_gradcam.py | ENSAE-CKW/nlp_understanding | 3 | 12772067 | <reponame>ENSAE-CKW/nlp_understanding
import torch.nn as nn
import torch
import torch.nn.functional as F
from ..grad_cam.model import GradCamBaseModel
class classifier3F(GradCamBaseModel):
    """Kim-style multi-kernel-height CNN text classifier with Grad-CAM hooks.

    Convolutions of several heights run over pretrained word embeddings;
    pooled features are concatenated and classified through a linear +
    log-softmax head.  Layers are also registered on the GradCamBaseModel
    stages (before_conv / pool / after_conv) so activations and gradients
    can be captured.
    """
    # define all the layers used in model
    def __init__(self, wv, no_words, embedding_dim, nb_filter, height_filter, output_dim, dropout, padded):
        """Build the network.

        wv: pretrained embedding weight matrix; no_words: sequence length;
        nb_filter: filters per kernel height; height_filter: iterable of
        kernel heights; padded: if True, inputs are zero-padded so a single
        shared pooling window of size no_words fits every conv output.
        """
        # Constructor
        super().__init__()
        self.height_filter = height_filter
        self.padded = padded
        self.no_words= no_words
        # embedding layer
        self.embedding = nn.Embedding.from_pretrained(wv)
        self.conv1_conv= nn.ModuleList()
        for height in height_filter:
            self.conv1_conv.append(
                nn.Conv2d(in_channels=1, out_channels=int(nb_filter), kernel_size=(int(float(height)), embedding_dim))
            )
            # Also register on the Grad-CAM "before_conv" stage.
            self.before_conv.add_module("conv1_conv_{}".format(height), self.conv1_conv[-1])
        self.conv1_relu = nn.ReLU()
        self.before_conv.add_module("conv1_relu", self.conv1_relu)
        if self.padded:
            # One shared pool: padded conv outputs all have length no_words.
            self.conv1_maxpool= nn.Sequential(
                nn.MaxPool1d(no_words, stride=1)
            )
            self.pool.add_module("conv1_maxpool", self.conv1_maxpool)
        else:
            # One pool per height: conv output length is no_words - height + 1.
            self.conv1_maxpool= nn.ModuleList()
            for height in height_filter:
                self.conv1_maxpool.append(
                    nn.MaxPool1d(no_words - height + 1, stride=1)
                    # nn.MaxPool1d(no_words, stride=1)
                )
                self.pool.add_module("conv1_maxpool_{}".format(height), self.conv1_maxpool[-1])
        self.fc = nn.Linear(len(height_filter) * nb_filter, output_dim)
        self.sm = nn.LogSoftmax(dim=1)
        self.dp = nn.Dropout(p=dropout)
        self.after_conv.add_module("dp", self.dp)
        self.after_conv.add_module("fc", self.fc)
        self.after_conv.add_module("sm", self.sm)
        # Kept so an identical model can be re-instantiated later.
        self.params = [wv, no_words, embedding_dim, nb_filter, height_filter, output_dim, dropout, padded]
    def get_params(self):
        """Return the constructor hyperparameters."""
        return self.params
    def get_activations(self, x):
        """Return the per-height ReLU'd convolution activations for Grad-CAM."""
        # Documentation said to !!!
        # Each forward step, reset gradient list to only get the one from the actual run (=from this forward step)
        self.reset_gradient_list()
        x = self.embedding(x)
        x = x.unsqueeze(1)
        # before_conv stores the convs first (one per height), then the ReLU.
        conv_before= []
        for i in range(len(self.height_filter)):
            conv_before.append(self.before_conv[i])
        relu = self.before_conv.conv1_relu
        if self.padded:
            # Pad each input so every conv yields a length-no_words output.
            x_padded = [nn.ZeroPad2d((0, 0, 0, height - 1))(x) for height in self.height_filter]
            x_padded = list(zip(x_padded, conv_before))
            x = [relu(conv(x)).squeeze(3) for x, conv in x_padded]
        else:
            x = [relu(conv(x)).squeeze(3)for conv in conv_before]
        return x
    def forward(self, x):
        """Forward pass; registers Grad-CAM gradient hooks on the activations."""
        # Documentation said to !!!
        x = self.get_activations(x)
        # Documentation said to !!!
        # Apply relu after convolution layer
        for i in x:
            # Documentation said to !!!
            if i.requires_grad:
                h= self.register_hook(i)
        if self.padded:
            x_copy= [self.pool(i).squeeze(2) for i in x]
        else:
            # Match each activation with its height-specific pool.
            x_copy= []
            for i in range(len(self.height_filter)):
                x_copy.append(self.pool[i](x[i]).squeeze(2))
        x = torch.cat(tuple(x_copy), dim=1)
        x= self.after_conv(x)
        return x
| 2.359375 | 2 |
detect_blur.py | mansikataria/whatsyourstyle-gan | 0 | 12772068 | # import the necessary packages
from imutils import paths
import argparse
import cv2
import os
def variance_of_laplacian(image):
    """Focus measure: variance of the image's Laplacian (low => blurry)."""
    laplacian = cv2.Laplacian(image, cv2.CV_64F)
    return laplacian.var()
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--images", required=True,
    help="path to input directory of images")
ap.add_argument("-t", "--threshold", type=float, default=130.0,
    help="focus measures that fall below this value will be considered 'blurry'")
# Fix: argparse's type=bool is broken for flags (bool("false") is True) and
# the default was the *string* "false", so the equality check below could
# never trigger deletion.  Keep a string flag with an explicit comparison.
ap.add_argument("-d", "--delete", type=str, default="false",
    help="whether to delete 'blurry' images or not ('true'/'false')")
args = vars(ap.parse_args())

# loop over the input images
for imagePath in paths.list_images(args["images"]):
    # load the image, convert it to grayscale, and compute the
    # focus measure of the image using the Variance of Laplacian method
    image = cv2.imread(imagePath)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    fm = variance_of_laplacian(gray)
    text = "Not Blurry"
    # if the focus measure is less than the supplied threshold,
    # then the image should be considered "Blurry"
    if fm < args["threshold"]:
        text = "Blurry"
    # print out the Focus Measure and result
    print(imagePath)
    print('focus measure', fm)
    print('result', text)
    # annotate and save the result image
    cv2.putText(image, "{}: {:.2f}".format(text, fm), (10, 30),
        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3)
    # cv2.imshow("Image", image)
    cv2.imwrite("blur_test_result/"+imagePath.split("/",1)[1] , image)
    key = cv2.waitKey(0)
    # based on whether the 'delete' flag is set, delete the Blurry image
    if args["delete"].lower() == "true":
        if text == "Blurry":
            try:
                os.remove(imagePath)
            # Fix: narrowed from a bare except; deletion stays best-effort.
            except OSError:
                pass
tests/test_api.py | marcidy/meetup-api | 52 | 12772069 | <reponame>marcidy/meetup-api<filename>tests/test_api.py
import pytest
from meetup.api import Client, MeetupObject, MeetupObjectList
@pytest.fixture
def api_client():
return Client()
def test_get_find_groups(api_client):
find_group_info = api_client.GetFindGroups()
assert isinstance(find_group_info, MeetupObjectList)
assert isinstance(find_group_info[0], MeetupObject)
assert isinstance(find_group_info[-1], MeetupObject)
| 2.203125 | 2 |
utils/stitch_bigshot.py | mikedh/truss | 3 | 12772070 | <filename>utils/stitch_bigshot.py<gh_stars>1-10
import sys
import cv2
import numpy as np
# stitches together tiled screenshots into one large screenshot
def create_fn(base, x, y):
    """Return the tile filename for grid position (x, y): '<base><y>x<x>.png'."""
    return f"{base}{y}x{x}.png"
def infer_size(fn):
    """Print the filename and return the image's (height, width, channels) shape."""
    print(fn)
    return cv2.imread(fn).shape
def insert_image(fn, dest, col, imwidth, row, imheight):
    """Load tile `fn` and paste it into `dest` at grid cell (row, col)."""
    x0 = imwidth * col
    y0 = imheight * row
    tile = cv2.imread(fn)
    dest[y0:y0 + imheight, x0:x0 + imwidth, :] = tile
def main():
    # Usage: stitch_bigshot.py <base_prefix> <cols> <rows> <output_file>
    basefn = sys.argv[1]
    cols = int(sys.argv[2])
    rows = int(sys.argv[3])
    destfn = sys.argv[4]
    # All tiles are assumed to share the shape of tile (0, 0).
    imsize = infer_size(create_fn(basefn, 0, 0))
    imw = imsize[1]
    imh = imsize[0]
    print("Src image size: {}".format(str(imsize)))
    destwidth = cols * imsize[1]
    destheight = rows * imsize[0]
    destimage = np.zeros((destheight, destwidth, 3), dtype=np.uint8)
    for row in range(rows):
        for col in range(cols):
            # Tiles are numbered bottom-up on disk, so flip the row index
            # to place row 0 of the output at the top.
            insert_image(create_fn(basefn, rows - row - 1, col), destimage,
                         col, imw, row, imh)
    cv2.imwrite(destfn, destimage)
if __name__ == '__main__':
main() | 2.71875 | 3 |
concat_frames.py | dellacortelab/refinement | 2 | 12772071 | import pandas as pd
import argparse
import os
import mdtraj
import numpy as np

parser = argparse.ArgumentParser(description='Script to generate trajectories containing only top scoring frames as scored by RWPlus. These top scoring trajectories can then be averaged with Gromacs to produce an averaged structure.')
parser.add_argument('-p','--path',help='Path to directory containing all refinement trajectories and RWPlus score files.',required=True,dest='path')
parser.add_argument('--percent',help='Percent of top scoring structures to average over. Default: 15,5,40,1',nargs='*',default=[15,5,40,1],type=int)
args = parser.parse_args()

dir_path = args.path
percent = args.percent

# Load every refinement trajectory (keyed by its index) and accumulate all
# per-frame RWPlus scores into a single table.
all_trajs = dict()
rw_df = pd.DataFrame(columns=['traj_idx', 'frame_idx', 'score'])
for file in os.listdir(dir_path):
    if file.endswith('.dcd') and file.startswith('refinement_'):
        print(f'Reading {file}')
        traj_idx = int(file[file.rfind('_') + 1:file.rfind('.')])
        curr_traj = mdtraj.load(os.path.join(dir_path, file),
                                top=os.path.join(dir_path, f'minimized_{traj_idx}.pdb'))
        curr_traj.remove_solvent(inplace=True)
        all_trajs[traj_idx] = curr_traj
    elif file.endswith('.txt') and file.startswith('scorelist_'):
        print(f'Reading {file}')
        traj_idx = int(file[file.rfind('_') + 1:file.rfind('.')])
        with open(os.path.join(dir_path, file), 'r') as f:
            scores = f.readlines()
        scores = np.array(scores, dtype=float)
        num_frames = len(scores)
        df = pd.DataFrame(
            list(zip([traj_idx] * num_frames, np.arange(num_frames), scores)),
            columns=['traj_idx', 'frame_idx', 'score'])
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # supported equivalent.
        rw_df = pd.concat([rw_df, df])

# Lower RWPlus score is better, so the best frames sort to the front.
rw_df.sort_values(by=['score'], inplace=True)
num_frames = len(rw_df)

for perc in percent:
    num_top = round(perc * .01 * num_frames)
    print(perc)
    print(num_top)
    best_frames = rw_df.head(num_top)
    # Stitch the top-scoring frames from every trajectory into one trajectory.
    # Explicit None sentinel replaces the previous bare try/except-NameError
    # control flow, which could silently mask unrelated errors.
    newtraj = None
    for idx in all_trajs.keys():
        traj_best = best_frames[best_frames['traj_idx'] == idx]
        selected = all_trajs[idx][list(traj_best['frame_idx'])]
        if newtraj is None:
            newtraj = selected
        else:
            newtraj = newtraj.join([selected])
    print(len(newtraj))
    print(f'Saving top {perc}% of frames to top_{perc}_percent.xtc')
    newtraj.save(os.path.join(dir_path, f'top_{perc}_percent.xtc'), force_overwrite=True)
| 2.671875 | 3 |
graphzoo/dataloader/__init__.py | AnoushkaVyas/GraphZoo | 2 | 12772072 | from .dataloader import *
from .download import * | 1.140625 | 1 |
authentication_service/migrations/0007_auto_20180502_0905.py | hedleyroos/core-authentication-service | 1 | 12772073 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-05-02 09:05
from __future__ import unicode_literals
import authentication_service.models
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: redefines CoreUser.q as the custom
    # nullable AutoQueryField declared in authentication_service.models.

    dependencies = [
        ('authentication_service', '0006_auto_20180424_0848'),
    ]

    operations = [
        migrations.AlterField(
            model_name='coreuser',
            name='q',
            field=authentication_service.models.AutoQueryField(null=True),
        ),
    ]
| 1.554688 | 2 |
examples/example_async_reduceable.py | tzoiker/async-reduce | 0 | 12772074 | import asyncio
import time
from async_reduce import async_reduceable
@async_reduceable()
async def fetch(url):
    """Simulate fetching *url*: log it, wait one second, return finish time.

    The @async_reduceable decorator is intended to coalesce concurrent
    identical calls, so simultaneous fetches share one execution.
    """
    print('- fetch page: ', url)
    await asyncio.sleep(1)
    return time.time()
async def amain():
    """Fire ten concurrent fetches of the same page and print each result.

    Because fetch() is wrapped in @async_reduceable, the ten identical calls
    are expected to be reduced to a single underlying fetch.
    """
    # asyncio.wait() no longer accepts bare coroutines (deprecated in 3.8,
    # TypeError since 3.11) -- wrap each coroutine in a Task explicitly.
    tasks = [
        asyncio.ensure_future(fetch('/page')) for _ in range(10)
    ]

    print('-- Simultaneous run')
    done, pending = await asyncio.wait(tasks)

    print('Results:')
    for f in done:
        print(
            await f
        )
def main():
    # Synchronous entry point: run the async demo to completion.
    asyncio.run(amain())
if __name__ == '__main__':
    main()
| 3.171875 | 3 |
homeassistant/util/ulid.py | eyager1/core | 1 | 12772075 | """Helpers to generate ulids."""
from random import getrandbits
import time
def ulid_hex() -> str:
    """Generate a ULID in lowercase hex that will work for a UUID.

    This ulid should not be used for cryptographically secure
    operations.

    This string can be converted with https://github.com/ahawker/ulid

    ulid.from_uuid(uuid.UUID(ulid_hex))
    """
    # 48-bit millisecond timestamp (12 hex chars) + 80 random bits (20 hex chars).
    timestamp_ms = int(time.time() * 1000)
    random_bits = getrandbits(80)
    return format(timestamp_ms, "012x") + format(random_bits, "020x")
def ulid() -> str:
    """Generate a ULID.

    This ulid should not be used for cryptographically secure
    operations.

     01AN4Z07BY      79KA1307SR9X4MV3
    |----------|    |----------------|
     Timestamp          Randomness
       48bits             80bits

    This string can be loaded directly with https://github.com/ahawker/ulid

    import homeassistant.util.ulid as ulid_util
    import ulid
    ulid.parse(ulid_util.ulid())
    """
    # 48-bit big-endian millisecond timestamp followed by 80 random bits,
    # packed into 16 bytes.
    ulid_bytes = int(time.time() * 1000).to_bytes(6, byteorder="big") + int(
        getrandbits(80)
    ).to_bytes(10, byteorder="big")

    # This is base32 crockford encoding with the loop unrolled for performance
    #
    # This code is adapted from:
    # https://github.com/ahawker/ulid/blob/06289583e9de4286b4d80b4ad000d137816502ca/ulid/base32.py#L102
    #
    # Each output character encodes 5 bits, most significant first; 26 chars
    # cover the 128 bits (the first character carries only the top 3 bits).
    enc = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
    return (
        enc[(ulid_bytes[0] & 224) >> 5]
        + enc[ulid_bytes[0] & 31]
        + enc[(ulid_bytes[1] & 248) >> 3]
        + enc[((ulid_bytes[1] & 7) << 2) | ((ulid_bytes[2] & 192) >> 6)]
        + enc[((ulid_bytes[2] & 62) >> 1)]
        + enc[((ulid_bytes[2] & 1) << 4) | ((ulid_bytes[3] & 240) >> 4)]
        + enc[((ulid_bytes[3] & 15) << 1) | ((ulid_bytes[4] & 128) >> 7)]
        + enc[(ulid_bytes[4] & 124) >> 2]
        + enc[((ulid_bytes[4] & 3) << 3) | ((ulid_bytes[5] & 224) >> 5)]
        + enc[ulid_bytes[5] & 31]
        + enc[(ulid_bytes[6] & 248) >> 3]
        + enc[((ulid_bytes[6] & 7) << 2) | ((ulid_bytes[7] & 192) >> 6)]
        + enc[(ulid_bytes[7] & 62) >> 1]
        + enc[((ulid_bytes[7] & 1) << 4) | ((ulid_bytes[8] & 240) >> 4)]
        + enc[((ulid_bytes[8] & 15) << 1) | ((ulid_bytes[9] & 128) >> 7)]
        + enc[(ulid_bytes[9] & 124) >> 2]
        + enc[((ulid_bytes[9] & 3) << 3) | ((ulid_bytes[10] & 224) >> 5)]
        + enc[ulid_bytes[10] & 31]
        + enc[(ulid_bytes[11] & 248) >> 3]
        + enc[((ulid_bytes[11] & 7) << 2) | ((ulid_bytes[12] & 192) >> 6)]
        + enc[(ulid_bytes[12] & 62) >> 1]
        + enc[((ulid_bytes[12] & 1) << 4) | ((ulid_bytes[13] & 240) >> 4)]
        + enc[((ulid_bytes[13] & 15) << 1) | ((ulid_bytes[14] & 128) >> 7)]
        + enc[(ulid_bytes[14] & 124) >> 2]
        + enc[((ulid_bytes[14] & 3) << 3) | ((ulid_bytes[15] & 224) >> 5)]
        + enc[ulid_bytes[15] & 31]
    )
| 3.390625 | 3 |
judge/supplementary_gt.py | zwangab91/ctw-baseline | 333 | 12772076 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import matplotlib.pyplot as plt
import os
import plot_tools
import settings
from pythonapi import anno_tools
def plt_print_text(*args):
    # Log the call (all args except the trailing options dict), then render
    # via plot_tools.print_text with Type 42 (TrueType) fonts in the PDF.
    print('plot_tools.print_text', *args[:-1])
    with plt.style.context({
        'pdf.fonttype': 42,
    }):
        plot_tools.print_text(*args)
plt_print_text.concurrent = False  # marker read by the caller; disables concurrent use
print_text = plt_print_text  # module-level alias used by main()
def main():
    """Render ground-truth annotations for a hand-picked set of images as PDFs."""
    with open(settings.DATA_LIST) as f:
        data_list = json.load(f)
    # One JSON annotation line per image, in train + val + test_det order,
    # matching the concatenation of data_list below.
    lines = []
    with open(settings.TRAIN) as f:
        lines += f.read().splitlines()
    with open(settings.VAL) as f:
        lines += f.read().splitlines()
    with open(settings.TEST_DETECTION_GT) as f:
        lines += f.read().splitlines()

    def gt2array(gt):
        # Convert one ground-truth record into drawable boxes:
        # green for Chinese characters, yellow (no text) for ignored regions.
        color = '#0f0'
        a = list()
        for char in anno_tools.each_char(gt):
            if char['is_chinese']:
                a.append({'polygon': char['polygon'], 'text': char['text'], 'color': color, 'fontsize': 10})
        for char in gt['ignore']:
            a.append({'polygon': char['polygon'], 'text': '', 'color': '#ff0', 'fontsize': 10})
        return a

    # (image_id, crop_x, crop_y, crop_w, crop_h)
    selected = [
        ('0000507', 0, 0, 2048, 2048),
        ('1023899', 0, 0, 2048, 2048),
        ('1031755', 0, 0, 2048, 2048),
        ('1044721', 0, 0, 2048, 2048),
        ('1046905', 0, 0, 2048, 2048),
        ('2000215', 0, 0, 2048, 2048),
        ('2004154', 0, 0, 2048, 2048),
        ('2005679', 0, 0, 2048, 2048),
        ('2024003', 0, 0, 2048, 2048),
        ('3005669', 0, 0, 2048, 2048),
        ('3029319', 0, 0, 2048, 2048),
        ('3040629', 0, 0, 2048, 2048),
        ('3001838', 0, 650, 700, 550),
        ('1041797', 530, 740, 700, 550),
    ]
    if not os.path.isdir(settings.PRINTTEXT_DRAWING_DIR):
        os.makedirs(settings.PRINTTEXT_DRAWING_DIR)
    for image_id, x, y, w, h in selected:
        i = [o['image_id'] for o in data_list['train'] + data_list['val'] + data_list['test_det']].index(image_id)
        gt = json.loads(lines[i])
        crop = (x, y, w, h)
        # Train/val images live in one directory, test-detection images in another.
        file_name = os.path.join(settings.TRAINVAL_IMAGE_DIR if i < len(data_list['train'] + data_list['val']) else settings.TEST_IMAGE_DIR, gt['file_name'])
        output = os.path.join(settings.PRINTTEXT_DRAWING_DIR, 'gt_{}_{}_{}_{}_{}.pdf'.format(image_id, x, y, w, h))
        print_text(file_name, output, {
            'boxes': gt2array(gt),
            'crop': crop,
        })


if __name__ == '__main__':
    main()
| 2.4375 | 2 |
FeatureStacker.py | menikhilpandey/Supply-Chain-Risk-Management | 11 | 12772077 | # class FeatureStacker(BaseEstimator):
# """Stacks several transformer objects to yield concatenated features.
# Similar to pipeline, a list of tuples ``(name, estimator)`` is passed
# to the constructor.
# """
# def __init__(self, transformer_list):
# self.transformer_list = transformer_list
#
# def get_feature_names(self):
# pass
#
# def fit(self, X, y=None):
# for name, trans in self.transformer_list:
# trans.fit(X, y)
# return self
#
# def transform(self, X):
# features = []
# for name, trans in self.transformer_list:
# features.append(trans.transform(X))
# issparse = [sparse.issparse(f) for f in features]
# if np.any(issparse):
# features = sparse.hstack(features).tocsr()
# else:
# features = np.hstack(features)
# return features
#
# def get_params(self, deep=True):
# if not deep:
# return super(FeatureStacker, self).get_params(deep=False)
# else:
# out = dict(self.transformer_list)
# for name, trans in self.transformer_list:
# for key, value in trans.get_params(deep=True).iteritems():
# out['%s__%s' % (name, key)] = value
# return out | 2.734375 | 3 |
go.py | cwbooth5/f5go | 0 | 12772078 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This is the Go Redirector. It uses short mnemonics as redirects to otherwise
long URLs. Few remember how to write in cursive, most people don't remember
common phone numbers, and just about everyone needs a way around bookmarks.
"""
import os
import os.path
import pwd
import socket
import sys
import urllib
import ConfigParser
import cherrypy
import jinja2
import random
from optparse import OptionParser
from core import ListOfLinks, Link, MYGLOBALS, InvalidKeyword
import tools
__author__ = "<NAME> <<EMAIL>>"
__credits__ = "<NAME>, <NAME>, treebird"
# Import-time configuration: read go.cfg and publish settings on MYGLOBALS.
config = ConfigParser.ConfigParser()
config.read('go.cfg')
MYGLOBALS.cfg_urlFavicon = config.get('goconfig', 'cfg_urlFavicon')
try:
    MYGLOBALS.cfg_hostname = config.get('goconfig', 'cfg_hostname')
except ConfigParser.NoOptionError:
    # No configured hostname: fall back to this machine's resolved IP.
    MYGLOBALS.cfg_hostname = socket.gethostbyname(socket.gethostname())
MYGLOBALS.cfg_urlSSO = config.get('goconfig', 'cfg_urlSSO')
MYGLOBALS.cfg_urlEditBase = "https://" + MYGLOBALS.cfg_hostname
MYGLOBALS.cfg_listenPort = int(config.get('goconfig', 'cfg_listenPort'))
def config_jinja():
    """Construct a jinja environment, provide filters and globals
    to templates.
    """
    env = jinja2.Environment(loader=jinja2.FileSystemLoader("."))
    # Custom filters usable inside templates.
    env.filters['time_t'] = tools.prettytime
    env.filters['int'] = int
    env.filters['escapekeyword'] = tools.escapekeyword
    # Python helpers exposed to template code.
    helpers = {
        "enumerate": enumerate,
        "sample": random.sample,
        "len": len,
        "min": min,
        "str": str,
        "list": tools.makeList,
    }
    env.globals.update(helpers)
    # Also expose every module-level name (as the original app relies on).
    env.globals.update(globals())
    return env
class Root(object):
    # CherryPy application root: every @cherrypy.expose method below is a
    # URL handler; underscore-wrapped names (_edit_, _add_, ...) are the
    # administrative endpoints.
    env = config_jinja()

    def redirect(self, url, status=307):
        # Set a redirect response in place (unlike undirect(), which raises).
        cherrypy.response.status = status
        cherrypy.response.headers["Location"] = url

    def undirect(self):
        # Bounce the client back to the referring page (or the home page).
        raise cherrypy.HTTPRedirect(cherrypy.request.headers.get("Referer", "/"))

    def notfound(self, msg):
        # Render the not-found page with a human-readable message.
        return env.get_template("notfound.html").render(message=msg)

    def redirectIfNotFullHostname(self, scheme=None):
        if scheme is None:
            scheme = cherrypy.request.scheme
        # redirect to our full hostname to get the user's cookies
        if cherrypy.request.scheme != scheme or cherrypy.request.base.find(MYGLOBALS.cfg_hostname) < 0:
            fqurl = scheme + "://" + MYGLOBALS.cfg_hostname
            fqurl += cherrypy.request.path_info
            if cherrypy.request.query_string:
                fqurl += "?" + cherrypy.request.query_string
            raise cherrypy.HTTPRedirect(fqurl)

    def redirectToEditLink(self, **kwargs):
        # Route to the edit page when a linkid is supplied, else the add page;
        # remaining kwargs are passed through as query parameters.
        if "linkid" in kwargs:
            url = "/_edit_/%s" % kwargs["linkid"]
            del kwargs["linkid"]
        else:
            url = "/_add_"
        return self.redirect(url + "?" + urllib.urlencode(kwargs))

    def redirectToEditList(self, listname, **kwargs):
        baseurl = "/_editlist_/%s?" % tools.escapekeyword(listname)
        return self.redirect(baseurl + urllib.urlencode(kwargs))

    @cherrypy.expose
    def robots_txt(self):
        # Specifically for the internal GSA
        return file("robots.txt").read()

    @cherrypy.expose
    def favicon_ico(self):
        # Cache for two days, then permanent-redirect to the configured icon URL.
        cherrypy.response.headers["Cache-control"] = "max-age=172800"
        return self.redirect(MYGLOBALS.cfg_urlFavicon, status=301)

    @cherrypy.expose
    def bootstrap_css(self):
        cherrypy.response.headers["Cache-control"] = "max-age=172800"
        cherrypy.response.headers["Content-Type"] = "text/css"
        return file("bootstrap.min.css").read()

    @cherrypy.expose
    def lucky(self):
        # "I'm feeling lucky": send the user to a random non-folder link.
        luckylink = random.choice(MYGLOBALS.g_db.getNonFolders())
        luckylink.clicked()
        return self.redirect(tools.deampify(luckylink.url()))

    @cherrypy.expose
    def index(self, **kwargs):
        self.redirectIfNotFullHostname()

        if "keyword" in kwargs:
            return self.redirect("/" + kwargs["keyword"])

        return env.get_template('index.html').render(now=tools.today())

    @cherrypy.expose
    def default(self, *rest, **kwargs):
        # Catch-all handler: /<keyword> resolves a go-link, a regex keyword,
        # or falls back to a (possibly empty) list page.
        self.redirectIfNotFullHostname()

        keyword = rest[0]
        rest = rest[1:]
        forceListDisplay = False

        # action = kwargs.get("action", "list")

        if keyword[0] == ".":  # force list page instead of redirect
            forceListDisplay = True
            keyword = keyword[1:]

        if rest:
            keyword += "/"
        elif forceListDisplay and cherrypy.request.path_info[-1] == "/":
            # allow go/keyword/ to redirect to go/keyword but go/.keyword/
            # to go to the keyword/ index
            keyword += "/"

        # try it as a list
        try:
            ll = MYGLOBALS.g_db.getList(keyword, create=False)
        except InvalidKeyword as e:
            return self.notfound(str(e))

        if not ll:  # nonexistent list
            # check against all special cases
            matches = []
            for R in MYGLOBALS.g_db.regexes.values():
                matches.extend([(R, L, genL) for L, genL in R.matches(keyword)])

            if not matches:
                kw = tools.sanitary(keyword)
                if not kw:
                    return self.notfound("No match found for '%s'" % keyword)

                # serve up empty fake list
                return env.get_template('list.html').render(L=ListOfLinks(linkid=0), keyword=kw)
            elif len(matches) == 1:
                R, L, genL = matches[0]  # actual regex, generated link
                R.clicked()
                L.clicked()
                return self.redirect(tools.deampify(genL.url()))
            else:  # len(matches) > 1
                LL = ListOfLinks(linkid=-1)  # -1 means non-editable
                LL.links = [genL for R, L, genL in matches]
                return env.get_template('list.html').render(L=LL, keyword=keyword)

        listtarget = ll.getDefaultLink()

        if listtarget and not forceListDisplay:
            ll.clicked()
            listtarget.clicked()
            return self.redirect(tools.deampify(listtarget.url()))

        tmplList = env.get_template('list.html')
        return tmplList.render(L=ll, keyword=keyword)

    @cherrypy.expose
    def special(self):
        # Show the page listing all "smart" (regex) keywords.
        LL = ListOfLinks(linkid=-1)
        LL.name = "Smart Keywords"
        LL.links = MYGLOBALS.g_db.getSpecialLinks()

        env.globals['MYGLOBALS.g_db'] = MYGLOBALS.g_db
        return env.get_template('list.html').render(L=LL, keyword="special")

    @cherrypy.expose
    def _login_(self, redirect=""):
        tools.getSSOUsername(redirect)
        if redirect:
            return self.redirect(redirect)
        return self.undirect()

    @cherrypy.expose
    def _link_(self, linkid):
        # Permanent redirect to a link by numeric id, counting the click.
        link = MYGLOBALS.g_db.getLink(linkid)
        if link:
            link.clicked()
            return self.redirect(link.url(), status=301)
        cherrypy.response.status = 404
        return self.notfound("Link %s does not exist" % linkid)

    @cherrypy.expose
    def _add_(self, *args, **kwargs):
        # _add_/tag1/tag2/tag3
        link = Link()
        link.lists = [MYGLOBALS.g_db.getList(listname, create=False) or ListOfLinks(linkid=0, name=listname) for listname in args]
        return env.get_template("editlink.html").render(L=link, returnto=(args and args[0] or None), **kwargs)

    @cherrypy.expose
    def _edit_(self, linkid, **kwargs):
        link = MYGLOBALS.g_db.getLink(linkid)
        if link:
            return env.get_template("editlink.html").render(L=link, **kwargs)

        # edit new link
        return env.get_template("editlink.html").render(L=Link(), **kwargs)

    @cherrypy.expose
    def _editlist_(self, keyword, **kwargs):
        K = MYGLOBALS.g_db.getList(keyword, create=False)
        if not K:
            K = ListOfLinks()
        return env.get_template("list.html").render(L=K, keyword=keyword)

    @cherrypy.expose
    def _setbehavior_(self, keyword, **kwargs):
        # Change a list's default-redirect behavior, then return to its editor.
        K = MYGLOBALS.g_db.getList(keyword, create=False)
        if "behavior" in kwargs:
            K._url = kwargs["behavior"]

        return self.redirectToEditList(keyword)

    @cherrypy.expose
    def _delete_(self, linkid, returnto=""):
        # username = getSSOUsername()
        MYGLOBALS.g_db.deleteLink(MYGLOBALS.g_db.getLink(linkid))
        return self.redirect("/." + returnto)

    @cherrypy.expose
    def _modify_(self, **kwargs):
        # Create or update a link from the edit form, synchronizing its list
        # memberships and auto-deleting lists that become empty.
        username = tools.getSSOUsername()

        linkid = kwargs.get("linkid", "")
        title = tools.escapeascii(kwargs.get("title", ""))
        lists = kwargs.get("lists", [])
        url = kwargs.get("url", "")
        otherlists = kwargs.get("otherlists", "")
        returnto = kwargs.get("returnto", "")

        # remove any whitespace/newlines in url
        url = "".join(url.split())

        if type(lists) not in [tuple, list]:
            lists = [lists]
        lists.extend(otherlists.split())

        if linkid:
            link = MYGLOBALS.g_db.getLink(linkid)
            if link._url != url:
                MYGLOBALS.g_db._changeLinkUrl(link, url)
            link.title = title
            newlistset = []

            for listname in lists:
                # Smart ({*}) links are stored under folder-style keywords.
                if "{*}" in url:
                    if listname[-1] != "/":
                        listname += "/"
                try:
                    newlistset.append(MYGLOBALS.g_db.getList(listname, create=True))
                except:
                    return self.redirectToEditLink(error="invalid keyword '%s'" % listname, **kwargs)

            for LL in newlistset:
                if LL not in link.lists:
                    LL.addLink(link)

            for LL in [x for x in link.lists]:
                if LL not in newlistset:
                    LL.removeLink(link)
                    if not LL.links:
                        MYGLOBALS.g_db.deleteList(LL)

            link.lists = newlistset

            link.editedBy(username)
            MYGLOBALS.g_db.save()

            return self.redirect("/." + returnto)

        if not lists:
            return self.redirectToEditLink(error="delete links that have no lists", **kwargs)

        if not url:
            return self.redirectToEditLink(error="URL required", **kwargs)

        # if url already exists, redirect to that link's edit page
        if url in MYGLOBALS.g_db.linksByUrl:
            link = MYGLOBALS.g_db.linksByUrl[url]

            # only modify lists; other fields will only be set if there
            # is no original
            combinedlists = set([x.name for x in link.lists]) | set(lists)
            fields = {'title': link.title or title,
                      'lists': " ".join(combinedlists),
                      'linkid': str(link.linkid)
                      }
            return self.redirectToEditLink(error="found identical existing URL; confirm changes and re-submit", **fields)

        link = MYGLOBALS.g_db.addLink(lists, url, title, username)
        MYGLOBALS.g_db.save()
        return self.redirect("/." + returnto)

    @cherrypy.expose
    def _internal_(self, *args, **kwargs):
        # check, toplinks, special, dumplist
        return env.get_template(args[0] + ".html").render(**kwargs)

    @cherrypy.expose
    def toplinks(self, n="100"):
        return env.get_template("toplinks.html").render(n=int(n))

    @cherrypy.expose
    def variables(self):
        return env.get_template("variables.html").render()

    @cherrypy.expose
    def help(self):
        return env.get_template("help.html").render()

    @cherrypy.expose
    def _override_vars_(self, **kwargs):
        # Persist per-user variable overrides in a ten-year cookie.
        cherrypy.response.cookie["variables"] = urllib.urlencode(kwargs)
        cherrypy.response.cookie["variables"]["max-age"] = 10 * 365 * 24 * 3600
        return self.redirect("/variables")

    @cherrypy.expose
    def _set_variable_(self, varname="", value=""):
        # Set a global (server-side) variable and persist the database.
        if varname and value:
            MYGLOBALS.g_db.variables[varname] = value
            MYGLOBALS.g_db.save()

        return self.redirect("/variables")
def main(opts):
    # Configure CherryPy to listen on all interfaces at the configured port
    # and serve the Root application.
    cherrypy.config.update({'server.socket_host': '::',
                            'server.socket_port': MYGLOBALS.cfg_listenPort,
                            'request.query_string_encoding': "latin1",
                            })
    # cherrypy.https = s = cherrypy._cpserver.Server()
    # s.socket_host = '::'
    # s.socket_port = 443
    # s.ssl_module = 'pyopenssl'
    # s.ssl_certificate = 'go.crt'
    # s.ssl_private_key = 'go.key'
    # s.ssl_certificate_chain = 'gd_bundle.crt'
    # s.subscribe()

    # checkpoint the database every 60 seconds
    # cherrypy.process.plugins.BackgroundTask(60, lambda: MYGLOBALS.g_db.save()).start()

    # Serve ./images as static files (path normalized for Windows).
    file_path = os.getcwd().replace("\\", "/")
    conf = {'/images': {"tools.staticdir.on": True, "tools.staticdir.dir": file_path + "/images"}}
    print "Cherrypy conf: %s" % conf
    if opts.runas:
        # Check for requested user, raises KeyError if they don't exist.
        pwent = pwd.getpwnam(opts.runas)
        # Drop privs to requested user, raises OSError if not privileged.
        cherrypy.process.plugins.DropPrivileges(
            cherrypy.engine, uid=pwent.pw_uid, gid=pwent.pw_gid).subscribe()
    cherrypy.config.update(conf)  # hack? TODO
    cherrypy.quickstart(Root(), "/", config=conf)
# Module-level template environment used by Root's handlers.
env = config_jinja()

if __name__ == "__main__":
    # CLI entry point: import/export/dump the link database, or run the server.
    parser = OptionParser()
    parser.add_option("-i", dest="importfile", action="store",
                      help="Import a link database from a file.")
    parser.add_option("-e", action="store", dest="exportfile",
                      help="Export a link database to a file.")
    parser.add_option("--dump", dest="dump", action="store_true",
                      help="Dump the db to stdout.")
    parser.add_option("--runas", dest="runas",
                      help="Run as the provided user.")
    (opts, args) = parser.parse_args()

    if opts.importfile:
        MYGLOBALS.g_db._import(opts.importfile)
    elif opts.exportfile:
        MYGLOBALS.g_db._export(opts.exportfile)
    elif opts.dump:
        MYGLOBALS.g_db._dump(sys.stdout)
    else:
        env = config_jinja()
        main(opts)
| 2.390625 | 2 |
setup.py | zathras777/rosti | 0 | 12772079 | <reponame>zathras777/rosti<gh_stars>0
from setuptools import setup
from os import path
import io

here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with io.open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Package metadata for the rosti PHP-site cleanup tool.
setup(
    name='rosti',
    version='0.4.1',
    description='Script to clean nasty code from a compromised php site.',
    long_description=long_description,
    url='https://github.com/zathras777/rosti',
    author='<NAME>',
    author_email='<EMAIL>',
    license='Unlicense',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
    ],
    keywords='php scanner infected',
    entry_points={
        'console_scripts': ['rosti=rosti:main']
    },
    download_url='https://github.com/zathras777/rosti/archive/0.4.1.zip',
    test_suite='tests'
)
| 1.703125 | 2 |
tinysqlbuilder/builder.py | koichirock/tinysqlbuilder | 0 | 12772080 | from typing import Union
from .sql import Condition, Query, full_outer_join, inner_join, left_outer_join, right_outer_join
# Public API of this module; was previously bound to the name `all`,
# which shadowed the builtin and did not act as an export list.
__all__ = ["Query", "QueryBuilder"]
class QueryBuilder:
    """Fluent builder that assembles a Query step by step."""

    def __init__(self, table: str) -> None:
        self._query = Query(table)

    def select(self, *columns: str) -> "QueryBuilder":
        """Select columns."""
        self._query.columns = list(columns)
        return self

    def where(self, condition: Union[str, Condition]) -> "QueryBuilder":
        """Add condition."""
        self._query.condition = condition
        return self

    def _append_join(self, factory, table, condition) -> "QueryBuilder":
        # Shared implementation for all four join flavors.
        self._query.joins.append(factory(table, condition))
        return self

    def join(self, table: Union[str, Query], condition: Union[str, Condition]) -> "QueryBuilder":
        """Add join."""
        return self._append_join(inner_join, table, condition)

    def left_outer_join(
        self, table: Union[str, Query], condition: Union[str, Condition]
    ) -> "QueryBuilder":
        """Add left outer join."""
        return self._append_join(left_outer_join, table, condition)

    def right_outer_join(
        self, table: Union[str, Query], condition: Union[str, Condition]
    ) -> "QueryBuilder":
        """Add right outer join."""
        return self._append_join(right_outer_join, table, condition)

    def full_outer_join(
        self, table: Union[str, Query], condition: Union[str, Condition]
    ) -> "QueryBuilder":
        """Add full outer join."""
        return self._append_join(full_outer_join, table, condition)

    def subquery(self, alias: str) -> "QueryBuilder":
        """Build subquery."""
        self._query.alias = alias
        return self

    def build(self) -> Query:
        """Build query."""
        return self._query
| 2.921875 | 3 |
bench/dj/hello/views.py | edilio/PyWebPerf | 0 | 12772081 | import django
from django.http import HttpResponse
import random
def rand_string(min, max):
    """Returns a randomly-generated string, of a random length.

    Args:
        min (int): Minimum string length to return, inclusive
        max (int): Maximum string length to return, inclusive
    """
    rand_int = random.randint
    length = rand_int(min, max)

    # Printable ASCII range: ' ' (0x20) through '~' (0x7e), inclusive.
    chars = []
    for _ in range(length):
        chars.append(chr(rand_int(ord(' '), ord('~'))))
    return ''.join(chars)
# Fixed 10 KiB response payload and canned headers, generated once at import.
BODY = rand_string(10240, 10240).encode('utf-8')  # NOQA
HEADERS = {'X-Test': 'Funky Chicken'}
# Aliases consumed by the view function below.
_body = BODY
_headers = HEADERS
def hello(request, account_id):
    """Benchmark view: touch typical request attributes, return a fixed body."""
    # These reads are part of the measured workload; values are unused.
    user_agent = request.META['HTTP_USER_AGENT']  # NOQA
    limit = request.GET.get('limit', '10')  # NOQA

    response = HttpResponse(_body)
    for header_name, header_value in _headers.items():
        response[header_name] = header_value
    return response
| 2.71875 | 3 |
experiment/dow_jones.py | IbarakikenYukishi/two-stage-MDL | 2 | 12772082 | <reponame>IbarakikenYukishi/two-stage-MDL<filename>experiment/dow_jones.py
import sys
sys.path.append('./')
from utils.utils import calc_AUC
from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
import utils.hsdmdl2_nml as hsdmdl2_nml
import utils.hsdmdl1_nml as hsdmdl1_nml
import utils.sdmdl_nml as sdmdl_nml
import tsmdl.aw2s_mdl as aw2s_mdl
import tsmdl.fw2s_mdl as fw2s_mdl
import dmdl.hsdmdl2 as hsdmdl2
import dmdl.hsdmdl1 as hsdmdl1
import dmdl.sdmdl as sdmdl
from functools import partial
import pandas as pd
import optuna
import changefinder
import bocpd
import datetime as dt
import boto3
from settings import AWS_S3_BUCKET_NAME
# data frame
df = pd.read_csv('./data/dow-jones-closing.csv')
# starting and ending date
start_date = dt.datetime(1972, 7, 5)
end_date = dt.datetime(1975, 6, 30)
df['Date'] = pd.to_datetime(df['Date'])
df['Rate'] = 0
for i in range(1, len(df)):
df.iloc[i, 2] = (df.iloc[i, 1] / df.iloc[i - 1, 1]) - 1
df = df[(df['Date'] >= start_date) & (df['Date'] <= end_date)]
df = df.reset_index(drop=True)
# starting points of gradual changes
cp1 = dt.datetime(1973, 1, 30)
cp2 = dt.datetime(1973, 10, 19)
cp3 = dt.datetime(1974, 8, 9)
# 1) the conviction of <NAME> and <NAME>, Jr. in the Watergate incident on January 30, 1973
# 2) the declaration of the oil embargo by the Organization of Petroleum Exporting Countries (OPEC) against the United States on October 19, 1973
# 3) the resignation of President Nixon on August 9, 1974.
cp1_index = len(df[df['Date'] <= cp1])
cp2_index = len(df[df['Date'] <= cp2])
cp3_index = len(df[df['Date'] <= cp3])
changepoints = [cp1_index, cp2_index, cp3_index]
# parameters
tolerance_delay = 25
n_trials = 200
both = True
# BOCPD
def _objective_BOCPD(trial, train, changepoints, tolerance_delay):
    # Optuna objective for BOCPD: sample hyperparameters, score the series,
    # and return negative AUC (Optuna minimizes).
    lam = trial.suggest_int('lam', 2, 100)
    alpha = trial.suggest_uniform('alpha', 1e-8, 10)
    beta = trial.suggest_uniform('beta', 1e-8, 0.001)
    kappa = trial.suggest_uniform('kappa', 1e-8, 0.001)
    # Prior mean fixed at 0 (return series is roughly centered).
    mu = 0
    #mu = trial.suggest_uniform('mu', -1, 1)

    h = partial(bocpd.constant_hazard, lam)
    lik = bocpd.StudentT(alpha, beta, kappa, mu)
    retrospective = bocpd.Retrospective(hazard_func=h, likelihood_func=lik)

    scores = retrospective.calc_scores(train)
    AUC = calc_AUC(scores, changepoints, tolerance_delay, both=both)
    return -AUC
# hyperparameter tuning
objective_BOCPD = partial(_objective_BOCPD, train=np.array(df['Rate']),
changepoints=changepoints, tolerance_delay=tolerance_delay)
study = optuna.create_study()
study.optimize(objective_BOCPD, n_trials=n_trials)
# calculate scores
opt_lam = study.best_params['lam']
opt_alpha = study.best_params['alpha']
opt_beta = study.best_params['beta']
opt_kappa = study.best_params['kappa']
#opt_mu = study.best_params['mu']
opt_mu = 0
h = partial(bocpd.constant_hazard, opt_lam)
lik = bocpd.StudentT(opt_alpha, opt_beta, opt_kappa, opt_mu)
retrospective = bocpd.Retrospective(hazard_func=h, likelihood_func=lik)
bocpd_scores = retrospective.calc_scores(np.array(df['Rate']))
# CF
def _objective_CF(trial, train, changepoints, tolerance_delay):
    """Optuna objective for ChangeFinder; returns negative AUC (minimized)."""
    # hyperparameters
    forgetting = trial.suggest_uniform('r', 0.01, 0.99)
    ar_order = trial.suggest_int('order', 1, 20)
    smoothing = trial.suggest_int('smooth', 3, 20)

    retrospective = changefinder.Retrospective(r=forgetting, order=ar_order, smooth=smoothing)
    scores = retrospective.calc_scores(train)
    return -calc_AUC(scores, changepoints, tolerance_delay, both=both)
# hyperparameter tuning
objective_CF = partial(_objective_CF, train=np.array(df['Rate']),
changepoints=changepoints, tolerance_delay=tolerance_delay)
study = optuna.create_study()
study.optimize(objective_CF, n_trials=n_trials, n_jobs=-1)
opt_r = study.best_params['r']
opt_order = study.best_params['order']
opt_smooth = study.best_params['smooth']
retrospective = changefinder.Retrospective(
r=opt_r, order=opt_order, smooth=opt_smooth)
cf_scores = retrospective.calc_scores(np.array(df['Rate']))
# SDMDL 0th
def _objective_SDMDL(trial, train, changepoints, tolerance_delay, params):
    # Optuna objective for sequential D-MDL of order params["order"]:
    # only the window size is tuned; returns negative AUC (Optuna minimizes).
    nml_gaussian = partial(sdmdl_nml.nml_gaussian, mu_max=params[
                           "mu_max"], div_min=params["div_min"], div_max=params["div_max"])
    complexity_gaussian = partial(sdmdl_nml.complexity_gaussian, mu_max=params[
                                  "mu_max"], div_min=params["div_min"], div_max=params["div_max"])
    window_size = trial.suggest_int('window_size', 5, 60)
    retrospective = sdmdl.Retrospective(h=window_size, encoding_func=nml_gaussian,
                                        complexity_func=complexity_gaussian, order=params["order"])
    scores = retrospective.calc_scores(train)
    n = len(scores)
    # Shift scores right by one window so each score aligns with the end of
    # its detection window; the first 2*window_size entries are undefined.
    scores[2 * window_size - 1: n] = scores[window_size - 1: n - window_size]
    scores[0:2 * window_size] = np.nan
    AUC = calc_AUC(scores, changepoints, tolerance_delay, both=both)
    return -AUC
mu_max = 10
div_min = 1e-2
div_max = 1e2
params = {"mu_max": mu_max, "div_min": div_min,
"div_max": div_max, "order": 0}
# 0th D-MDL
# hyperparameter tuning
objective_SDMDL = partial(_objective_SDMDL, train=np.array(df['Rate']),
changepoints=changepoints, tolerance_delay=tolerance_delay, params=params)
study = optuna.create_study()
study.optimize(objective_SDMDL, n_trials=n_trials, n_jobs=-1)
opt_window_size = study.best_params['window_size']
nml_gaussian = partial(sdmdl_nml.nml_gaussian, mu_max=params["mu_max"],
div_min=params["div_min"], div_max=params["div_max"])
complexity_gaussian = partial(sdmdl_nml.complexity_gaussian, mu_max=params["mu_max"],
div_min=params["div_min"], div_max=params["div_max"])
retrospective = sdmdl.Retrospective(h=opt_window_size, encoding_func=nml_gaussian,
complexity_func=complexity_gaussian, order=params["order"])
sdmdl_0_scores = retrospective.calc_scores(np.array(df['Rate']))
n = len(sdmdl_0_scores)
sdmdl_0_scores[2 * opt_window_size -
1: n] = sdmdl_0_scores[opt_window_size - 1: n - opt_window_size]
sdmdl_0_scores[0:2 * opt_window_size] = np.nan
# 1st D-MDL
params = {"mu_max": mu_max, "div_min": div_min,
"div_max": div_max, "order": 1}
# hyperparameter tuning
objective_SDMDL = partial(_objective_SDMDL, train=np.array(df['Rate']),
changepoints=changepoints, tolerance_delay=tolerance_delay, params=params)
study = optuna.create_study()
study.optimize(objective_SDMDL, n_trials=n_trials, n_jobs=-1)
opt_window_size = study.best_params['window_size']
nml_gaussian = partial(sdmdl_nml.nml_gaussian, mu_max=params["mu_max"],
div_min=params["div_min"], div_max=params["div_max"])
complexity_gaussian = partial(sdmdl_nml.complexity_gaussian, mu_max=params["mu_max"],
div_min=params["div_min"], div_max=params["div_max"])
retrospective = sdmdl.Retrospective(h=opt_window_size, encoding_func=nml_gaussian,
complexity_func=complexity_gaussian, order=params["order"])
sdmdl_1_scores = retrospective.calc_scores(np.array(df['Rate']))
n = len(sdmdl_1_scores)
sdmdl_1_scores[2 * opt_window_size -
1: n] = sdmdl_1_scores[opt_window_size - 1: n - opt_window_size]
sdmdl_1_scores[0:2 * opt_window_size] = np.nan
# 2nd D-MDL
params = {"mu_max": mu_max, "div_min": div_min,
"div_max": div_max, "order": 2}
# hyperparameter tuning
objective_SDMDL = partial(_objective_SDMDL, train=np.array(df['Rate']),
changepoints=changepoints, tolerance_delay=tolerance_delay, params=params)
study = optuna.create_study()
study.optimize(objective_SDMDL, n_trials=n_trials, n_jobs=-1)
opt_window_size = study.best_params['window_size']
nml_gaussian = partial(sdmdl_nml.nml_gaussian, mu_max=params["mu_max"],
div_min=params["div_min"], div_max=params["div_max"])
complexity_gaussian = partial(sdmdl_nml.complexity_gaussian, mu_max=params["mu_max"],
div_min=params["div_min"], div_max=params["div_max"])
retrospective = sdmdl.Retrospective(h=opt_window_size, encoding_func=nml_gaussian,
complexity_func=complexity_gaussian, order=params["order"])
sdmdl_2_scores = retrospective.calc_scores(np.array(df['Rate']))
n = len(sdmdl_2_scores)
sdmdl_2_scores[2 * opt_window_size -
1: n] = sdmdl_2_scores[opt_window_size - 1: n - opt_window_size]
sdmdl_2_scores[0:2 * opt_window_size] = np.nan
# FW2S_MDL
def _objective_FW2S_MDL(trial, train, changepoints, tolerance_delay, params):
    """Optuna objective for the FW2S-MDL detector: sample hyperparameters,
    score `train`, and return the negative AUC (Optuna minimizes).

    NOTE(review): relies on `sdmdl`, `sdmdl_nml`, `fw2s_mdl`, `calc_AUC`,
    `np`, `partial` and the outer-scope flag `both`, all defined earlier
    in the script.
    """
    # Gaussian NML code-length / complexity functions shared by both stages.
    nml_gaussian = partial(sdmdl_nml.nml_gaussian, mu_max=params[
        "mu_max"], div_min=params["div_min"], div_max=params["div_max"])
    complexity_gaussian = partial(sdmdl_nml.complexity_gaussian, mu_max=params[
        "mu_max"], div_min=params["div_min"], div_max=params["div_max"])
    # Hyperparameters searched by Optuna.
    delta_0 = trial.suggest_uniform('delta_0', 0.001, 0.499)
    window_size_1 = trial.suggest_int('window_size_1', 5, 60)
    window_size_2 = trial.suggest_int('window_size_2', 5, 60)
    retrospective_first = sdmdl.Retrospective(h=window_size_1, encoding_func=nml_gaussian,
                                              complexity_func=complexity_gaussian, delta_0=delta_0, order=0)
    retrospective_second = sdmdl.Retrospective(h=window_size_2, encoding_func=nml_gaussian,
                                               complexity_func=complexity_gaussian, order=0)
    retrospective = fw2s_mdl.Retrospective(
        retrospective_first, retrospective_second)
    scores = retrospective.calc_scores(train)
    n = len(scores)
    # Shift scores right by the combined window size (aligning each score
    # with the point it refers to) and blank the warm-up region with NaN,
    # mirroring how the final scores are post-processed below in the script.
    scores[2 * (window_size_1 + window_size_2): n] = scores[window_size_1 +
                                                            window_size_2: n - (window_size_1 + window_size_2)]
    scores[0:2 * (window_size_1 + window_size_2)] = np.nan
    AUC = calc_AUC(scores, changepoints, tolerance_delay, both=both)
    # Negated because optuna.create_study() minimizes by default.
    return -AUC
mu_max = 10
div_min = 1e-2
div_max = 1e2
params = {"mu_max": mu_max, "div_min": div_min,
"div_max": div_max}
# hyperparameter tuning
objective_FW2S_MDL = partial(_objective_FW2S_MDL, train=np.array(df['Rate']),
changepoints=changepoints, tolerance_delay=tolerance_delay, params=params)
study = optuna.create_study()
study.optimize(objective_FW2S_MDL, n_trials=n_trials, n_jobs=-1)
opt_delta_0 = study.best_params['delta_0']
opt_window_size_1 = study.best_params['window_size_1']
opt_window_size_2 = study.best_params['window_size_2']
nml_gaussian = partial(sdmdl_nml.nml_gaussian, mu_max=params["mu_max"],
div_min=params["div_min"], div_max=params["div_max"])
complexity_gaussian = partial(sdmdl_nml.complexity_gaussian, mu_max=params["mu_max"],
div_min=params["div_min"], div_max=params["div_max"])
retrospective_first = sdmdl.Retrospective(h=opt_window_size_1, encoding_func=nml_gaussian,
complexity_func=complexity_gaussian, delta_0=opt_delta_0, order=0)
retrospective_second = sdmdl.Retrospective(h=opt_window_size_2, encoding_func=nml_gaussian,
complexity_func=complexity_gaussian, order=0)
retrospective = fw2s_mdl.Retrospective(
retrospective_first, retrospective_second)
fw2s_mdl_scores = retrospective.calc_scores(np.array(df['Rate']))
n = len(fw2s_mdl_scores)
fw2s_mdl_scores[2 * (opt_window_size_1 + opt_window_size_2): n] = fw2s_mdl_scores[opt_window_size_1 +
opt_window_size_2: n - (opt_window_size_1 + opt_window_size_2)]
fw2s_mdl_scores[0:2 * (opt_window_size_1 + opt_window_size_2)] = np.nan
sdmdl_0_scores[np.isnan(sdmdl_0_scores)] = np.nanmin(sdmdl_0_scores)
sdmdl_1_scores[np.isnan(sdmdl_1_scores)] = np.nanmin(sdmdl_1_scores)
sdmdl_2_scores[np.isnan(sdmdl_2_scores)] = np.nanmin(sdmdl_2_scores)
fw2s_mdl_scores[np.isnan(fw2s_mdl_scores)] = np.nanmin(fw2s_mdl_scores)
print("SDMDL 0th: ", calc_AUC(sdmdl_0_scores,
changepoints, tolerance_delay, both=both))
print("SDMDL 1st: ", calc_AUC(sdmdl_1_scores,
changepoints, tolerance_delay, both=both))
print("SDMDL 2nd: ", calc_AUC(sdmdl_2_scores,
changepoints, tolerance_delay, both=both))
print("FW2S-MDL: ", calc_AUC(fw2s_mdl_scores,
changepoints, tolerance_delay, both=both))
print("ChangeFinder: ", calc_AUC(
cf_scores, changepoints, tolerance_delay, both=both))
print("BOCPD: ", calc_AUC(bocpd_scores, changepoints, tolerance_delay, both=both))
df_result = pd.DataFrame()
row = pd.DataFrame({"method: ": ["SDMDL 0th"], "AUC: ": calc_AUC(
sdmdl_0_scores, changepoints, tolerance_delay, both=both)})
df_result = pd.concat([df_result, row], axis=0)
row = pd.DataFrame({"method: ": ["SDMDL 1st"], "AUC: ": calc_AUC(
sdmdl_1_scores, changepoints, tolerance_delay, both=both)})
df_result = pd.concat([df_result, row], axis=0)
row = pd.DataFrame({"method: ": ["SDMDL 2nd"], "AUC: ": calc_AUC(
sdmdl_2_scores, changepoints, tolerance_delay, both=both)})
df_result = pd.concat([df_result, row], axis=0)
row = pd.DataFrame({"method: ": ["FW2S-MDL"], "AUC: ": calc_AUC(
fw2s_mdl_scores, changepoints, tolerance_delay, both=both)})
df_result = pd.concat([df_result, row], axis=0)
row = pd.DataFrame({"method: ": ["ChangeFinder"], "AUC: ": calc_AUC(
cf_scores, changepoints, tolerance_delay, both=both)})
df_result = pd.concat([df_result, row], axis=0)
row = pd.DataFrame({"method: ": ["BOCPD"], "AUC: ": calc_AUC(
bocpd_scores, changepoints, tolerance_delay, both=both)})
df_result = pd.concat([df_result, row], axis=0)
df_result.to_csv("dow_jones.csv", index=False)
plt.clf()
fig, axes = plt.subplots(nrows=7, ncols=1, figsize=(20, 20))
fontsize = 18
axes[0].plot(df['Date'], df['Rate'], label='Raw Data')
axes[0].set_ylabel('Return', fontsize=fontsize)
axes[0].vlines([cp1, cp2, cp3], np.min(df['Rate']),
np.max(df['Rate']), color="black")
axes[0].set_ylim(np.min(df['Rate']), np.max(df['Rate']))
axes[0].legend(bbox_to_anchor=(1, 1), loc='upper right',
borderaxespad=0, fontsize=fontsize)
axes[0].set_xlim(start_date, end_date)
axes[0].grid(True)
axes[1].plot(df['Date'], sdmdl_0_scores, label='SD-MDL 0th')
axes[1].set_ylabel('Description Length', fontsize=fontsize)
axes[1].vlines([cp1, cp2, cp3], np.nanmin(sdmdl_0_scores) - 1,
np.nanmax(sdmdl_0_scores) + 1, color="black")
axes[1].set_ylim(np.nanmin(sdmdl_0_scores) - 1, np.nanmax(sdmdl_0_scores) + 1)
axes[1].legend(bbox_to_anchor=(1, 1), loc='upper right',
borderaxespad=0, fontsize=fontsize)
axes[1].set_xlim(start_date, end_date)
axes[1].grid(True)
axes[2].plot(df['Date'], sdmdl_1_scores, label='SD-MDL 1st')
axes[2].set_ylabel('Description Length', fontsize=fontsize)
axes[2].vlines([cp1, cp2, cp3], 0, np.nanmax(sdmdl_1_scores), color="black")
axes[2].set_ylim(0, np.nanmax(sdmdl_1_scores))
axes[2].legend(bbox_to_anchor=(1, 1), loc='upper right',
borderaxespad=0, fontsize=fontsize)
axes[2].set_xlim(start_date, end_date)
axes[2].grid(True)
axes[3].plot(df['Date'], sdmdl_2_scores, label='SD-MDL 2nd')
axes[3].set_ylabel('Description Length', fontsize=fontsize)
axes[3].vlines([cp1, cp2, cp3], 0, np.nanmax(sdmdl_1_scores), color="black")
axes[3].set_ylim(0, np.nanmax(sdmdl_1_scores))
axes[3].legend(bbox_to_anchor=(1, 1), loc='upper right',
borderaxespad=0, fontsize=fontsize)
axes[3].set_xlim(start_date, end_date)
axes[3].grid(True)
axes[4].plot(df['Date'], fw2s_mdl_scores, label='FW2S-MDL')
axes[4].set_ylabel('Description Length', fontsize=fontsize)
axes[4].vlines([cp1, cp2, cp3], np.nanmin(fw2s_mdl_scores) - 1,
np.nanmax(fw2s_mdl_scores) + 1, color="black")
axes[4].set_ylim(np.nanmin(fw2s_mdl_scores) - 1,
np.nanmax(fw2s_mdl_scores) + 1)
axes[4].legend(bbox_to_anchor=(1, 1), loc='upper right',
borderaxespad=0, fontsize=fontsize)
axes[4].set_xlim(start_date, end_date)
axes[4].grid(True)
axes[5].plot(df['Date'], cf_scores, label='ChangeFinder')
axes[5].set_ylabel('Change Score', fontsize=fontsize)
axes[5].vlines([cp1, cp2, cp3], np.nanmin(cf_scores) - 1,
np.nanmax(cf_scores) + 1, color="black")
axes[5].set_ylim(np.nanmin(cf_scores) - 1, np.nanmax(cf_scores) + 1)
axes[5].legend(bbox_to_anchor=(1, 1), loc='upper right',
borderaxespad=0, fontsize=fontsize)
axes[5].set_xlim(start_date, end_date)
axes[5].grid(True)
axes[6].plot(df['Date'], bocpd_scores, label='BOCPD')
axes[6].set_ylabel('Change Score', fontsize=fontsize)
axes[6].vlines([cp1, cp2, cp3], np.nanmin(bocpd_scores),
np.nanmax(bocpd_scores), color="black")
axes[6].set_ylim(np.nanmin(bocpd_scores), np.nanmax(bocpd_scores))
axes[6].legend(bbox_to_anchor=(1, 1), loc='upper right',
borderaxespad=0, fontsize=fontsize)
axes[6].set_xlim(start_date, end_date)
axes[6].grid(True)
fig.savefig("dow_jones.png")
# upload to s3
s3 = boto3.resource('s3')
bucket = s3.Bucket(AWS_S3_BUCKET_NAME)
bucket.upload_file("dow_jones.png", "dow_jones/" + "dow_jones.png")
bucket.upload_file("dow_jones.csv", "dow_jones/" + "dow_jones.csv")
| 2.328125 | 2 |
shard/src/ext/statistics_cog.py | architus/aut-bot | 27 | 12772083 | <reponame>architus/aut-bot
from datetime import timedelta, datetime
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor
from discord.ext import commands
from discord import Forbidden, HTTPException
import discord
import json
import pytz
from typing import Dict, List, Counter as TCounter
from string import punctuation
import src.generate.wordcount as wordcount_gen
from src.generate import member_growth
from lib.config import DISCORD_EPOCH, logger
from lib.ipc import manager_pb2 as message_type
from src.utils import mention_to_name, doc_url
class GuildData:
    """Accumulated message statistics for a single guild.

    Every counter is keyed per channel id so that results can later be
    filtered down to the channels the requesting member is allowed to read
    (see `_merge_counts` / `_allowed_channels`).
    """

    def __init__(self, bot, guild, dictionary, time_granularity=timedelta(days=1)):
        self.bot = bot
        # Messages after this timestamp are indexed; earlier history is
        # backfilled in 30-day slices by MessageStats.cache_guilds_history.
        self._up_to_date_after = pytz.utc.localize(datetime.utcnow())
        self.guild = guild
        # `dictionary` is a (wordlist, stopwords) pair loaded from JSON.
        self.dictionary, self.stops = dictionary
        # Flipped to True when a channel denies history access.
        self.forbidden = False
        # Bucket width for the per-time message counts in `times`.
        self.time_granularity = time_granularity
        self._join_dates = []
        self._last_activity = {}             # channel id -> datetime of newest message seen
        self._message_count = Counter()      # channel id -> message count
        self.correct_word_count = Counter()  # channel id -> correctly spelled words
        self.words = defaultdict(Counter)          # channel id -> word -> occurrences
        self.correct_words = defaultdict(Counter)  # channel id -> member id -> correct words
        self.member_words = defaultdict(Counter)   # channel id -> member id -> total words
        self.mentions = defaultdict(Counter)       # channel id -> mentioned id -> mentions
        self.members = defaultdict(Counter)        # channel id -> author id -> messages
        self.channels = Counter()                  # channel id -> total messages
        # channel id -> time bucket -> member id -> message count
        self.times = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))

    def count_correct(self, string):
        '''Return the number of correctly spelled words in `string`
        (words present in the loaded dictionary, plus bare "a"/"i").'''
        return len([w for w in string.split() if w in self.dictionary or w.upper() in ('A', 'I')])

    def _filter_words(self, msg: discord.Message, words: List[str]):
        # Ignore the bot's own messages entirely, then drop stopwords and
        # bare punctuation tokens before word counting.
        if msg.author == self.bot.user:
            return []
        filtered = []
        for w in words:
            if w in self.stops:
                continue
            elif w in punctuation:
                continue
            else:
                filtered.append(w)
        return filtered

    def _merge_counts(self, count: Dict[int, TCounter[int]], member: discord.Member) -> TCounter[int]:
        """Merge the per-channel counters in `count`, keeping only the
        channels that `member` can read (read_messages + read_message_history)."""
        combined = Counter()
        # NOTE: the loop variable deliberately rebinds `count` to each
        # per-channel Counter; `count.items()` is evaluated once up front.
        for ch_id, count in count.items():
            ch = self.guild.get_channel(ch_id)
            if ch is None:
                continue
            perms = ch.permissions_for(member)
            if perms is not None and perms.read_messages and perms.read_message_history:
                combined.update(count)
        return combined

    def _allowed_channels(self, ch_ids: List[int], member: discord.Member) -> List[int]:
        # Generator yielding the ids from `ch_ids` that `member` can read
        # (annotation says List[int] but this is actually a generator).
        for ch_id in ch_ids:
            ch = self.guild.get_channel(ch_id)
            if ch is None:
                continue
            perms = ch.permissions_for(member)
            if perms is not None and perms.read_messages and perms.read_message_history:
                yield ch_id

    @property
    def up_to_date(self):
        # Fully indexed once the backfill cursor reaches the Discord epoch.
        return self._up_to_date_after == DISCORD_EPOCH

    @property
    def up_to_date_after(self):
        return self._up_to_date_after

    @up_to_date_after.setter
    def up_to_date_after(self, after: datetime):
        # Normalize to an aware UTC datetime and clamp at the Discord epoch.
        if after.tzinfo is None:
            after = pytz.utc.localize(after)
        self._up_to_date_after = max(DISCORD_EPOCH, after)

    @property
    def join_dates(self):
        # Lazily computed; note the values are member *creation* dates,
        # not guild join dates — TODO confirm intent.
        if self._join_dates is None:
            self._join_dates = tuple(m.created_at for m in self.guild.members)
        return self._join_dates

    async def process_message(self, msg):
        """Fold a single message into every per-channel counter."""
        ch = msg.channel
        self._message_count[ch.id] += 1
        self._last_activity[ch.id] = msg.created_at
        words = [w.lower() for w in msg.content.split()]
        self.correct_word_count[ch.id] += self.count_correct(msg.content)
        self.words[ch.id].update(self._filter_words(msg, words))
        self.correct_words[ch.id][msg.author.id] += self.count_correct(msg.content)
        self.member_words[ch.id][msg.author.id] += len(words)
        # Snap the timestamp down to the start of its time bucket.
        date = msg.created_at - ((pytz.utc.localize(msg.created_at) - DISCORD_EPOCH) % self.time_granularity)
        self.times[ch.id][date][msg.author.id] += 1
        self.mentions[ch.id].update([m.id for m in msg.mentions])
        self.members[ch.id][msg.author.id] += 1
        self.channels[ch.id] += 1

    def architus_count(self, member: discord.Member):
        # Number of messages the bot itself sent, in channels `member` can see.
        return self._merge_counts(self.members, member).get(self.bot.user.id, 0)

    @property
    def member_count(self):
        return self.guild.member_count

    def times_as_strings(self, member: discord.Member):
        """Merge the per-channel time buckets (last 90 days only) across the
        channels `member` can read, keyed by ISO-format bucket timestamps."""
        combined = {}
        for ch_id in self._allowed_channels(self.times.keys(), member):
            for date, members in self.times[ch_id].items():
                if date < datetime.now() - timedelta(days=90):
                    continue
                if date in combined:
                    # NOTE(review): this loop shadows the `member` parameter;
                    # harmless here since _allowed_channels already captured it.
                    for member, count in members.items():
                        if member in combined[date]:
                            combined[date][member] += count
                        else:
                            combined[date][member] = count
                else:
                    combined[date] = dict(members)
        return {k.isoformat(): v for k, v in combined.items()}

    def channel_counts(self, member: discord.Member):
        # channel id -> message count, restricted to readable channels.
        return {i: self.channels[i] for i in self._allowed_channels(self.channels.keys(), member)}

    def message_count(self, member: discord.Member):
        # Total messages across the channels `member` can read.
        msgs = 0
        for ch_id in self._allowed_channels(self.channels.keys(), member):
            msgs += self.channels[ch_id]
        return msgs

    def last_activity(self, member: discord.Member):
        # Most recent message timestamp visible to `member` (naive datetime).
        last = DISCORD_EPOCH.replace(tzinfo=None)
        for ch_id in self._allowed_channels(self._last_activity.keys(), member):
            if self._last_activity[ch_id] > last:
                last = self._last_activity[ch_id]
        return last

    def member_counts(self, member: discord.Member):
        # author id -> message count, filtered by `member`'s visibility.
        return dict(self._merge_counts(self.members, member))

    def correct_counts(self, member: discord.Member):
        # author id -> correctly spelled word count.
        return dict(self._merge_counts(self.correct_words, member))

    def mention_counts(self, member: discord.Member):
        # mentioned member id -> times mentioned.
        return dict(self._merge_counts(self.mentions, member))

    def mention_count(self, member: discord.Member):
        return sum(self._merge_counts(self.mentions, member).values())

    def word_count(self, member: discord.Member):
        return sum(self._merge_counts(self.words, member).values())

    def word_counts(self, member: discord.Member):
        # author id -> total word count.
        return dict(self._merge_counts(self.member_words, member))

    def common_words(self, member: discord.Member):
        """Top-75 words visible to `member`, with mention tokens resolved to
        display names where possible."""
        words = self._merge_counts(self.words, member).most_common(75)
        for i, pair in enumerate(words):
            try:
                name = mention_to_name(self.guild, pair[0])
                words[i] = (name, pair[1])
            except ValueError:
                # Not a mention token; leave the raw word in place.
                continue
        return words
class MessageStats(commands.Cog, name="Server Statistics"):
    """Discord cog exposing message/word statistics commands, backed by a
    per-guild GuildData cache that is backfilled from channel history."""

    def __init__(self, bot):
        self.bot = bot
        self.cache = {}  # type: Dict[int, GuildData]
        # Wordlist and stopwords shared by every GuildData instance.
        with open('res/words/words.json') as f:
            self.dictionary = json.loads(f.read())
        with open('res/words/stops.json') as f:
            self.stops = json.loads(f.read())

    async def cache_guilds_history(self):
        """Backfill message history for every guild, one 30-day slice per
        pass, until each GuildData reports up_to_date."""
        while not all(d.up_to_date for d in self.cache.values()):
            for guild_d in self.cache.values():
                if guild_d.up_to_date:
                    continue
                before = guild_d.up_to_date_after
                after = max(before - timedelta(days=30), DISCORD_EPOCH)
                msgs = []
                # for/else: the else runs only if no channel aborted the
                # loop with break (the break below is currently disabled).
                for ch in guild_d.guild.text_channels:
                    try:
                        async for msg in ch.history(
                                before=before.replace(tzinfo=None), after=after.replace(tzinfo=None), limit=None):
                            msgs.append(msg)
                    except Forbidden:
                        guild_d.forbidden = True
                    except HTTPException:
                        logger.exception(f"error while downloading '{ch.guild.name}.{ch.name}'")
                        # break
                        # TODO retry a few times
                else:
                    # Only advance the backfill cursor once every channel in
                    # the slice has been downloaded and processed.
                    for msg in msgs:
                        await guild_d.process_message(msg)
                    guild_d.up_to_date_after = after

    def append_warning(self, data: GuildData, em: discord.Embed):
        # Stamp a footer on the embed while history is still being indexed.
        if not data.up_to_date:
            em.set_footer(
                text=f"Still indexing data before {data.up_to_date_after}",
                icon_url="https://emojipedia-us.s3.dualstack.us-west-1"
                ".amazonaws.com/thumbs/120/twitter/259/warning_26a0.png")

    @commands.Cog.listener()
    async def on_ready(self):
        # Rebuild the cache from scratch each time the bot (re)connects.
        logger.debug(f"Caching messages for {len(self.bot.guilds)} guilds...")
        self.cache = {g.id: GuildData(self.bot, g, (self.dictionary, self.stops)) for g in self.bot.guilds}
        await self.cache_guilds_history()
        logger.debug(f"Message cache up-to-date for {len(self.bot.guilds)} guilds...")

    @commands.Cog.listener()
    async def on_message(self, msg):
        # Fold live messages into the cache; ignore DMs (no guild).
        if not msg.channel.guild:
            return
        await self.cache[msg.channel.guild.id].process_message(msg)

    @commands.Cog.listener()
    async def on_member_update(self, before, after):
        # Only the bot's own permission changes matter: they change which
        # channels were readable, so the guild cache must be rebuilt.
        if before != self.bot.user:
            return
        if before.guild_permissions != after.guild_permissions:
            self.cache[before.guild.id] = GuildData(self.bot, before.guild, (self.dictionary, self.stops))
            await self.cache_guilds_history()

    @commands.Cog.listener()
    async def on_guild_join(self, guild):
        # NOTE(review): the new guild is never added to self.cache here, so
        # this call is a no-op until on_ready fires again — confirm intent.
        await self.cache_guilds_history()

    @commands.command(aliases=['exclude'])
    @doc_url("https://docs.archit.us/commands/statistics/#optout")
    async def optout(self, ctx):
        """optout
        Prevents Architus from displaying statistics about you.
        Run again to reallow collection.
        """
        # Toggle the author's presence in the guild's exclusion list.
        settings = self.bot.settings[ctx.guild]
        excludes = settings.stats_exclude
        author = ctx.author
        if author.id in excludes:
            excludes.remove(author.id)
            await ctx.send(f"{author.display_name}'s message data is now available")
        else:
            excludes.append(author.id)
            await ctx.send(f"{author.display_name}'s message data is now hidden")
        settings.stats_exclude = excludes

    @commands.command(aliases=['growth'])
    @doc_url("https://docs.archit.us/commands/statistics/#joins")
    async def joins(self, ctx):
        """growth
        View a pretty chart of member growth on the server.
        """
        # Render the chart, publish it via the manager service, then embed it.
        img = member_growth.generate(ctx.guild.members)
        data = await self.bot.manager_client.publish_file(iter([message_type.File(file=img)]))
        em = discord.Embed(title="Server Growth", description=ctx.guild.name)
        em.set_image(url=data.url)
        em.color = 0x35a125
        em.set_footer(text=f"{ctx.guild.name} has a total of {ctx.guild.member_count} members")
        await ctx.channel.send(embed=em)

    @commands.command()
    @doc_url("https://docs.archit.us/commands/statistics/#spellcheck")
    async def spellcheck(self, ctx, victim: discord.Member = None):
        """spellcheck <member>
        check the spelling of a member
        """
        if victim is None:
            victim = ctx.author
        if victim.id in self.bot.settings[ctx.guild].stats_exclude:
            await ctx.send(f"Sorry, {victim.display_name} has requested that their stats not be recorded :confused:")
            return
        data = self.cache[ctx.guild.id]
        # NOTE(review): these indexed lookups raise KeyError if `victim` has
        # no recorded messages — confirm whether .get() is wanted.
        words = data.word_counts(ctx.author)[victim.id]
        ratio = data.correct_counts(ctx.author)[victim.id] / (words or 1) * 100
        msg = f"{ratio:.1f}% of the {words:,} words sent by {victim.display_name} are spelled correctly."
        em = discord.Embed(title="Spellcheck", description=msg, color=0x03fc8c)
        em.set_author(name=victim.display_name, icon_url=victim.avatar_url)
        self.append_warning(data, em)
        await ctx.send(embed=em)

    @commands.command()
    @doc_url("https://docs.archit.us/commands/statistics/#messagecount")
    async def messagecount(self, ctx, victim: discord.Member = None):
        """messagecount [member]
        Displays a graph of the top message senders. Optionally include a member to always include.
        """
        data = self.cache[ctx.guild.id]
        # Chart generation is CPU-bound; run it off the event loop.
        with ThreadPoolExecutor() as pool:
            img = await self.bot.loop.run_in_executor(
                pool,
                wordcount_gen.generate,
                ctx.guild,
                data.member_counts(ctx.author),
                data.word_counts(ctx.author),
                victim)
        resp = await self.bot.manager_client.publish_file(
            iter([message_type.File(file=img)]))
        em = discord.Embed(title="Top 5 Message Senders", description=ctx.guild.name, color=0x7b8fb7)
        em.set_image(url=resp.url)
        if victim:
            if victim.id in self.bot.settings[ctx.guild].stats_exclude:
                em.set_footer(text=f"{victim.display_name} has hidden their stats")
            else:
                em.set_footer(text="{0} has sent {1:,} words across {2:,} messages".format(
                    victim.display_name,
                    data.word_counts(ctx.author)[victim.id],
                    data.member_counts(ctx.author)[victim.id]), icon_url=victim.avatar_url)
        self.append_warning(data, em)
        await ctx.channel.send(embed=em)
def setup(bot):
    """discord.py extension entry point: register the statistics cog."""
    cog = MessageStats(bot)
    bot.add_cog(cog)
| 2.390625 | 2 |
# Count how many values in the list fall strictly between 20 and 60.
# Fixes in this rewrite: the original was Python-2 (`print var`), used a
# C-style index loop with `i += 1` duplicated in both branches, and abused
# `while ... else` just to print the result.
numbers = [56, 87, 6, 17, 45, 21]
count = sum(1 for num in numbers if 20 < num < 60)
print(count)
users/forms.py | Daniel-Alba15/TodoApp | 0 | 12772085 | <gh_stars>0
from django.contrib.auth.forms import PasswordChangeForm
from django import forms
from django.core.exceptions import ValidationError
from .models import MyUser
class UserForm(forms.Form):
    """Registration form: validates email uniqueness and matching passwords,
    and creates the user on save()."""

    email = forms.EmailField(max_length=200)
    name = forms.CharField(max_length=100)
    lastname = forms.CharField(max_length=100)
    password = forms.CharField(widget=forms.PasswordInput)
    password_confirm = forms.CharField(widget=forms.PasswordInput)

    def clean_email(self):
        """Reject emails that already belong to an existing user."""
        # Bug fix: field-level cleaning must read self.cleaned_data; the
        # original called super().clean() (the form-level clean hook) here.
        email = self.cleaned_data.get('email')
        # .exists() avoids fetching rows just to test for a duplicate.
        if MyUser.objects.filter(email=email).exists():
            # NOTE(review): error code 'eror' looks like a typo for 'error';
            # kept as-is in case something matches on it.
            self.add_error('email', ValidationError(
                ('Email already exists, please login'), code='eror'))
        return email

    def clean(self):
        """Form-level validation: ensure both password fields match."""
        data = super().clean()
        password = data.get('password')
        password_confirm = data.get('password_confirm')
        if password != password_confirm:
            self.add_error('password', ValidationError(
                ('Passwords does not match!'), code='invalid'))

    def save(self):
        """Create and persist a new user from the cleaned form data."""
        data = self.cleaned_data
        # create_user() does not take the confirmation field.
        data.pop('password_confirm')
        user = MyUser.objects.create_user(**data)
        user.save()
class Password(PasswordChangeForm):
    """Password-change form that adds a minimum-length check."""

    def clean_new_password1(self):
        """Require the new password to be at least 4 characters."""
        password1 = self.cleaned_data.get('new_password1')
        if len(password1) < 4:
            # Bug fix: the original did
            #   raise ValidationError(self.add_error('new_password1', ...))
            # add_error() returns None, so that raised ValidationError(None)
            # (and double-registered the error). Raise the message directly;
            # Django attaches it to this field automatically.
            raise ValidationError('Password too short!')
        return password1

    def clean_new_password2(self):
        """Ensure both new-password fields match."""
        password1 = self.cleaned_data.get('new_password1')
        password2 = self.cleaned_data.get('new_password2')
        if password1 and password2:
            if password1 != password2:
                raise ValidationError(
                    self.error_messages['password_mismatch'],
                    code='password_mismatch',
                )
        return password2
| 2.59375 | 3 |
constructor_io/modules/recommendations.py | Constructor-io/constructorio-python | 0 | 12772086 | '''Recommendations Module'''
from time import time
from urllib.parse import quote, urlencode
import requests as r
from constructor_io.helpers.exception import ConstructorException
from constructor_io.helpers.utils import (clean_params, create_auth_header,
create_request_headers,
create_shared_query_params,
throw_http_exception_from_response)
def _create_recommendations_url(pod_id, parameters, user_parameters, options):
'''Create URL from supplied parameters'''
query_params = create_shared_query_params(options, parameters, user_parameters)
if not pod_id or not isinstance(pod_id, str):
raise ConstructorException('pod_id is a required parameter of type string')
if parameters:
if parameters.get('num_results'):
query_params['num_results'] = parameters.get('num_results')
if parameters.get('item_ids'):
query_params['item_id'] = parameters.get('item_ids')
if parameters.get('term'):
query_params['term'] = parameters.get('term')
query_params['_dt'] = int(time()*1000.0)
query_params = clean_params(query_params)
query_string = urlencode(query_params, doseq=True)
return f'{options.get("service_url")}/recommendations/v1/pods/{quote(pod_id)}?{query_string}'
class Recommendations:
    '''Recommendations Class'''

    def __init__(self, options):
        self.__options = options or {}

    def get_recommendation_results(self, pod_id, parameters=None, user_parameters=None):
        '''
        Retrieve recommendation results from API

        :param str pod_id: Recommendation pod identifier
        :param dict parameters: Additional parameters to refine result set
        :param int parameters.num_results: The total number of results to return
        :param str|list parameters.item_ids: Item ID(s) to retrieve recommendations for (strategy specific)
        :param str term: The term to use to refine results (strategy specific)
        :param dict parameters.filters: Filters used to refine recommendation results (strategy specific)
        :param str parameters.section: The section to return results from
        :param dict user_parameters: Parameters relevant to the user request
        :param int user_parameters.session_id: Session ID, utilized to personalize results
        :param str user_parameters.client_id: Client ID, utilized to personalize results
        :param str user_parameters.user_id: User ID, utilized to personalize results
        :param str user_parameters.segments: User segments
        :param dict user_parameters.test_cells: User test cells
        :param str user_parameters.user_ip: Origin user IP, from client
        :param str user_parameters.user_agent: Origin user agent, from client
        :return: dict
        ''' # pylint: disable=line-too-long
        if not parameters:
            parameters = {}
        if not user_parameters:
            user_parameters = {}

        request_url = _create_recommendations_url(pod_id, parameters, user_parameters, self.__options)
        # Allow a custom requests-compatible client to be injected via options.
        requests = self.__options.get('requests') or r
        response = requests.get(
            request_url,
            auth=create_auth_header(self.__options),
            headers=create_request_headers(self.__options, user_parameters)
        )
        # Bug fix: removed a stray debug `print(response)` that leaked to
        # stdout on every library call.

        if not response.ok:
            throw_http_exception_from_response(response)

        # NOTE: `json` shadows the stdlib module name inside this method.
        json = response.json()
        json_response = json.get('response')

        if json_response:
            if json_response.get('results') or json_response.get('results') == []:
                # Stamp the request's result_id onto each result for tracking.
                result_id = json.get('result_id')
                if result_id:
                    for result in json_response.get('results'):
                        result['result_id'] = result_id
                return json

        raise ConstructorException('get_recommendation_results response data is malformed')
| 2.6875 | 3 |
user/mixins.py | alizabetpoor/squadfinder_pubg | 2 | 12772087 | <gh_stars>1-10
from django.shortcuts import redirect
class CHECKUSERMIXIN:
    """View mixin: anonymous users proceed through the normal dispatch
    chain; already-authenticated users are redirected to their settings
    page instead."""

    def dispatch(self, request, *args, **kwargs):
        # Guard clause: bounce authenticated users away from this view.
        if request.user.is_authenticated:
            return redirect("user:user_setting")
        return super().dispatch(request, *args, **kwargs)
src/mp_api/routes/_user_settings.py | rkingsbury/api | 0 | 12772088 | <gh_stars>0
from emmet.core._user_settings import UserSettingsDoc
from mp_api.core.client import BaseRester
class UserSettingsRester(BaseRester[UserSettingsDoc]):  # pragma: no cover
    """Rester for reading and writing per-consumer user settings."""

    # REST endpoint suffix for user-settings resources.
    suffix = "_user_settings"
    document_model = UserSettingsDoc  # type: ignore
    primary_key = "consumer_id"
    # Return raw dicts: skip monty decoding and document-model wrapping.
    monty_decode = False
    use_document_model = False

    def set_user_settings(self, consumer_id, settings):  # pragma: no cover
        """
        Set user settings.

        Args:
            consumer_id: Consumer ID for the user
            settings: Dictionary with user settings

        Returns:
            Dictionary with consumer_id and write status.

        Raises:
            MPRestError
        """
        # POST the settings document and unwrap the "data" envelope.
        return self._post_resource(
            body=settings, params={"consumer_id": consumer_id}
        ).get("data")

    def get_user_settings(self, consumer_id):  # pragma: no cover
        """
        Get user settings.

        Args:
            consumer_id: Consumer ID for the user

        Returns:
            Dictionary with consumer_id and settings.

        Raises:
            MPRestError
        """
        return self.get_data_by_id(consumer_id)
| 2 | 2 |
tests/jardin_conf_mysql.py | tommyh/jardin | 0 | 12772089 | import logging
# Named database connections used by the jardin test suite; values are DSN URLs.
DATABASES = {
    'jardin_test': 'mysql://root:@localhost:3306/jardin_test',
    # a db with multiple replica URLs. The 1st url refers to an active server. The 2nd url will fail to connect.
    'multi_url_test': 'mysql://root:@localhost:3306/jardin_test mysql://root:@localhost:3333/jardin_test',
}

# Disk-backed query cache; 'size' bounds the cache (units per jardin's
# cache backend — confirm against jardin's documentation).
CACHE = {
    'method': 'disk',
    'options': {
        'dir': '/tmp/jardin_cache',
        'size': 10000
    }
}

# Default logging verbosity for the test run.
LOG_LEVEL = logging.INFO
| 2.09375 | 2 |
smhi/smhi.py | Menturan/SMHIpy | 1 | 12772090 | import json
from builtins import NotImplemented
from pprint import pprint
import aiohttp
import asyncio
import async_timeout
from aiohttp.client import _RequestContextManager
from logging import getLogger
from exceptions.SmhiExceptions import SmhiConnectionException
BASE_URL = 'https://opendata-download-metfcst.smhi.se/api/category/pmp3g/version/2/geotype/point/lon/{}/lat/{}/data.json'
logger = getLogger()
@asyncio.coroutine
def get_forecast(longitude: str, latitude: str) -> dict:
    """Fetch and return the SMHI point forecast for the given coordinates.

    Raises:
        SmhiConnectionException: if the request fails or the body is not JSON.
    """
    with aiohttp.ClientSession() as session:
        url = BASE_URL.format(longitude, latitude)
        response = None
        with async_timeout.timeout(10):
            try:
                response = yield from session.get(url)
                data = yield from response.json()
            except Exception:
                # Bug fix: the original read `response.text()` into `text`
                # here and then discarded it by raising — dead code removed.
                logger.exception("Could not fetch data from SMHI.")
                raise SmhiConnectionException("Could not fetch data from SMHI.")
            finally:
                # Bug fix: `response` is unbound if session.get() itself
                # raised; the original's unconditional release() turned the
                # real error into a NameError.
                if response is not None:
                    yield from response.release()
    pprint(data)
    # Bug fix: the function was annotated `-> dict` but returned None.
    return data
def __check_response_for_error(response: aiohttp.ClientResponse):
    """Raise if the HTTP status is outside the 2xx success range.

    Bug fix: the original condition was inverted — it raised on 2xx
    (success) and silently passed on error statuses.
    """
    if not 200 <= response.status < 300:
        raise SmhiConnectionException(
            "SMHI request failed with status {}".format(response.status))
def __format_response(response: json) -> json:
    """Placeholder for response formatting — not implemented yet.

    Bug fix: the original called NotImplemented() — the NotImplemented
    constant is not callable, so it raised TypeError instead of clearly
    signalling an unimplemented function.
    """
    raise NotImplementedError("__format_response is not implemented")
loop = asyncio.get_event_loop()
loop.run_until_complete(get_forecast("18.176879", "59.237234"))
| 2.765625 | 3 |
scripts/dataset_resize.py | duanzhiihao/mycv | 0 | 12772091 | <filename>scripts/dataset_resize.py<gh_stars>0
import argparse
from pathlib import Path
from tqdm import tqdm
from PIL import Image
import numpy as np
import cv2
import torch
import torchvision as tv
# Map CLI interpolation names to torchvision InterpolationMode members.
# Bug fix: the original reached every member through
# `InterpolationMode.BICUBIC.<NAME>` (member-of-member enum access), which
# is deprecated in Python 3.11 and removed in 3.12; access the enum class
# directly instead.
interp_dict = {
    'nearest': tv.transforms.InterpolationMode.NEAREST,
    'bilinear': tv.transforms.InterpolationMode.BILINEAR,
    'bicubic': tv.transforms.InterpolationMode.BICUBIC,
    'hamming': tv.transforms.InterpolationMode.HAMMING,
    'lanczos': tv.transforms.InterpolationMode.LANCZOS,
    'box': tv.transforms.InterpolationMode.BOX
}
class ResizeAndSave(torch.utils.data.Dataset):
    """Dataset whose iteration side effect does the work: each item resizes
    one image under `source` (shorter side to `tgt_size`, then center-crop)
    and writes it as a PNG under `target`, mirroring the directory layout.

    __getitem__ returns a dummy 0 so a DataLoader can parallelize the file
    conversion across workers.
    """

    def __init__(self, source, target, tgt_size, interpolation):
        self.source = Path(source)
        self.target = Path(target)
        self.tgt_size = tgt_size
        self.transform = tv.transforms.Compose([
            tv.transforms.Resize(tgt_size, interpolation=interp_dict[interpolation]),
            tv.transforms.CenterCrop(tgt_size)
        ])

        # Pre-create the mirrored directory tree so workers can write
        # concurrently without racing on mkdir.
        print('Scanning through files and creating target folders...')
        img_names = []
        for p in tqdm(self.source.rglob('*.*')):
            imname: Path = p.relative_to(self.source)
            img_names.append(imname)
            tgt_path = self.target / imname
            if not tgt_path.parent.is_dir():
                tgt_path.parent.mkdir(parents=True, exist_ok=False)
        self.img_names = img_names
        # (removed leftover `debug = 1` statements and a dead `if False:`
        # matplotlib preview block from the original)

    def __len__(self):
        return len(self.img_names)

    def __getitem__(self, index):
        imname = self.img_names[index]
        srcpath = self.source / imname
        img = Image.open(srcpath).convert('RGB')
        img = self.transform(img)

        svpath = (self.target / imname).with_suffix('.png')
        assert svpath.parent.is_dir(), f'Cannot find {svpath.parent}, which should exists.'
        # PIL gives RGB; cv2.imwrite expects BGR, hence the channel flip.
        im = np.array(img)
        cv2.imwrite(str(svpath), im[:, :, ::-1].copy())
        return 0
def main():
    """CLI entry point: resize every image under --src into --dst."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--src', type=str, default='d:/datasets/imagenet/train')
    parser.add_argument('--dst', type=str, default='d:/datasets/imagenet/train256')
    parser.add_argument('--size', type=int, default=256)
    parser.add_argument('--interp', type=str, default='bicubic')
    parser.add_argument('--workers', type=int, default=0)
    args = parser.parse_args()

    saver = ResizeAndSave(args.src, args.dst, tgt_size=args.size, interpolation=args.interp)
    # Batch contents are ignored; batching only amortizes loader overhead.
    batch_size = max(1, args.workers * 4)
    saveloader = torch.utils.data.DataLoader(
        saver, batch_size=batch_size, shuffle=False, num_workers=args.workers,
        pin_memory=True, drop_last=False
    )
    # Iterating the loader performs the resize-and-save work; removed the
    # leftover `debug = 1` placeholder statements from the original.
    for _ in tqdm(saveloader):
        pass
if __name__ == '__main__':
main()
| 2.171875 | 2 |
tests/test_bishop_movement.py | wuhw234/chess_bot | 0 | 12772092 | import unittest
from pieces.bishop import Bishop
from board import Board
class TestSum(unittest.TestCase):
    """Unit tests for Bishop.generate_legal_moves().

    NOTE(review): the class name "TestSum" looks like a template leftover —
    these tests exercise bishop movement, not summation.
    """

    def test_no_movement(self):
        # A white bishop blocked diagonally by a friendly piece has no moves.
        board = Board()
        bishop1 = Bishop("W", 7, 0, board)
        bishop2 = Bishop("W", 6, 1, board)
        board.add_piece(bishop1, 7, 0)
        board.add_piece(bishop2, 6, 1)
        self.assertEqual(bishop1.generate_legal_moves(), [], "Should be empty array")

    def test_capture(self):
        # An enemy piece on the adjacent diagonal square is capturable,
        # and the bishop cannot move past it.
        board = Board()
        bishop1 = Bishop("W", 7, 0, board)
        bishop2 = Bishop("B", 6, 1, board)
        board.add_piece(bishop1, 7, 0)
        board.add_piece(bishop2, 6, 1)
        self.assertEqual(bishop1.generate_legal_moves(), [(6, 1)], "Should be [(6, 1)]")

    def test_capture_and_move(self):
        # Surrounded on three diagonals by friendly pieces; the only legal
        # move is capturing the enemy bishop on the fourth diagonal.
        board = Board()
        bishop1 = Bishop("W", 6, 1, board)
        bishop2 = Bishop("B", 7, 0, board)
        bishop3 = Bishop("W", 5, 2, board)
        bishop4 = Bishop("W", 5, 0, board)
        bishop5 = Bishop("W", 7, 2, board)
        board.add_piece(bishop1, 6, 1)
        board.add_piece(bishop2, 7, 0)
        board.add_piece(bishop3, 5, 2)
        board.add_piece(bishop4, 5, 0)
        board.add_piece(bishop5, 7, 2)
        self.assertEqual(bishop1.generate_legal_moves(), [(7, 0)], "Should be [(7, 0)]")

    def test_full_diagonal(self):
        # From a corner on an empty board, the bishop sweeps the whole diagonal.
        board = Board()
        bishop1 = Bishop("W", 7, 7, board)
        board.add_piece(bishop1, 7, 7)
        self.assertEqual(bishop1.generate_legal_moves(),
                         [(6, 6), (5, 5), (4, 4), (3, 3), (2, 2), (1, 1,), (0, 0)],
                         "Should be [(6, 6), (5, 5), (4, 4), (3, 3), (2, 2), (1, 1,), (0, 0)]")
if __name__ == '__main__':
unittest.main() | 3.140625 | 3 |
bin/build.py | itay-moav/rahl_commander | 1 | 12772093 | #!/usr/local/bin/python3.4
# encoding: utf-8
'''
pyverse.bin.build -- builds DB objects into your database.
pyverse.bin.build Use this command to build the various code generate objects in your DB, like Stored Procedures, Functions, Views and Triggers
@author: <NAME>
@copyright: 2014 open source. All rights reserved.
@license: license
@contact: <EMAIL>
@deffield updated: Updated
'''
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '/..')
from app import parser as parser
import app.commands
import traceback
def main(parser):
    """Register the build-related CLI options and build the requested DB objects.

    Args:
        parser: the shared argument parser imported from the app package.

    Returns:
        0 on success, 1 if any exception was raised while building.
        (The original returned None on success; sys.exit(None) happens to
        mean 0, but an explicit status code is clearer.)
    """
    try:
        parser.add_argument("-s","--stored_proc", dest="stored_proc", action="store",nargs='?', default=False, const='All', \
                            help="build all stored procedures, or the folder/*.sql specified. Root folder is the database name.")
        parser.add_argument("-w","--views", dest="views", action="store",nargs='?', default=False, const='All', \
                            help="build all views, or the folder/*.sql specified. Root folder is the database name.")
        parser.add_argument("-t","--triggers", dest="triggers", action="store",nargs='?', default=False, const='All', \
                            help="build all triggers, or the folder/*.sql specified. Root folder is the database name.")
        parser.add_argument("-f","--functions", dest="functions", action="store",nargs='?', default=False, const='All', \
                            help="build all functions, or the folder/*.sql config specified. Root folder is the database name.")
        parser.add_argument("-c","--scripts", dest="scripts", action="store",nargs='?', default=False, const='All', \
                            help="run all scripts, or the folder/*.sql specified. Root folder is the database name.")

        args = app.init(parser)
        builder = app.commands.BuildDBObj(args)
        builder.run()
        return 0

    # TODO if not run from another tool, let the exception propagate so proper error logs are visible
    except Exception:
        traceback.print_exc()
        return 1
#++++++++++++++++++++++++++++++++++++ MAIN ENTRY POINT ++++++++++++++++++++++++++++++++++
# `parser` comes from the app package import above; the process exit code is
# whatever main() returns.
sys.exit(main(parser))
| 2.625 | 3 |
pins/__init__.py | sellorm/python-pins | 0 | 12772094 | <reponame>sellorm/python-pins<gh_stars>0
"""
A simple package to be able to "pin" data to an RStudio Connect instance
"""
import os
import json
import pickle
import tarfile
import tempfile
import requests as req
try:
    from importlib import metadata
except ImportError:
    # Running on pre-3.8 Python; use importlib-metadata package
    import importlib_metadata as metadata

# Single source of truth: read the version from the installed distribution
# metadata instead of hard-coding it here.
__version__ = metadata.version("pins")
def pin_rsconnect(data, pin_name, pretty_pin_name, connect_server, api_key):
    """
    Make a pin on RStudio Connect.

    Parameters:
        data: any object that has a to_json method (eg. pandas DataFrame)
        pin_name (str): name of pin, only alphanumeric and underscores
        pretty_pin_name (str): display name of pin
        connect_server (str): RStudio Connect server address e.g. https://connect.example.com/
        api_key (str): API key of a user on RStudio Connect

    Return:
        dict with the dashboard URL ("dash_url") and the solo content URL
        ("content_url") of the deployed pin
    """
    # delete=False so the path can be reopened by tarfile on all platforms;
    # the original leaked this file -- it is now removed in the finally block.
    pins_tf = tempfile.NamedTemporaryFile(delete=False)
    pins_tf.close()
    try:
        # Build the bundle contents in a temp dir that is cleaned up
        # automatically once the tarball has been written.
        with tempfile.TemporaryDirectory() as bundle_dir:
            # Save data
            data.to_json(bundle_dir + "/data.txt")

            # Create landing page (original left file handles open; use `with`)
            with open(bundle_dir + "/index.html", "w") as index_file:
                for line in ["<h1>Python Pin", "\n"]:
                    index_file.write(line)

            # Create Manifest
            manifest = {
                "version": 1,
                "locale": "en_US",
                "platform": "3.5.1",
                "metadata": {
                    "appmode": "static",
                    "primary_rmd": None,
                    "primary_html": "index.html",
                    "content_category": "pin",
                    "has_parameters": False,
                },
                "packages": None,
                "files": None,
                "users": None,
            }
            with open(bundle_dir + "/manifest.json", "w") as manifest_conn:
                json.dump(manifest, manifest_conn)

            # Turn into tarfile
            with tarfile.open(pins_tf.name, "w:gz") as tar:
                tar.add(bundle_dir, arcname=os.path.basename(bundle_dir))

        auth = {"Authorization": "Key " + api_key}
        content = get_content(pin_name, pretty_pin_name, connect_server, auth)
        content_url = connect_server + "/__api__/v1/content/" + content["guid"]

        # Upload Bundle
        with open(pins_tf.name, "rb") as tf_conn:
            bundle = req.post(content_url + "/bundles", headers=auth, data=tf_conn)
        bundle_id = bundle.json()["id"]

        # Deploy bundle (response body intentionally unused)
        req.post(content_url + "/deploy", headers=auth, json={"bundle_id": bundle_id})
    finally:
        # Remove the temporary tarball the original code left behind.
        os.unlink(pins_tf.name)

    return {"dash_url": content["dashboard_url"], "content_url": content["content_url"]}
def get_content(pin_name, pretty_pin_name, connect_server, auth):
    """
    Intermediate function to get content from Connect
    """
    endpoint = connect_server + "/__api__/v1/content"
    # Reuse an existing content item with this name if one exists.
    existing = req.get(endpoint, headers=auth, params={"name": pin_name}).json()
    if existing:
        return existing[0]
    # Otherwise create a fresh ACL-protected content item.
    payload = {"access_type": "acl", "name": pin_name, "title": pretty_pin_name}
    return req.post(endpoint, headers=auth, json=payload).json()
def pin_get_rsconnect(url, api_key):
    """
    Get data from a python pin on RStudio Connect

    Parameters:
        url (str) content solo URL on Connect (NOT dashboard URL)
        api_key (str): API key of a user on RStudio Connect

    Returns:
        JSON version of pin
    """
    headers = {"Authorization": "Key " + api_key}
    return req.get(url + "/data.txt", headers=headers).json()
| 2.625 | 3 |
calls/migrations/0007_auto_20180906_2104.py | JoshZero87/site | 4 | 12772095 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-09-06 21:04
from __future__ import unicode_literals
from django.db import migrations, models
# Auto-generated Django migration -- do not hand-edit; create a new migration
# for further schema or choices changes.
class Migration(migrations.Migration):
    # Must be applied after the previous calls-app migration.
    dependencies = [
        ('calls', '0006_auto_20180906_0430'),
    ]
    # Redefines the CallCampaign.status choices (the set here includes
    # 60 = 'Suspended'); default stays 1 = 'New'.
    operations = [
        migrations.AlterField(
            model_name='callcampaign',
            name='status',
            field=models.IntegerField(choices=[(1, 'New'), (10, 'Approved'), (20, 'In Progress'), (30, 'Paused'), (40, 'Complete'), (50, 'Declined'), (60, 'Suspended')], default=1),
        ),
    ]
| 1.609375 | 2 |
src/lib.py | xplanr/101 | 0 | 12772096 | # vim: filetype=python ts=2 sw=2 sts=2 et :
# (c) 2021, <NAME> (<EMAIL>) unlicense.org
"""Standard library functions."""
import re
import sys
import copy
class obj:
    """Simple base class: keyword args become attributes; pretty __repr__."""

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def __repr__(self):
        # Show public attributes only, sorted by name, as ":key value".
        visible = sorted(self.__dict__.items())
        body = ', '.join(f":{key} {value}" for key, value in visible if key[0] != "_")
        return self.__class__.__name__ + "{" + body + "}"

    def clone(self):
        """Return a deep copy of this object's attributes wrapped in a new obj."""
        return obj(**copy.deepcopy(self.__dict__))
def cli(**d):
    """Overlay command-line flags onto the defaults given in `d`.

    A flag is a token whose first character is stripped (e.g. "-key"),
    followed by its value.  d[key] is replaced only when `key` is a known
    default AND the coerced value has the same type as the default.

    Fixes the original off-by-one: the index started at -1, so the first
    iteration read sys.argv[-1] (last token) and coerced sys.argv[0]
    (the program name).
    """
    argv = sys.argv
    i = 1  # skip the program name
    while i < len(argv) - 1:
        key, val = argv[i][1:], coerce(argv[i + 1])
        if key in d:
            i += 1  # the next token was consumed as this flag's value
            if type(val) == type(d[key]):
                d[key] = val
        i += 1
    return d

def csv(src=None):
    """Iterator over parsed CSV rows.

    `src` may be a path ending in ".csv", a literal CSV string, or None
    (read standard input).  Whitespace and `#...` comments are stripped,
    blank lines are skipped, and every cell is type-coerced via `coerce`.
    """
    def lines(rows):
        for line in rows:
            # Drop all whitespace and trailing comments before splitting.
            line = re.sub(r'([\n\t\r ]|#.*)', '', line)
            if line:
                yield [coerce(cell) for cell in line.split(",")]
    if src and src[-4:] == ".csv":
        with open(src) as fp:
            for row in lines(fp):
                yield row
    else:
        for row in lines(src.split("\n") if src else sys.stdin):
            yield row

def coerce(string):
    """Coerce `string` to bool/int/float when possible, else return it unchanged."""
    if string == "True":
        return True
    if string == "False":
        return False
    try:
        return int(string)
    except Exception:
        try:
            return float(string)
        except Exception:
            return string

def rs(l, r=0):
    """Round every numeric entry of `l` to `r` decimal places; return strings."""
    return [(f"{x:.{r}f}" if isinstance(x, (int, float)) else str(x))
          for x in l]
| 2.765625 | 3 |
pioinstaller/core.py | StanleyEPark/platformio-core-installer | 1 | 12772097 | <gh_stars>1-10
# Copyright (c) 2014-present PlatformIO <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import platform
import subprocess
import time
import click
import semantic_version
from pioinstaller import __version__, exception, home, util
log = logging.getLogger(__name__)
PIO_CORE_DEVELOP_URL = "https://github.com/platformio/platformio/archive/develop.zip"
UPDATE_INTERVAL = 60 * 60 * 24 * 3 # 3 days
def get_core_dir():
    """Return the PlatformIO core directory.

    Resolution order:
    1. the PLATFORMIO_CORE_DIR environment variable, when set;
    2. ~/.platformio on non-Windows systems;
    3. on Windows, <drive>\\.platformio when the home path contains
       non-ASCII characters (presumably because downstream tooling
       mishandles such paths -- TODO confirm), falling back to
       ~/.platformio otherwise.
    """
    if os.getenv("PLATFORMIO_CORE_DIR"):
        return os.getenv("PLATFORMIO_CORE_DIR")
    core_dir = os.path.join(util.expanduser("~"), ".platformio")
    if not util.IS_WINDOWS:
        return core_dir
    win_core_dir = os.path.splitdrive(core_dir)[0] + "\\.platformio"
    if os.path.isdir(win_core_dir):
        return win_core_dir
    try:
        if util.has_non_ascii_char(core_dir):
            # Probe that the drive-root location is writable before using it.
            os.makedirs(win_core_dir)
            with open(os.path.join(win_core_dir, "file.tmp"), "w") as fp:
                fp.write("test")
            os.remove(os.path.join(win_core_dir, "file.tmp"))
            return win_core_dir
    except:  # pylint:disable=bare-except
        # Deliberate best-effort: any failure falls back to the home path.
        pass
    return core_dir
def get_cache_dir(path=None):
    """Return (creating it if missing) the `.cache` directory under the core dir.

    Args:
        path: optional core directory; defaults to get_core_dir().

    Returns:
        The cache directory path.
    """
    core_dir = path or get_core_dir()
    cache_dir = os.path.join(core_dir, ".cache")
    # exist_ok avoids the check-then-create race in the original isdir() guard.
    os.makedirs(cache_dir, exist_ok=True)
    return cache_dir
def install_platformio_core(shutdown_piohome=True, develop=False, ignore_pythons=None):
    """Create the isolated penv and pip-install PlatformIO Core into it.

    Args:
        shutdown_piohome: stop running PIO Home servers first (so files are
            not locked during the install).
        develop: install the development branch from GitHub instead of PyPI.
        ignore_pythons: interpreter patterns to skip when building the penv.

    Returns:
        True on success.

    Raises:
        exception.PIOInstallerException: when pip fails to install the package.
    """
    # pylint: disable=bad-option-value, import-outside-toplevel, unused-import, import-error, unused-variable, cyclic-import
    # Imported locally to avoid a circular import with pioinstaller.penv.
    from pioinstaller import penv

    if shutdown_piohome:
        home.shutdown_pio_home_servers()
    penv_dir = penv.create_core_penv(ignore_pythons=ignore_pythons)
    python_exe = os.path.join(
        penv.get_penv_bin_dir(penv_dir), "python.exe" if util.IS_WINDOWS else "python"
    )
    command = [python_exe, "-m", "pip", "install", "-U"]
    if develop:
        click.echo("Installing a development version of PlatformIO Core")
        command.append(PIO_CORE_DEVELOP_URL)
    else:
        click.echo("Installing PlatformIO Core")
        command.append("platformio")
    try:
        subprocess.check_call(command)
    except Exception as e:  # pylint:disable=broad-except
        error = str(e)
        if util.IS_WINDOWS:
            # Antivirus software is a common cause of pip failures on Windows.
            error = (
                "If you have antivirus/firewall/defender software in a system,"
                " try to disable it for a while.\n %s" % error
            )
        raise exception.PIOInstallerException(
            "Could not install PlatformIO Core: %s" % error
        )
    platformio_exe = os.path.join(
        penv.get_penv_bin_dir(penv_dir),
        "platformio.exe" if util.IS_WINDOWS else "platformio",
    )
    try:
        home.install_pio_home(platformio_exe)
    except Exception as e:  # pylint:disable=broad-except
        # PIO Home install is best-effort; log and continue.
        log.debug(e)
    click.secho(
        "\nPlatformIO Core has been successfully installed into an isolated environment `%s`!\n"
        % penv_dir,
        fg="green",
    )
    click.secho("The full path to `platformio.exe` is `%s`" % platformio_exe, fg="cyan")
    # pylint:disable=line-too-long
    click.secho(
        """
If you need an access to `platformio.exe` from other applications, please install Shell Commands
(add PlatformIO Core binary directory `%s` to the system environment PATH variable):
See https://docs.platformio.org/page/installation.html#install-shell-commands
"""
        % penv.get_penv_bin_dir(penv_dir),
        fg="cyan",
    )
    return True
def check(dev=False, auto_upgrade=False, version_spec=None):
    """Validate an existing PlatformIO Core installation.

    Verifies that the penv, its Python interpreter and the platformio
    executable all exist and run; optionally checks the installed version
    against `version_spec`, and -- when `auto_upgrade` is set -- upgrades the
    core at most once every UPDATE_INTERVAL seconds.

    Returns:
        dict describing the installation (paths, versions, system type).

    Raises:
        exception.InvalidPlatformIOCore: when any validation step fails.
    """
    # pylint: disable=bad-option-value, import-outside-toplevel, unused-import, import-error, unused-variable, cyclic-import
    from pioinstaller import penv

    platformio_exe = os.path.join(
        penv.get_penv_bin_dir(), "platformio.exe" if util.IS_WINDOWS else "platformio",
    )
    python_exe = os.path.join(
        penv.get_penv_bin_dir(), "python.exe" if util.IS_WINDOWS else "python"
    )
    result = {}
    if not os.path.isfile(platformio_exe):
        raise exception.InvalidPlatformIOCore(
            "PlatformIO executable not found in `%s`" % penv.get_penv_bin_dir()
        )
    if not os.path.isfile(os.path.join(penv.get_penv_dir(), "state.json")):
        raise exception.InvalidPlatformIOCore(
            "Could not found state.json file in `%s`"
            % os.path.join(penv.get_penv_dir(), "state.json")
        )
    try:
        # Probe the penv's interpreter for the installed core/python versions.
        result.update(fetch_python_state(python_exe))
    except subprocess.CalledProcessError as e:
        error = e.output.decode()
        raise exception.InvalidPlatformIOCore(
            "Could not import PlatformIO module. Error: %s" % error
        )
    piocore_version = convert_version(result.get("core_version"))
    # A prerelease core implies a develop install even if `dev` was not passed.
    dev = dev or bool(piocore_version.prerelease if piocore_version else False)
    result.update(
        {
            "core_dir": get_core_dir(),
            "cache_dir": get_cache_dir(),
            "penv_dir": penv.get_penv_dir(),
            "penv_bin_dir": penv.get_penv_bin_dir(),
            "platformio_exe": platformio_exe,
            "installer_version": __version__,
            "python_exe": python_exe,
            "system": util.get_systype(),
            "is_develop_core": dev,
        }
    )
    if version_spec:
        try:
            if piocore_version not in semantic_version.Spec(version_spec):
                raise exception.InvalidPlatformIOCore(
                    "PlatformIO Core version %s does not match version requirements %s."
                    % (str(piocore_version), version_spec)
                )
        except ValueError:
            # A malformed spec is only reported, not treated as a failure.
            click.secho(
                "Invalid version requirements format: %s. "
                "More about Semantic Versioning: https://semver.org/" % version_spec
            )
    with open(os.path.join(penv.get_penv_dir(), "state.json")) as fp:
        penv_state = json.load(fp)
    if penv_state.get("platform") != platform.platform(terse=True):
        raise exception.InvalidPlatformIOCore(
            "PlatformIO installed using another platform `%s`. Your platform: %s"
            % (penv_state.get("platform"), platform.platform(terse=True))
        )
    try:
        subprocess.check_output([platformio_exe, "--version"], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        error = e.output.decode()
        raise exception.InvalidPlatformIOCore(
            "Could not run `%s --version`.\nError: %s" % (platformio_exe, str(error))
        )
    if not auto_upgrade:
        return result
    # Rate-limit upgrade checks: at most once every UPDATE_INTERVAL seconds.
    time_now = int(round(time.time()))
    last_piocore_version_check = penv_state.get("last_piocore_version_check")
    if (
        last_piocore_version_check
        and (time_now - int(last_piocore_version_check)) < UPDATE_INTERVAL
    ):
        return result
    with open(os.path.join(penv.get_penv_dir(), "state.json"), "w") as fp:
        penv_state["last_piocore_version_check"] = time_now
        json.dump(penv_state, fp)
    # First ever check only records the timestamp; no upgrade yet.
    if not last_piocore_version_check:
        return result
    upgrade_core(platformio_exe, dev)
    try:
        result.update(fetch_python_state(python_exe))
    except:  # pylint:disable=bare-except
        raise exception.InvalidPlatformIOCore("Could not import PlatformIO module")
    return result
def fetch_python_state(python_exe):
    """Run `python_exe` and return its PlatformIO core + Python versions.

    The probe is executed with `python -c` inside the penv interpreter so
    the versions reported are the penv's, not this installer's.
    """
    probe = """import platform
import json
import platformio
state = {
   "core_version": platformio.__version__,
   "python_version": platform.python_version()
}
print(json.dumps(state))
"""
    raw = subprocess.check_output(
        [python_exe, "-c", probe,], stderr=subprocess.STDOUT,
    )
    return json.loads(raw.decode())
def convert_version(version):
    """Parse a PEP 440 version string into a semantic_version.Version.

    Returns:
        The parsed version, or None when `version` cannot be converted.
    """
    try:
        return semantic_version.Version(util.pepver_to_semver(version))
    except Exception:  # broad on purpose: any parse failure means "unknown version"
        # (was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit)
        return None
def upgrade_core(platformio_exe, dev=False):
    """Run `platformio upgrade` (with --dev when requested).

    Raises:
        exception.PIOInstallerException: when the upgrade subprocess fails.
    """
    command = [platformio_exe, "upgrade"] + (["--dev"] if dev else [])
    try:
        subprocess.check_output(
            command, stderr=subprocess.PIPE,
        )
    except Exception as exc:  # pylint:disable=broad-except
        raise exception.PIOInstallerException(
            "Could not upgrade PlatformIO Core: %s" % str(exc)
        )
    return True
def dump_state(target, state):
    """Serialize `state` as JSON to `target` (a file path or a directory).

    When `target` is an existing directory the file is named
    `get-platformio-core-state.json` inside it.  Parent directories are
    created as needed.  A bare filename (empty dirname) no longer crashes:
    the original called os.makedirs("") in that case.
    """
    assert isinstance(target, str)
    if os.path.isdir(target):
        target = os.path.join(target, "get-platformio-core-state.json")
    parent = os.path.dirname(target)
    if parent:
        # exist_ok also removes the check-then-create race of the original.
        os.makedirs(parent, exist_ok=True)
    with open(target, "w") as fp:
        json.dump(state, fp)
| 1.84375 | 2 |
cgbeacon2/server/blueprints/api_v1/controllers.py | Clinical-Genomics/cgbeacon2 | 0 | 12772098 | <filename>cgbeacon2/server/blueprints/api_v1/controllers.py
# -*- coding: utf-8 -*-
import logging
from cgbeacon2.constants import (
BUILD_MISMATCH,
INVALID_COORDINATES,
NO_MANDATORY_PARAMS,
NO_POSITION_PARAMS,
NO_SECONDARY_PARAMS,
QUERY_PARAMS_API_V1,
)
from cgbeacon2.models import DatasetAlleleResponse
from cgbeacon2.utils.add import add_variants as variants_loader
from cgbeacon2.utils.delete import delete_variants as variant_deleter
from cgbeacon2.utils.md5 import md5_key
from cgbeacon2.utils.parse import (
compute_filter_intervals,
count_variants,
extract_variants,
get_vcf_samples,
)
from cgbeacon2.utils.update import update_dataset
from flask import current_app
RANGE_COORDINATES = ("startMin", "startMax", "endMin", "endMax")
LOG = logging.getLogger(__name__)
def validate_add_data(req):
    """Validate the data specified in the parameters of an add request received via the API.

    Accepts:
        req(flask.request): POST request received by server

    Returns:
        None when the request is valid, otherwise a string describing the error.
    """
    db = current_app.db
    req_data = req.json
    dataset_id = req_data.get("dataset_id")
    dataset = db["dataset"].find_one({"_id": dataset_id})
    # Invalid dataset
    if dataset is None:
        return "Invalid request. Please specify a valid dataset ID"
    vcf_samples = get_vcf_samples(req_data.get("vcf_path"))
    if not vcf_samples:
        return "Error extracting info from VCF file, please check path to VCF"
    samples = req_data.get("samples", [])
    # Every user-provided sample must actually exist in the VCF header.
    if overlapping_samples(vcf_samples, samples) is False:
        return f"One or more provided samples were not found in VCF. VCF samples:{vcf_samples}"
    genes = req_data.get("genes")
    if genes is None:  # Return validated OK and then load the entire VCF
        return
    if genes.get("id_type") not in ["HGNC", "Ensembl"]:
        return "Please provide id_type (HGNC or Ensembl) for the given list of genes"
    filter_intervals = compute_filter_intervals(req_data)
    if filter_intervals is None:
        return "Could not create a gene filter using the provided gene list"
def add_variants_task(req):
    """Perform the actual task of adding variants to the database after receiving an add request

    Accepts:
        req(flask.request): POST request received by server
    """
    db = current_app.db
    req_data = req.json
    dataset_id = req_data.get("dataset_id")
    samples = req_data.get("samples", [])
    assembly = req_data.get("assemblyId")
    filter_intervals = None
    genes = req_data.get("genes")
    if genes:
        # Restrict loading to the genomic intervals covered by the gene list.
        filter_intervals = compute_filter_intervals(req_data)
    vcf_obj = extract_variants(
        vcf_file=req_data.get("vcf_path"), samples=samples, filter=filter_intervals
    )
    nr_variants = count_variants(vcf_obj)
    # Counting consumes the VCF iterator, so extract a fresh one for loading.
    vcf_obj = extract_variants(
        vcf_file=req_data.get("vcf_path"), samples=samples, filter=filter_intervals
    )
    added = variants_loader(
        database=db,
        vcf_obj=vcf_obj,
        samples=set(samples),
        assembly=assembly,
        dataset_id=dataset_id,
        nr_variants=nr_variants,
    )
    if added > 0:
        # Update dataset object accordingly
        update_dataset(database=db, dataset_id=dataset_id, samples=samples, add=True)
    LOG.info(f"Number of inserted variants for samples:{samples}:{added}")
def overlapping_samples(dataset_samples, request_samples):
    """Check that samples provided by user are contained in either VCF or dataset object

    Accepts:
        dataset_samples(list): the list of samples contained in the dataset or the VCF
        request_samples(list): the list of samples provided by user

    Returns:
        bool: True if all samples in the request are contained in the dataset or the VCF
    """
    # Subset test expresses "every requested sample is known" directly.
    return set(request_samples).issubset(set(dataset_samples))
def validate_delete_data(req):
    """Validate the parameters of a delete request received via the API.

    Accepts:
        req(flask.request): POST request received by server

    Returns:
        None when the request is valid, otherwise a string describing the error.
    """
    payload = req.json
    dataset = current_app.db["dataset"].find_one({"_id": payload.get("dataset_id")})
    # Guard clauses, checked in the same order as the original messages.
    if dataset is None:
        return "Invalid request. Please specify a valid dataset ID"
    samples = payload.get("samples")
    if not isinstance(samples, list) or not samples:
        return "Please provide a valid list of samples"
    if not overlapping_samples(dataset.get("samples", []), samples):
        return "One or more provided samples was not found in the dataset"
def delete_variants_task(req):
    """Perform the actual task of removing variants from the database after receiving a delete request

    Accepts:
        req(flask.request): POST request received by server
    """
    db = current_app.db
    req_data = req.json
    dataset_id = req_data.get("dataset_id")
    samples = req_data.get("samples")
    # `updated` = variants with remaining carriers, `removed` = fully deleted.
    updated, removed = variant_deleter(db, dataset_id, samples)
    if updated + removed > 0:
        # Keep the dataset's sample bookkeeping in sync with the deletion.
        update_dataset(database=db, dataset_id=dataset_id, samples=samples, add=False)
    LOG.info(f"Number of updated variants:{updated}. Number of deleted variants:{removed}")
def create_allele_query(resp_obj, req):
    """Populate a Mongo query dictionary with the parameters provided in the request.

    Accepts:
        resp_obj(dictionary): response data that will be returned by server
        req(flask.request): request received by server

    Returns:
        The mongo query dict on success; None when validation fails (in that
        case the error has been written into resp_obj["message"]).
    """
    customer_query = {}
    mongo_query = {}
    data = None
    if req.method == "GET":
        data = dict(req.args)
        customer_query["datasetIds"] = req.args.getlist("datasetIds")
    else:  # POST method
        if req.headers.get("Content-type") == "application/x-www-form-urlencoded":
            data = dict(req.form)
            customer_query["datasetIds"] = req.form.getlist("datasetIds")
        else:  # application/json, This should be default
            data = req.json
            customer_query["datasetIds"] = data.get("datasetIds", [])
    # Remove null parameters from the query
    remove_keys = []
    for key, value in data.items():
        if value == "":
            remove_keys.append(key)
    for key in remove_keys:
        data.pop(key)
    # loop over all available query params
    for param in QUERY_PARAMS_API_V1:
        if data.get(param):
            customer_query[param] = data[param]
    if "includeDatasetResponses" not in customer_query:
        customer_query["includeDatasetResponses"] = "NONE"
    # check if the minimal required params were provided in query
    check_allele_request(resp_obj, customer_query, mongo_query)
    # if an error occurred, do not query database and return error
    if resp_obj.get("message") is not None:
        resp_obj["message"]["allelRequest"] = customer_query
        resp_obj["message"]["exists"] = None
        resp_obj["message"]["datasetAlleleResponses"] = []
        return
    resp_obj["allelRequest"] = customer_query
    return mongo_query
def check_allele_request(resp_obj, customer_query, mongo_query):
    """Check that the query to the server is valid.

    Populates `mongo_query` in place; on invalid input writes an error dict
    into resp_obj["message"] and returns early.

    Accepts:
        resp_obj(dict): response data that will be returned by server
        customer_query(dict): a dictionary with all the key/values provided in the external request
        mongo_query(dict): the query to collect variants from this server
    """
    # If customer asks for a classical SNV
    if customer_query.get("variantType") is None and all(
        [
            customer_query.get("referenceName"),
            customer_query.get(
                "start",
            ),
            customer_query.get("end"),
            customer_query.get("referenceBases"),
            customer_query.get("alternateBases"),
            customer_query.get("assemblyId"),
        ]
    ):
        # generate md5_key to compare with our database
        mongo_query["_id"] = md5_key(
            customer_query["referenceName"],
            customer_query["start"],
            customer_query.get("end"),
            customer_query["referenceBases"],
            customer_query["alternateBases"],
            customer_query["assemblyId"],
        )
    # Check that the 3 mandatory parameters are present in the query
    if None in [
        customer_query.get("referenceName"),
        customer_query.get("referenceBases"),
        customer_query.get("assemblyId"),
    ]:
        # return a bad request 400 error with explanation message
        resp_obj["message"] = dict(
            error=NO_MANDATORY_PARAMS,
            allelRequest=customer_query,
        )
        return
    # check if genome build requested corresponds to genome build of the available datasets:
    if len(customer_query.get("datasetIds", [])) > 0:
        dset_builds = current_app.db["dataset"].find(
            {"_id": {"$in": customer_query["datasetIds"]}}, {"assembly_id": 1, "_id": 0}
        )
        dset_builds = [dset["assembly_id"] for dset in dset_builds if dset["assembly_id"]]
        for dset in dset_builds:
            if dset != customer_query["assemblyId"]:
                # return a bad request 400 error with explanation message
                resp_obj["message"] = dict(
                    error=BUILD_MISMATCH,
                    allelRequest=customer_query,
                )
                return
    # alternateBases OR variantType is also required
    if all(
        param is None
        for param in [
            customer_query.get("alternateBases"),
            customer_query.get("variantType"),
        ]
    ):
        # return a bad request 400 error with explanation message
        resp_obj["message"] = dict(
            error=NO_SECONDARY_PARAMS,
            allelRequest=customer_query,
        )
        return
    # Check that genomic coordinates are provided (even rough)
    if (
        customer_query.get("start") is None
        and any([coord in customer_query.keys() for coord in RANGE_COORDINATES]) is False
    ):
        # return a bad request 400 error with explanation message
        resp_obj["message"] = dict(
            error=NO_POSITION_PARAMS,
            allelRequest=customer_query,
        )
        return
    if customer_query.get("start"):  # query for exact position
        try:
            if customer_query.get("end") is not None:
                mongo_query["end"] = int(customer_query["end"])
            mongo_query["start"] = int(customer_query["start"])
        except ValueError:
            # return a bad request 400 error with explanation message
            resp_obj["message"] = dict(
                error=INVALID_COORDINATES,
                allelRequest=customer_query,
            )
    # Range query
    elif any([coord in customer_query.keys() for coord in RANGE_COORDINATES]):  # range query
        # In general startMin <= startMax <= endMin <= endMax, but allow fuzzy ends query
        fuzzy_start_query = {}
        fuzzy_end_query = {}
        try:
            if "startMin" in customer_query:
                fuzzy_start_query["$gte"] = int(customer_query["startMin"])
            if "startMax" in customer_query:
                fuzzy_start_query["$lte"] = int(customer_query["startMax"])
            if "endMin" in customer_query:
                fuzzy_end_query["$gte"] = int(customer_query["endMin"])
            if "endMax" in customer_query:
                fuzzy_end_query["$lte"] = int(customer_query["endMax"])
        except ValueError:
            # return a bad request 400 error with explanation message
            resp_obj["message"] = dict(
                error=INVALID_COORDINATES,
                allelRequest=customer_query,
            )
        if fuzzy_start_query:
            mongo_query["start"] = fuzzy_start_query
        if fuzzy_end_query:
            mongo_query["end"] = fuzzy_end_query
    if mongo_query.get("_id") is None:
        # perform normal query
        mongo_query["assemblyId"] = customer_query["assemblyId"]
        mongo_query["referenceName"] = customer_query["referenceName"]
        mongo_query["referenceBases"] = customer_query["referenceBases"]
        if "alternateBases" in customer_query:
            mongo_query["alternateBases"] = customer_query["alternateBases"]
        if "variantType" in customer_query:
            mongo_query["variantType"] = customer_query["variantType"]
    else:
        # use only variant _id in query (the md5 key already encodes coordinates)
        mongo_query.pop("start")
        mongo_query.pop("end", None)
def dispatch_query(mongo_query, response_type, datasets=None, auth_levels=None):
    """Query variant collection using a query dictionary

    Accepts:
        mongo_query(dict): a query dictionary
        response_type(str): individual dataset responses -->
            ALL means all datasets even those that don't have the queried variant
            HIT means only datasets that have the queried variant
            MISS means opposite to HIT value, only datasets that don't have the queried variant
            NONE don't return datasets response.
        datasets(list): dataset ids from request "datasetIds" field
        auth_levels(tuple): (registered access datasets(list), bona_fide_status(bool))

    Returns:
        tuple(bool, list): (allele_exists(bool), datasetAlleleResponses(list))
    """
    # The original used mutable default arguments ([], ([], False));
    # None sentinels avoid the shared-default pitfall, same effective values.
    datasets = datasets if datasets is not None else []
    auth_levels = auth_levels if auth_levels is not None else ([], False)
    variant_collection = current_app.db["variant"]
    LOG.info(f"Perform database query -----------> {mongo_query}.")
    LOG.info(f"Response level (datasetAlleleResponses) -----> {response_type}.")
    # End users are only interested in knowing which datasets have one or more specific vars, return only datasets and callCount
    variants = list(
        variant_collection.find(mongo_query, {"_id": 0, "datasetIds": 1, "call_count": 1})
    )
    if len(variants) == 0:
        return False, []
    # Filter variants by auth level specified by user token (or lack of it)
    variants = results_filter_by_auth(variants, auth_levels)
    if response_type == "NONE":
        if len(variants) > 0:
            return True, []
    else:
        # request datasets:
        req_dsets = set(datasets)
        # IDs of datasets found for this variant(s)
        return create_ds_allele_response(response_type, req_dsets, variants)
    # No variant remained visible after auth filtering
    return False, []
def results_filter_by_auth(variants, auth_levels):
    """Filter variants returned by query using auth levels (specified by token, if present, otherwise public access only datasets)

    Accepts:
        variants(list): a list of variants returned by database query
        auth_levels(tuple): (registered access datasets(list), bona_fide_status(bool))

    Return:
        filtered_variants(list): Variants filtered using authlevel criteria

    Bug fixed: the original appended a variant once PER matching dataset id,
    producing duplicate entries whenever a variant belonged to more than one
    visible dataset; it also logged an already-consumed Mongo cursor object.
    """
    ds_collection = current_app.db["dataset"]
    public_ds_ids = [ds["_id"] for ds in ds_collection.find({"authlevel": "public"})]
    LOG.info(f"The following public dataset were found in database:{public_ds_ids}")
    registered_access_ds_ids = auth_levels[0]
    controlled_access_ds_ids = []
    if auth_levels[1] is True:  # user has access to controlled access datasets
        controlled_access_ds_ids = [
            ds["_id"] for ds in ds_collection.find({"authlevel": "controlled"})
        ]
    dataset_filter = public_ds_ids + registered_access_ds_ids + controlled_access_ds_ids
    # Filter results
    LOG.info(f"Filtering out results with datasets different from :{dataset_filter}")
    allowed = set(dataset_filter)
    return [
        variant
        for variant in variants
        if any(key in allowed for key in variant.get("datasetIds", []))
    ]
def create_ds_allele_response(response_type, req_dsets, variants):
    """Create a Beacon Dataset Allele Response

    Accepts:
        response_type(str): ALL, HIT or MISS
        req_dsets(set): datasets requested, could be empty
        variants(list): a list of query results

    Returns:
        (exists(bool), ds_responses(list)): whether any requested dataset has
        the allele, plus the per-dataset responses matching `response_type`
        (list of cgbeacon2.model.DatasetAlleleResponse dicts)
    """
    ds_responses = []
    exists = False
    all_dsets = current_app.db["dataset"].find()
    all_dsets = {ds["_id"]: ds for ds in all_dsets}
    if len(req_dsets) == 0:  # if query didn't specify any dataset
        # Use all datasets present in this beacon
        req_dsets = set(all_dsets)
    for ds in req_dsets:
        # check if database contains a dataset with provided ID:
        if ds not in all_dsets:
            LOG.info(f"Provided dataset {ds} could not be found in database")
            continue
        ds_response = DatasetAlleleResponse(all_dsets[ds], variants).__dict__
        # collect responses according to the type of response requested
        if (
            response_type == "ALL"
            or (response_type == "HIT" and ds_response["exists"] is True)
            or (response_type == "MISS" and ds_response["exists"] is False)
        ):
            ds_responses.append(ds_response)
        # `exists` is True if ANY requested dataset has the allele, even if
        # that dataset is excluded from the responses by `response_type`.
        if ds_response["exists"] is True:
            exists = True
    return exists, ds_responses
| 2.296875 | 2 |
tests/test_scheduler.py | orkiguazio/mlrun | 0 | 12772099 | <filename>tests/test_scheduler.py
from datetime import datetime
import pytest
from croniter import CroniterBadCronError
from mlrun import scheduler
class Runtime(list):
    """Fake runtime that records every `run` call as (timestamp, args, kwargs)."""

    def run(self, *args, **kwargs):
        self.append((datetime.now(), args, kwargs))
def test_scheduler():
    """A job added with a valid cron expression is registered exactly once."""
    sched = scheduler.Scheduler()
    runtime = Runtime()
    sched.add('* * * * *', runtime, (1, 2), {'a': 1, 'b': 2})
    assert len(sched) == 1, 'bad jobs'
    # TODO: Speed up clock so we can see the job scheduled
def test_bad_schedule():
    """A malformed cron expression (only four fields) must be rejected."""
    with pytest.raises(CroniterBadCronError):
        scheduler.Scheduler().add('* * * *', None)
| 2.578125 | 3 |
agant/models/keras/generator/cgan.py | Sinha-Raunak/gan-toolkit | 0 | 12772100 | <filename>agant/models/keras/generator/cgan.py
"""
Let us see what all is required to be installed
"""
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Reshape, Flatten, Dropout,multiply
from keras.layers.advanced_activations import LeakyReLU
from keras.layers import BatchNormalization, Activation, ZeroPadding2D, Embedding
import numpy as np
def Generator(conf_data):
    """Build the conditional-GAN generator as a Keras Model.

    Takes (noise, class-label) inputs and returns a generated image of shape
    (input_shape, input_shape, channels), all read from `conf_data`.
    The label is embedded into the latent space and multiplied element-wise
    with the noise vector to condition the generator on the class.
    """
    latent_dim = conf_data['generator']['latent_dim']
    # Square images: (H, W, C) with H == W == input_shape.
    img_shape = (conf_data['generator']['input_shape'],conf_data['generator']['input_shape'],conf_data['generator']['channels'])
    num_classes = conf_data['GAN_model']['classes']
    # MLP trunk: 256 -> 512 -> 1024 units, LeakyReLU + batch-norm each layer,
    # finishing with a tanh layer reshaped to the image dimensions.
    model = Sequential()
    model.add(Dense(256, input_dim=latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(img_shape), activation='tanh'))
    model.add(Reshape(img_shape))
    model.summary()
    noise = Input(shape=(latent_dim,))
    label = Input(shape=(1,), dtype='int32')
    # Embed the integer label into latent_dim dims, then gate the noise with it.
    label_embedding = Flatten()(Embedding(num_classes, latent_dim)(label))
    model_input = multiply([noise, label_embedding])
    img = model(model_input)
    return Model([noise, label], img)
create_item.py | EgleSer/GildedRose-Refactoring-Kata | 0 | 12772101 | <filename>create_item.py
from gilded_rose import Item
class RegularItem(Item):
    """Base item with the standard Gilded Rose daily update rules.

    (The kata forbids altering the Item class itself, so the update
    behaviour lives in this subclass.)

    Rules enforced:
      * quality degrades by 1 per day, twice as fast once the sell-by
        date has passed,
      * quality is never negative,
      * quality is never more than 50.
    """

    def update_quality(self):
        """Advance one day: degrade quality and decrement sell_in."""
        # Once the sell by date has passed, Quality degrades twice as fast.
        degradation = 2 if self.sell_in <= 0 else 1
        # Bug fixes vs. the old branch logic: quality 1 after the sell date
        # used to go to -1, and quality exactly 50 never degraded at all.
        # Clamp into [0, 50] first, then degrade, flooring at zero.
        self.quality = max(0, min(self.quality, 50) - degradation)
        self.sell_in -= 1
class ItemCreate(object):
    """Factory that selects the right item subclass from the item name."""

    def create(self, name, sell_in, quality):
        """Return the specialised item for *name* (RegularItem by default)."""
        # NOTE(review): "<NAME>" looks like a scrubbed placeholder for
        # "Aged Brie" — confirm against the original kata data.
        if name == "<NAME>":
            return AgedBrie(name, sell_in, quality)
        # Substring dispatch, checked in the same order as before.
        for marker, item_cls in (("Sulfuras", Sulfuras),
                                 ("Conjured", Conjured),
                                 ("Backstage", Backstage)):
            if marker in name:
                return item_cls(name, sell_in, quality)
        return RegularItem(name, sell_in, quality)
class AgedBrie(RegularItem):
    """'Aged Brie' actually increases in Quality the older it gets."""

    def update_quality(self):
        """Advance one day: quality rises by 1, clamped to [0, 50]."""
        # Bug fix: the old code only increased quality when 0 < quality < 50,
        # so a brie starting at quality 0 was stuck at 0 forever.
        self.quality = min(max(self.quality, 0) + 1, 50)
        self.sell_in -= 1
class Sulfuras(RegularItem):
    """Legendary item: never has to be sold; quality is fixed at 80."""

    def update_quality(self):
        # Legendary items never change; sell_in is not applicable.
        self.sell_in = "N/A"
        self.quality = 80
class Conjured(RegularItem):
    """'Conjured' items degrade in Quality twice as fast as normal items."""

    def update_quality(self):
        # Drop by two per day, never going below zero.
        self.quality = max(self.quality - 2, 0)
        self.sell_in -= 1
class Backstage(RegularItem):
    """Backstage passes: quality rises as the concert nears, then collapses.

    +2 with 10 days or less left, +3 with 5 days or less, 0 once the
    concert has passed.  (Quality is capped at 50; note that sell_in == 0
    still gets the default +1 — behaviour preserved from the original.)
    """

    def update_quality(self):
        if self.sell_in < 0:
            # Concert is over: passes are worthless.
            self.quality = 0
        else:
            if 0 < self.sell_in <= 5:
                bump = 3
            elif 5 < self.sell_in <= 10:
                bump = 2
            else:
                bump = 1
            self.quality = min(self.quality + bump, 50)
        self.sell_in -= 1
| 3.796875 | 4 |
root/mldl/RequirementTutorial/OpenCV/PracticalPythonOpenCV/canny.py | chyidl/chyidlTutorial | 5 | 12772102 | <filename>root/mldl/RequirementTutorial/OpenCV/PracticalPythonOpenCV/canny.py
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
Canny Edge Detection is a popular edge detection algorithm developed by
J. Canny in 1986. It is a multi-stage algorithm:

1. Noise Reduction
Since edge detection is susceptible to noise in the image, the first step is
to remove noise with a 5 x 5 Gaussian filter.

2. Finding the Intensity Gradient of the image
The gradient direction is always perpendicular to edges. It is rounded to one
of four angles representing the vertical, horizontal and two diagonal
directions.

3. Non-maximum Suppression
After getting gradient magnitude and direction, a full scan of the image is
done to remove any unwanted pixels which may not constitute an edge.

4. Hysteresis Thresholding
Gradients above the high threshold are sure edges, gradients below the low
threshold are discarded, and in-between pixels are kept only if connected to
a sure edge.
"""
import numpy as np
import argparse
import cv2

# Parse the required path to the input image.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True,
    help = "Path to the image")
args = vars(ap.parse_args())

image = cv2.imread(args["image"])
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# remove "noisy" edges in the image
image = cv2.GaussianBlur(image, (5, 5), 0)
cv2.imshow("Blurred", image)

# Gradients above 150 are sure edges, below 30 are discarded; values in
# between are classified by their connectivity to sure edges (hysteresis).
canny = cv2.Canny(image, 30, 150)
cv2.imshow("Canny", canny)
cv2.waitKey(0)
nuplan/planning/training/preprocessing/test/test_collate_dataloader.py | motional/nuplan-devkit | 128 | 12772103 | import unittest
import torch.utils.data
from nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils import get_test_nuplan_scenario
from nuplan.planning.simulation.trajectory.trajectory_sampling import TrajectorySampling
from nuplan.planning.training.data_loader.scenario_dataset import ScenarioDataset
from nuplan.planning.training.preprocessing.feature_builders.raster_feature_builder import RasterFeatureBuilder
from nuplan.planning.training.preprocessing.feature_builders.vector_map_feature_builder import VectorMapFeatureBuilder
from nuplan.planning.training.preprocessing.feature_collate import FeatureCollate
from nuplan.planning.training.preprocessing.feature_preprocessor import FeaturePreprocessor
from nuplan.planning.training.preprocessing.features.vector_map import VectorMap
from nuplan.planning.training.preprocessing.target_builders.ego_trajectory_target_builder import (
EgoTrajectoryTargetBuilder,
)
# Upper bound on how many batches test_dataloader pulls from the dataloader.
NUM_BATCHES = 20
class TestCollateDataLoader(unittest.TestCase):
    """
    Tests data loading functionality
    """

    def setUp(self) -> None:
        """Set up the test case.

        Builds a feature preprocessor (raster + vector-map features and an
        ego-trajectory target), a tiny 3-scenario dataset, and a DataLoader
        that batches it with FeatureCollate.
        """
        self.batch_size = 4

        # No caching, recompute features on the fly for the test.
        feature_preprocessor = FeaturePreprocessor(
            cache_path=None,
            feature_builders=[
                RasterFeatureBuilder(
                    map_features={'LANE': 1.0, 'INTERSECTION': 1.0, 'STOP_LINE': 0.5, 'CROSSWALK': 0.5},
                    num_input_channels=4,
                    target_width=224,
                    target_height=224,
                    target_pixel_size=0.5,
                    ego_width=2.297,
                    ego_front_length=4.049,
                    ego_rear_length=1.127,
                    ego_longitudinal_offset=0.0,
                    baseline_path_thickness=1,
                ),
                VectorMapFeatureBuilder(radius=20),
            ],
            target_builders=[EgoTrajectoryTargetBuilder(TrajectorySampling(time_horizon=6.0, num_poses=12))],
            force_feature_computation=False,
        )

        # Keep only a few scenarios instead of testing the whole extraction
        scenario = get_test_nuplan_scenario()
        scenarios = [scenario] * 3

        dataset = ScenarioDataset(scenarios=scenarios, feature_preprocessor=feature_preprocessor)
        # drop_last=True guarantees every yielded batch is exactly batch_size.
        self.dataloader = torch.utils.data.DataLoader(
            dataset=dataset,
            batch_size=self.batch_size,
            num_workers=2,
            pin_memory=False,
            drop_last=True,
            collate_fn=FeatureCollate(),
        )

    def test_dataloader(self) -> None:
        """
        Tests that the training dataloader can be iterated without errors
        """
        dataloader = self.dataloader
        dataloader_iter = iter(dataloader)
        iterations = min(len(dataloader), NUM_BATCHES)

        for _ in range(iterations):
            features, targets = next(dataloader_iter)

            # The collated batch must expose the vector-map feature, and its
            # per-sample containers must all match the configured batch size.
            self.assertTrue("vector_map" in features.keys())

            vector_map: VectorMap = features["vector_map"]

            self.assertEqual(vector_map.num_of_batches, self.batch_size)
            self.assertEqual(len(vector_map.coords), self.batch_size)
            self.assertEqual(len(vector_map.multi_scale_connections), self.batch_size)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 2.28125 | 2 |
advanced_algs/knapsack/knapsack_test.py | angelusualle/algorithms | 0 | 12772104 | import unittest
from knapsack_with_repetition import knapsack_with_repetition
from knapsack_no_repetition import knapsack_no_repetition
class Test_Case_Knapsack(unittest.TestCase):
    """Checks both knapsack variants on the same weight/value instance."""

    def test_knapsack_with_repetition(self):
        weights, values, capacity = [6, 3, 4, 2], [30, 14, 16, 9], 10
        self.assertEqual(48, knapsack_with_repetition(weights, values, capacity))

    def test_knapsack_no_repetition(self):
        weights, values, capacity = [6, 3, 4, 2], [30, 14, 16, 9], 10
        self.assertEqual(46, knapsack_no_repetition(weights, values, capacity))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
pypy/tool/pytest/app_rewrite.py | nanjekyejoannah/pypy | 333 | 12772105 | import re
# Whether the host interpreter treats ASCII as the default source encoding
# (True under Python 2); gates the extra encoding check in _prepare_source.
ASCII_IS_DEFAULT_ENCODING = False

# PEP 263 coding-cookie pattern, e.g. "# -*- coding: utf-8 -*-".
cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
# UTF-8 byte-order mark (as a Python 2 byte string literal).
BOM_UTF8 = '\xef\xbb\xbf'
def _prepare_source(fn):
    """Read the source code for re-writing.

    ``fn`` is expected to be a ``py.path.local``-style object (it must
    provide ``.stat()`` and ``.read(mode)``).  Returns ``(stat, source)``
    on success, or ``(None, None)`` when the file cannot be read or would
    fail a real import anyway.

    NOTE(review): ``source.find("\\n")`` with a str argument on the bytes
    returned by ``read("rb")`` only works under Python 2 str semantics —
    confirm this module is py2-only before reusing it.
    """
    try:
        stat = fn.stat()
        source = fn.read("rb")
    except EnvironmentError:
        # Unreadable file: let the real import machinery report it.
        return None, None
    if ASCII_IS_DEFAULT_ENCODING:
        # ASCII is the default encoding in Python 2. Without a coding
        # declaration, Python 2 will complain about any bytes in the file
        # outside the ASCII range. Sadly, this behavior does not extend to
        # compile() or ast.parse(), which prefer to interpret the bytes as
        # latin-1. (At least they properly handle explicit coding cookies.) To
        # preserve this error behavior, we could force ast.parse() to use ASCII
        # as the encoding by inserting a coding cookie. Unfortunately, that
        # messes up line numbers. Thus, we have to check ourselves if anything
        # is outside the ASCII range in the case no encoding is explicitly
        # declared. For more context, see issue #269. Yay for Python 3 which
        # gets this right.
        # A coding cookie is only honoured on the first two lines (PEP 263).
        end1 = source.find("\n")
        end2 = source.find("\n", end1 + 1)
        if (not source.startswith(BOM_UTF8) and
            cookie_re.match(source[0:end1]) is None and
            cookie_re.match(source[end1 + 1:end2]) is None):
            try:
                source.decode("ascii")
            except UnicodeDecodeError:
                # Let it fail in real import.
                return None, None
    # On Python versions which are not 2.7 and less than or equal to 3.1, the
    # parser expects *nix newlines.
    return stat, source
Code/combined.py | Karan-Malik/Smart-Attendance-and-Engagement-Detection-System | 0 | 12772106 | <reponame>Karan-Malik/Smart-Attendance-and-Engagement-Detection-System
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 8 18:59:42 2021
@author: karan
"""
import datetime
import os
import time
import cv2
import pandas as pd
from scipy.spatial import distance as dist
from imutils.video import VideoStream
from imutils import face_utils
from threading import Thread
import numpy as np
import playsound
import argparse
import imutils
import dlib
def recognize_attendence():
    """Run live face recognition from the webcam and log attendance.

    Loads the LBPH model from ``TrainingImageLabel/Trainner.yml`` and the
    roster from ``StudentDetails/StudentDetails.csv``.  Recognised students
    (LBPH distance below 55) are recorded once each and written to a
    timestamped CSV under ``Attendance/`` when the user presses 'q'.
    """
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read("TrainingImageLabel"+os.sep+"Trainner.yml")
    harcascadePath = "haarcascade_default.xml"
    faceCascade = cv2.CascadeClassifier(harcascadePath)
    df = pd.read_csv("StudentDetails"+os.sep+"StudentDetails.csv", header=None)
    df.columns = ['Id', 'Name']
    font = cv2.FONT_HERSHEY_SIMPLEX
    col_names = ['Id', 'Name', 'Date', 'Time']
    attendance = pd.DataFrame(columns=col_names)

    cam = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    cam.set(3, 640)
    cam.set(4, 480)
    # Ignore face detections smaller than 10% of the frame in each dimension.
    minW = 0.1 * cam.get(3)
    minH = 0.1 * cam.get(4)

    while True:
        ret, im = cam.read()
        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(gray, 1.2, 5,
                    minSize=(int(minW), int(minH)), flags=cv2.CASCADE_SCALE_IMAGE)
        for (x, y, w, h) in faces:
            cv2.rectangle(im, (x, y), (x+w, y+h), (10, 159, 255), 2)
            # LBPH "conf" is a distance: LOWER means a better match.
            Id, conf = recognizer.predict(gray[y:y+h, x:x+w])
            if conf < 100:
                aa = df.loc[df['Id'] == Id]['Name'].values
                confstr = " {0}%".format(round(100 - conf))
                tt = str(Id)+"-"+aa
            else:
                # Bug fix: the old `elif conf > 200` left tt/confstr undefined
                # for 100 <= conf <= 200, causing a NameError on first frame.
                Id = ' Unknown  '
                tt = str(Id)
                confstr = " {0}%".format(round(100 - conf))

            if conf < 55:
                # Bug fix + consistency with attendanceAndDrowsy(): only
                # confident matches (small distance) are marked present; the
                # old `conf > 55` logged exactly the poor matches instead.
                ts = time.time()
                date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
                timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
                aa = str(aa)[2:-2]
                attendance.loc[len(attendance)] = [Id, aa, date, timeStamp]
                tt = str(tt)[2:-2]

            cv2.putText(im, str(tt), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
            # Colour-code the displayed confidence: green / yellow / red.
            if (100-conf) > 67:
                cv2.putText(im, str(confstr), (x + 5, y + h - 5), font, 1, (0, 255, 0), 1)
            elif (100-conf) > 50:
                cv2.putText(im, str(confstr), (x + 5, y + h - 5), font, 1, (0, 255, 255), 1)
            else:
                cv2.putText(im, str(confstr), (x + 5, y + h - 5), font, 1, (0, 0, 255), 1)

        # Keep only the first sighting of each student.
        attendance = attendance.drop_duplicates(subset=['Id'], keep='first')
        cv2.imshow('Attendance', im)
        if (cv2.waitKey(1) == ord('q')):
            break

    print(attendance)
    ts = time.time()
    date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
    timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
    Hour, Minute, Second = timeStamp.split(":")
    fileName = "Attendance"+os.sep+"Attendance_"+date+"_"+Hour+"-"+Minute+"-"+Second+".csv"
    attendance.to_csv(fileName, index=False)
    print("Attendance Successful")
    cam.release()
    cv2.destroyAllWindows()
def eye_aspect_ratio(eye):
    """Compute the eye aspect ratio (EAR) from six eye landmarks.

    ``eye`` holds the six (x, y) landmark points of one eye in dlib's usual
    ordering; the EAR is the mean of the two vertical landmark distances
    divided by the horizontal distance.
    """
    pts = np.asarray(eye, dtype=float)
    vertical_a = np.linalg.norm(pts[1] - pts[5])
    vertical_b = np.linalg.norm(pts[2] - pts[4])
    horizontal = np.linalg.norm(pts[0] - pts[3])
    return (vertical_a + vertical_b) / (2.0 * horizontal)
def detect_drowsy():
    """Watch the webcam and flag drowsiness from prolonged eye closure.

    Computes the eye aspect ratio (EAR) per frame using dlib's 68-point
    facial-landmark model; when the EAR stays below 0.3 for 48 consecutive
    frames a "DROWSINESS ALERT!" banner is drawn on the frame.  Press 'q'
    to quit.
    """
    ear_threshold = 0.3
    closed_frames_needed = 48
    closed_frames = 0

    face_detector = dlib.get_frontal_face_detector()
    landmark_model = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
    left_idx = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    right_idx = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    time.sleep(1.0)  # let the camera warm up

    while True:
        _, frame = capture.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        for face in face_detector(gray, 0):
            landmarks = face_utils.shape_to_np(landmark_model(gray, face))
            left_eye = landmarks[left_idx[0]:left_idx[1]]
            right_eye = landmarks[right_idx[0]:right_idx[1]]
            ear = (eye_aspect_ratio(left_eye) + eye_aspect_ratio(right_eye)) / 2.0

            # Outline both eyes for visual feedback.
            cv2.drawContours(frame, [cv2.convexHull(left_eye)], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [cv2.convexHull(right_eye)], -1, (0, 255, 0), 1)

            if ear < ear_threshold:
                closed_frames += 1
                if closed_frames >= closed_frames_needed:
                    cv2.putText(frame, "DROWSINESS ALERT!", (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            else:
                closed_frames = 0

            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        cv2.imshow("Frame", frame)
        if (cv2.waitKey(1) & 0xFF) == ord("q"):
            break

    cv2.destroyAllWindows()
    capture.release()
def attendanceAndDrowsy():
    """Combined loop: face-recognition attendance + drowsiness detection.

    Runs LBPH recognition (attendance is marked when the LBPH distance
    ``conf`` is below 55 — lower distance = better match) and, on the same
    frames, the EAR-based drowsiness check.  Press 'q' to stop; attendance
    is then written to a timestamped CSV under ``Attendance/``.

    NOTE(review): the final CSV name here uses '%d-%m-%Y' while
    recognize_attendence uses '%Y-%m-%d' — confirm which format downstream
    tooling expects.
    """
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read("TrainingImageLabel"+os.sep+"Trainner.yml")
    harcascadePath = "haarcascade_default.xml"
    faceCascade = cv2.CascadeClassifier(harcascadePath)
    df = pd.read_csv("StudentDetails"+os.sep+"StudentDetails.csv",header=None)
    df.columns=['Id','Name']
    font = cv2.FONT_HERSHEY_SIMPLEX
    col_names = ['Id', 'Name', 'Date', 'Time']
    attendance = pd.DataFrame(columns=col_names)
    # Drowsiness parameters: alert after 48 consecutive frames with EAR < 0.3.
    EYE_AR_THRESH = 0.3
    EYE_AR_CONSEC_FRAMES = 48
    COUNTER = 0
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    cam = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    cam.set(3, 640)
    cam.set(4, 480)
    # Ignore face detections smaller than 10% of the frame in each dimension.
    minW = 0.1 * cam.get(3)
    minH = 0.1 * cam.get(4)
    while True:
        ret, im = cam.read()
        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(gray, 1.2, 5,
                    minSize = (int(minW), int(minH)),flags = cv2.CASCADE_SCALE_IMAGE)
        for(x, y, w, h) in faces:
            cv2.rectangle(im, (x, y), (x+w, y+h), (10, 159, 255), 2)
            # LBPH "conf" is a distance: lower means a better match.
            Id, conf = recognizer.predict(gray[y:y+h, x:x+w])
            #print(df)
            # NOTE(review): str + ndarray concatenation below relies on the
            # Name column having object dtype — verify with the CSV contents.
            aa = df.loc[df['Id'] == Id]['Name'].values
            confstr = " {0}%".format(round(100 - conf))
            tt = str(Id)+"-"+aa
            if conf>200:
                Id = '  Unknown  '
                tt = str(Id)
                confstr = " {0}%".format(round(100 - conf))
            if (conf) <55:
                # Confident match: record this student (deduplicated below).
                ts = time.time()
                date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
                timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
                aa = str(aa)[2:-2]
                attendance.loc[len(attendance)] = [Id, aa, date, timeStamp]
                tt = str(tt)[2:-2]
            cv2.putText(im, str(tt), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
            # Colour-code the displayed confidence: green / yellow / red.
            if (100-conf) > 67:
                cv2.putText(im, str(confstr), (x + 5, y + h - 5), font,1, (0, 255, 0),1 )
            elif (100-conf) > 50:
                cv2.putText(im, str(confstr), (x + 5, y + h - 5), font, 1, (0, 255, 255), 1)
            else:
                cv2.putText(im, str(confstr), (x + 5, y + h - 5), font, 1, (0, 0, 255), 1)
        # frame=im
        # frame = imutils.resize(frame, width=450)
        # Second pass over the same frame: dlib landmarks for the EAR check.
        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        rects = detector(gray, 0)
        for rect in rects:
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)
            ear = (leftEAR + rightEAR) / 2.0
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(im, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(im, [rightEyeHull], -1, (0, 255, 0), 1)
            if ear < EYE_AR_THRESH:
                COUNTER += 1
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    cv2.putText(im, "DROWSINESS ALERT!", (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            else:
                COUNTER = 0
            cv2.putText(im, "EAR: {:.2f}".format(ear), (300, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        # Keep only the first sighting of each student.
        attendance = attendance.drop_duplicates(subset=['Id'], keep='first')
        cv2.imshow('Attendance', im)
        if (cv2.waitKey(1) == ord('q')):
            break
    print(attendance)
    ts = time.time()
    date = datetime.datetime.fromtimestamp(ts).strftime('%d-%m-%Y')
    timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
    Hour, Minute, Second = timeStamp.split(":")
    fileName = "Attendance"+os.sep+"Attendance_"+date+"_"+Hour+"-"+Minute+"-"+Second+".csv"
    attendance.to_csv(fileName, index=False)
    print("Attendance Successful")
    cam.release()
    cv2.destroyAllWindows()
#attendanceAndDrowsy()
| 2.1875 | 2 |
tests/test_get_posters.py | aDrz/movie-posters-convnet | 42 | 12772107 | # -*- coding: utf-8 -*-
import sys
sys.path.append('src')
import unittest
from src.get_posters import (download_poster, get_title_display,
get_yearly_url_imgs)
from src.utils import create_folder
class UtilsGetPosters(unittest.TestCase):
    """Integration checks for the poster-scraping helpers (uses year 1913)."""

    def setUp(self):
        self.year = 1913
        for sub in ('posters', 'thumbnails'):
            create_folder('./data/{0}/{1}'.format(self.year, sub))
        self.dict_imgs_1913 = get_yearly_url_imgs(1913)

    def test_get_yearly_url_imgs(self):
        records = self.dict_imgs_1913
        self.assertTrue(isinstance(records, list))
        self.assertTrue(all(isinstance(rec, dict) for rec in records))
        # Every scraped record must carry the full set of metadata keys.
        for key in ('title', 'year', 'title_display', 'url_img'):
            self.assertTrue(all(key in rec.keys() for rec in records))

    def test_download_poster(self):
        record = self.dict_imgs_1913[0]
        poster_b64, thumb_b64 = download_poster(record['url_img'],
                                                size_thumb=(50, 50))
        self.assertTrue(isinstance(poster_b64, str))
        self.assertTrue(isinstance(thumb_b64, str))

    def test_get_title_display(self):
        title, year = 'my movie title', 2010
        base = 'http://dummyurl.com/2010/posters/my_movie_title'
        # (url suffix, expected display title) pairs.
        cases = [
            ('.jpg', 'my movie title, 2010'),
            ('_ver2.jpg', 'my movie title, 2010, v2'),
            ('_ver28.jpg', 'my movie title, 2010, v28'),
        ]
        for suffix, expected in cases:
            self.assertTrue(get_title_display(title, year, base + suffix) == expected)
| 2.640625 | 3 |
src/mcmc/colour/burn_in_colour.py | MehnaazAsad/RESOLVE_Statistics | 1 | 12772108 | """
{This script reads in the raw chain and plots time series for all parameters
in order to identify the burn-in}
"""
# Libs
from cosmo_utils.utils import work_paths as cwpaths
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib
import pandas as pd
import numpy as np
import math
import os
__author__ = '{<NAME>}'
# Global matplotlib styling: Helvetica sans-serif, LaTeX text rendering with
# amsmath, and thicker axes/tick marks for the burn-in plots below.
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']},size=20)
rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
rc('axes', linewidth=2)
rc('xtick.major', width=2, size=7)
rc('ytick.major', width=2, size=7)
def find_nearest(array, value):
    """Finds the element in array that is closest to the value

    Args:
        array (numpy.array): Array of values
        value (numpy.float): Value to find closest match to

    Returns:
        numpy.float: Closest match found in array
    """
    arr = np.asarray(array)
    best_idx = np.argmin(np.abs(arr - value))
    return arr[best_idx]
# ---------------------------------------------------------------------------
# Configuration and raw-chain loading.
# ---------------------------------------------------------------------------
dict_of_paths = cwpaths.cookiecutter_paths()
path_to_raw = dict_of_paths['raw_dir']
path_to_proc = dict_of_paths['proc_dir']
path_to_interim = dict_of_paths['int_dir']
path_to_figures = dict_of_paths['plot_dir']

survey = 'eco'
mf_type = 'smf'
quenching = 'hybrid'
nwalkers = 260

if mf_type == 'smf':
    path_to_proc = path_to_proc + 'smhm_colour_run27/'
else:
    path_to_proc = path_to_proc + 'bmhm_run3/'

chain_fname = path_to_proc + 'mcmc_{0}_colour_raw.txt'.format(survey)

if quenching == 'hybrid':
    emcee_table = pd.read_csv(chain_fname, delim_whitespace=True,
        names=['Mstar_q','Mhalo_q','mu','nu'],
        header=None)
    # Drop emcee's commented header/progress rows.
    emcee_table = emcee_table[emcee_table.Mstar_q.values != '#']
    emcee_table.Mstar_q = emcee_table.Mstar_q.astype(np.float64)
    emcee_table.Mhalo_q = emcee_table.Mhalo_q.astype(np.float64)
    emcee_table.mu = emcee_table.mu.astype(np.float64)
    emcee_table.nu = emcee_table.nu.astype(np.float64)

    # NOTE(review): this loop appears to repair samples whose last column
    # spilled onto the following line in the raw file, pulling the value
    # back from the next row — verify against the raw chain format.
    for idx,row in enumerate(emcee_table.values):
        if np.isnan(row)[3] == True and np.isnan(row)[2] == False:
            nu_val = emcee_table.values[idx+1][0]
            row[3] = nu_val

    emcee_table = emcee_table.dropna(axis='index', how='any').\
        reset_index(drop=True)
    # emcee_table.nu = np.log10(emcee_table.nu)

elif quenching == 'halo':
    emcee_table = pd.read_csv(chain_fname, delim_whitespace=True,
        names=['Mh_qc','Mh_qs','mu_c','mu_s'],
        header=None)
    # Drop emcee's commented header/progress rows.
    emcee_table = emcee_table[emcee_table.Mh_qc.values != '#']
    emcee_table.Mh_qc = emcee_table.Mh_qc.astype(np.float64)
    emcee_table.Mh_qs = emcee_table.Mh_qs.astype(np.float64)
    emcee_table.mu_c = emcee_table.mu_c.astype(np.float64)
    emcee_table.mu_s = emcee_table.mu_s.astype(np.float64)

    # Same spilled-last-column repair as the hybrid branch.
    for idx,row in enumerate(emcee_table.values):
        if np.isnan(row)[3] == True and np.isnan(row)[2] == False:
            mu_s_val = emcee_table.values[idx+1][0]
            row[3] = mu_s_val

    emcee_table = emcee_table.dropna(axis='index', how='any').\
        reset_index(drop=True)

# Attach log10(chi-squared) per sample as an extra column.
chi2_fname = path_to_proc + '{0}_colour_chi2.txt'.format(survey)
chi2_df = pd.read_csv(chi2_fname,header=None,names=['chisquared'])
chi2 = np.log10(chi2_df.chisquared.values)
emcee_table['chi2'] = chi2
# Each chunk is now a step and within each chunk, each row is a walker
# Different from what it used to be where each chunk was a walker and
# within each chunk, each row was a step

# Label every row with its walker id (1..nwalkers, cycling) and its
# iteration/step id (incremented every nwalkers rows).
walker_id_arr = np.zeros(len(emcee_table))
iteration_id_arr = np.zeros(len(emcee_table))
counter_wid = 0
counter_stepid = 0
for idx,row in emcee_table.iterrows():
    counter_wid += 1
    if idx % nwalkers == 0:
        # Start of a new step: reset the walker counter.
        counter_stepid += 1
        counter_wid = 1
    walker_id_arr[idx] = counter_wid
    iteration_id_arr[idx] = counter_stepid

id_data = {'walker_id': walker_id_arr, 'iteration_id': iteration_id_arr}

id_df = pd.DataFrame(id_data, index=emcee_table.index)
emcee_table = emcee_table.assign(**id_df)

# Group all walkers belonging to the same iteration together.
grps = emcee_table.groupby('iteration_id')
grp_keys = grps.groups.keys()
if quenching == 'hybrid':
    # For each parameter, [0] holds the per-iteration mean across walkers
    # and [1] the per-iteration standard deviation.
    # NOTE: this list also shadows the earlier `chi2` array.
    Mstar_q = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
    Mhalo_q = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
    mu = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
    nu = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
    chi2 = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
    for idx,key in enumerate(grp_keys):
        group = grps.get_group(key)
        Mstar_q_mean = np.mean(group.Mstar_q.values)
        Mstar_q_std = np.std(group.Mstar_q.values)
        Mstar_q[0][idx] = Mstar_q_mean
        Mstar_q[1][idx] = Mstar_q_std
        Mhalo_q_mean = np.mean(group.Mhalo_q.values)
        Mhalo_q_std = np.std(group.Mhalo_q.values)
        Mhalo_q[0][idx] = Mhalo_q_mean
        Mhalo_q[1][idx] = Mhalo_q_std
        mu_mean = np.mean(group.mu.values)
        mu_std = np.std(group.mu.values)
        mu[0][idx] = mu_mean
        mu[1][idx] = mu_std
        nu_mean = np.mean(group.nu.values)
        nu_std = np.std(group.nu.values)
        nu[0][idx] = nu_mean
        nu[1][idx] = nu_std
        chi2_mean = np.mean(group.chi2.values)
        chi2_std = np.std(group.chi2.values)
        chi2[0][idx] = chi2_mean
        chi2[1][idx] = chi2_std

    # Reference values from Table 1 of Zu & Mandelbaum 2015 ("prior case").
    zumandelbaum_param_vals = [10.5, 13.76, 0.69, 0.15]
    grp_keys = list(grp_keys)
    # One panel per parameter plus one for log10(chi2); shared x (iteration).
    fig1, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1, sharex=True, \
        figsize=(10,10))
    ax1.plot(grp_keys, Mstar_q[0],c='#941266',ls='--', marker='o')
    ax1.axhline(zumandelbaum_param_vals[0],color='lightgray')
    ax2.plot(grp_keys, Mhalo_q[0], c='#941266',ls='--', marker='o')
    ax2.axhline(zumandelbaum_param_vals[1],color='lightgray')
    ax3.plot(grp_keys, mu[0], c='#941266',ls='--', marker='o')
    ax3.axhline(zumandelbaum_param_vals[2],color='lightgray')
    ax4.plot(grp_keys, nu[0], c='#941266',ls='--', marker='o')
    ax4.axhline(zumandelbaum_param_vals[3],color='lightgray')
    ax5.plot(grp_keys, chi2[0], c='#941266',ls='--', marker='o')
    # Shaded +/- 1 sigma bands around the walker means.
    ax1.fill_between(grp_keys, Mstar_q[0]-Mstar_q[1], Mstar_q[0]+Mstar_q[1],
        alpha=0.3, color='#941266')
    ax2.fill_between(grp_keys, Mhalo_q[0]-Mhalo_q[1], Mhalo_q[0]+Mhalo_q[1], \
        alpha=0.3, color='#941266')
    ax3.fill_between(grp_keys, mu[0]-mu[1], mu[0]+mu[1], \
        alpha=0.3, color='#941266')
    ax4.fill_between(grp_keys, nu[0]-nu[1], nu[0]+nu[1], \
        alpha=0.3, color='#941266')
    ax5.fill_between(grp_keys, chi2[0]-chi2[1], chi2[0]+chi2[1], \
        alpha=0.3, color='#941266')

    ax1.set_ylabel(r"$\mathbf{log_{10}\ M^{q}_{*}}$")
    ax2.set_ylabel(r"$\mathbf{log_{10}\ M^{q}_{h}}$")
    ax3.set_ylabel(r"$\boldsymbol{\mu}$")
    # ax4.set_ylabel(r"$\mathbf{log_{10}} \boldsymbol{\ \nu}$")
    ax4.set_ylabel(r"$\boldsymbol{\nu}$")
    ax5.set_ylabel(r"$\mathbf{log_{10}} \boldsymbol{{\ \chi}^2}$")
    # ax5.set_ylabel(r"$\boldsymbol{{\chi}^2}$")
    # ax1.set_yscale('log')
    # ax2.set_yscale('log')
    # Annotate each panel with its reference value.
    ax1.annotate(zumandelbaum_param_vals[0], (0.95,0.85), xycoords='axes fraction',
        bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
    ax2.annotate(zumandelbaum_param_vals[1], (0.95,0.85), xycoords='axes fraction',
        bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
    ax3.annotate(zumandelbaum_param_vals[2], (0.95,0.85), xycoords='axes fraction',
        bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
    ax4.annotate(0.15, (0.95,0.85), xycoords='axes fraction',
        bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
    plt.xlabel(r"$\mathbf{iteration\ number}$")
    plt.show()
elif quenching == 'halo':
    # Same structure as the hybrid branch but for the halo-quenching
    # parameters: [0] = per-iteration mean, [1] = per-iteration std.
    Mh_qc = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
    Mh_qs = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
    mu_c = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
    mu_s = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
    chi2 = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
    for idx,key in enumerate(grp_keys):
        group = grps.get_group(key)
        Mh_qc_mean = np.mean(group.Mh_qc.values)
        Mh_qc_std = np.std(group.Mh_qc.values)
        Mh_qc[0][idx] = Mh_qc_mean
        Mh_qc[1][idx] = Mh_qc_std
        Mh_qs_mean = np.mean(group.Mh_qs.values)
        Mh_qs_std = np.std(group.Mh_qs.values)
        Mh_qs[0][idx] = Mh_qs_mean
        Mh_qs[1][idx] = Mh_qs_std
        mu_c_mean = np.mean(group.mu_c.values)
        mu_c_std = np.std(group.mu_c.values)
        mu_c[0][idx] = mu_c_mean
        mu_c[1][idx] = mu_c_std
        mu_s_mean = np.mean(group.mu_s.values)
        mu_s_std = np.std(group.mu_s.values)
        mu_s[0][idx] = mu_s_mean
        mu_s[1][idx] = mu_s_std
        chi2_mean = np.mean(group.chi2.values)
        chi2_std = np.std(group.chi2.values)
        chi2[0][idx] = chi2_mean
        chi2[1][idx] = chi2_std

    # Reference values from Zu & Mandelbaum 2015 for the halo model.
    zumandelbaum_param_vals = [12.2, 12.17, 0.38, 0.15]
    grp_keys = list(grp_keys)
    fig1, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1, sharex=True, \
        figsize=(10,10))
    ax1.plot(grp_keys, Mh_qc[0],c='#941266',ls='--', marker='o')
    ax1.axhline(zumandelbaum_param_vals[0],color='lightgray')
    ax2.plot(grp_keys, Mh_qs[0], c='#941266',ls='--', marker='o')
    ax2.axhline(zumandelbaum_param_vals[1],color='lightgray')
    ax3.plot(grp_keys, mu_c[0], c='#941266',ls='--', marker='o')
    ax3.axhline(zumandelbaum_param_vals[2],color='lightgray')
    ax4.plot(grp_keys, mu_s[0], c='#941266',ls='--', marker='o')
    ax4.axhline(zumandelbaum_param_vals[3],color='lightgray')
    ax5.plot(grp_keys, chi2[0], c='#941266',ls='--', marker='o')
    # Shaded +/- 1 sigma bands around the walker means.
    ax1.fill_between(grp_keys, Mh_qc[0]-Mh_qc[1], Mh_qc[0]+Mh_qc[1],
        alpha=0.3, color='#941266')
    ax2.fill_between(grp_keys, Mh_qs[0]-Mh_qs[1], Mh_qs[0]+Mh_qs[1], \
        alpha=0.3, color='#941266')
    ax3.fill_between(grp_keys, mu_c[0]-mu_c[1], mu_c[0]+mu_c[1], \
        alpha=0.3, color='#941266')
    ax4.fill_between(grp_keys, mu_s[0]-mu_s[1], mu_s[0]+mu_s[1], \
        alpha=0.3, color='#941266')
    ax5.fill_between(grp_keys, chi2[0]-chi2[1], chi2[0]+chi2[1], \
        alpha=0.3, color='#941266')

    ax1.set_ylabel(r"$\mathbf{log_{10}\ Mh_{qc}}$")
    ax2.set_ylabel(r"$\mathbf{log_{10}\ Mh_{qs}}$")
    ax3.set_ylabel(r"$\boldsymbol{\ mu_{c}}$")
    # ax4.set_ylabel(r"$\mathbf{log_{10}} \boldsymbol{\ \nu}$")
    ax4.set_ylabel(r"$\boldsymbol{\ mu_{s}}$")
    ax5.set_ylabel(r"$\mathbf{log_{10}} \boldsymbol{{\ \chi}^2}$")
    # ax5.set_ylabel(r"$\boldsymbol{{\chi}^2}$")
    # ax1.set_yscale('log')
    # ax2.set_yscale('log')
    # Annotate each panel with its reference value.
    ax1.annotate(zumandelbaum_param_vals[0], (0.95,0.85), xycoords='axes fraction',
        bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
    ax2.annotate(zumandelbaum_param_vals[1], (0.95,0.85), xycoords='axes fraction',
        bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
    ax3.annotate(zumandelbaum_param_vals[2], (0.95,0.85), xycoords='axes fraction',
        bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
    ax4.annotate(0.15, (0.95,0.85), xycoords='axes fraction',
        bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
    plt.xlabel(r"$\mathbf{iteration\ number}$")
    plt.show()
######################## Calculate acceptance fraction ########################

# NOTE(review): this section reads run21 while the burn-in section above
# reads run27 — confirm which chain the acceptance fraction should use.
dict_of_paths = cwpaths.cookiecutter_paths()
path_to_proc = dict_of_paths['proc_dir']

if mf_type == 'smf':
    path_to_proc = path_to_proc + 'smhm_colour_run21/'
else:
    path_to_proc = path_to_proc + 'bmhm_run3/'

chain_fname = path_to_proc + 'mcmc_{0}_colour_raw.txt'.format(survey)

if quenching == 'hybrid':
    emcee_table = pd.read_csv(chain_fname, delim_whitespace=True,
        names=['Mstar_q','Mhalo_q','mu','nu'],
        header=None)
    # Drop emcee's commented header/progress rows.
    emcee_table = emcee_table[emcee_table.Mstar_q.values != '#']
    emcee_table.Mstar_q = emcee_table.Mstar_q.astype(np.float64)
    emcee_table.Mhalo_q = emcee_table.Mhalo_q.astype(np.float64)
    emcee_table.mu = emcee_table.mu.astype(np.float64)
    emcee_table.nu = emcee_table.nu.astype(np.float64)

    # Repair samples whose last column spilled onto the following line.
    for idx,row in enumerate(emcee_table.values):
        if np.isnan(row)[3] == True and np.isnan(row)[2] == False:
            nu_val = emcee_table.values[idx+1][0]
            row[3] = nu_val

    emcee_table = emcee_table.dropna(axis='index', how='any').\
        reset_index(drop=True)
    # emcee_table.nu = np.log10(emcee_table.nu)
    param_cols = ['Mstar_q', 'Mhalo_q', 'mu', 'nu']

elif quenching == 'halo':
    emcee_table = pd.read_csv(chain_fname, delim_whitespace=True,
        names=['Mh_qc','Mh_qs','mu_c','mu_s'],
        header=None)
    emcee_table = emcee_table[emcee_table.Mh_qc.values != '#']
    emcee_table.Mh_qc = emcee_table.Mh_qc.astype(np.float64)
    emcee_table.Mh_qs = emcee_table.Mh_qs.astype(np.float64)
    emcee_table.mu_c = emcee_table.mu_c.astype(np.float64)
    emcee_table.mu_s = emcee_table.mu_s.astype(np.float64)

    for idx,row in enumerate(emcee_table.values):
        if np.isnan(row)[3] == True and np.isnan(row)[2] == False:
            mu_s_val = emcee_table.values[idx+1][0]
            row[3] = mu_s_val

    emcee_table = emcee_table.dropna(axis='index', how='any').\
        reset_index(drop=True)
    param_cols = ['Mh_qc', 'Mh_qs', 'mu_c', 'mu_s']

# Acceptance fraction = fraction of unique samples among all samples.
# Bug fix: the column list used to be hard-coded to the hybrid parameter
# names, which raised a KeyError whenever quenching == 'halo'.
num_unique_rows = emcee_table[param_cols].drop_duplicates().shape[0]
num_rows = len(emcee_table)
acceptance_fraction = num_unique_rows / num_rows
print("Acceptance fraction: {0}%".format(np.round(acceptance_fraction,2)*100))
# For behroozi chains
# Same acceptance-fraction computation, but for the 5-parameter Behroozi
# SMHM chain (run6).
dict_of_paths = cwpaths.cookiecutter_paths()
path_to_proc = dict_of_paths['proc_dir']
chain_fname = path_to_proc + 'smhm_run6/mcmc_{0}_raw.txt'.\
    format(survey)
emcee_table = pd.read_csv(chain_fname,
    names=['mhalo_c','mstellar_c','lowmass_slope','highmass_slope',
    'scatter'],header=None, delim_whitespace=True)
# Drop emcee's commented header/progress rows.
emcee_table = emcee_table[emcee_table.mhalo_c.values != '#']
emcee_table.mhalo_c = emcee_table.mhalo_c.astype(np.float64)
emcee_table.mstellar_c = emcee_table.mstellar_c.astype(np.float64)
# NOTE(review): highmass_slope is never cast to float64 here, unlike the
# other columns — confirm whether that is intentional.
emcee_table.lowmass_slope = emcee_table.lowmass_slope.astype(np.float64)

# Repair samples whose 'scatter' value spilled onto the following line.
for idx,row in enumerate(emcee_table.values):
    if np.isnan(row)[4] == True and np.isnan(row)[3] == False:
        scatter_val = emcee_table.values[idx+1][0]
        row[4] = scatter_val
emcee_table = emcee_table.dropna(axis='index', how='any').reset_index(drop=True)

# Acceptance fraction over the first four parameters only.
num_unique_rows = emcee_table[['mhalo_c','mstellar_c','lowmass_slope',\
    'highmass_slope']].drop_duplicates().shape[0]
num_rows = len(emcee_table)
acceptance_fraction = num_unique_rows / num_rows
print("Acceptance fraction: {0}%".format(np.round(acceptance_fraction,2)*100))
def hybrid_quenching_model(theta, gals_df, mock, randint=None):
    """
    Apply hybrid quenching model from Zu and Mandelbaum 2015

    Parameters
    ----------
    theta: array
        Model parameters [Mstar_q, Mh_q, mu, nu]
    gals_df: pandas dataframe
        Mock catalog
    mock: string
        Which mock the catalog comes from ('vishnu' or otherwise)
    randint: int, optional
        Mock number naming the stellar-mass column (Vishnu mocks only)

    Returns
    ---------
    f_red_cen: array
        Array of central red fractions
    f_red_sat: array
        Array of satellite red fractions
    """
    # Parameter values from Table 1 of Zu and Mandelbaum 2015 "prior case"
    Mstar_q, Mh_q, mu, nu = theta[0], theta[1], theta[2], theta[3]

    cen_hosthalo_mass_arr, sat_hosthalo_mass_arr = get_host_halo_mock(
        gals_df, mock)
    cen_stellar_mass_arr, sat_stellar_mass_arr = get_stellar_mock(
        gals_df, mock, randint)

    # Centrals quench on stellar mass alone
    f_red_cen = 1 - np.exp(-((cen_stellar_mass_arr / (10**Mstar_q)) ** mu))

    # Satellites quench on both stellar mass and host halo mass
    g_Mstar = np.exp(-((sat_stellar_mass_arr / (10**Mstar_q)) ** mu))
    h_Mh = np.exp(-((sat_hosthalo_mass_arr / (10**Mh_q)) ** nu))
    f_red_sat = 1 - g_Mstar * h_Mh

    return f_red_cen, f_red_sat
def assign_colour_label_mock(f_red_cen, f_red_sat, gals_df, drop_fred=False):
    """
    Assign colour label to mock catalog

    Each galaxy is drawn red with probability equal to its red fraction:
    centrals (cs_flag == 1) use f_red_cen, satellites (cs_flag == 0) use
    f_red_sat.

    Parameters
    ----------
    f_red_cen: array
        Array of central red fractions
    f_red_sat: array
        Array of satellite red fractions
    gals_df: pandas Dataframe
        Mock catalog
    drop_fred: boolean
        Whether or not to keep red fraction column after colour has been
        assigned

    Returns
    ---------
    df: pandas Dataframe
        Dataframe with colour label and random number assigned as
        new columns
    """
    # Copy of dataframe so the caller's catalog is not mutated
    df = gals_df.copy()

    # Adding columns for f_red to df
    df.loc[:, 'f_red'] = np.zeros(len(df))
    df.loc[df['cs_flag'] == 1, 'f_red'] = f_red_cen
    df.loc[df['cs_flag'] == 0, 'f_red'] = f_red_sat
    f_red_arr = df['f_red'].values

    # One uniform draw per galaxy (vectorized instead of the per-galaxy
    # Python loop); a galaxy is blue when its draw is >= its red fraction
    rng_arr = np.random.uniform(size=len(df))
    df.loc[:, 'colour_label'] = np.where(rng_arr >= f_red_arr, 'B', 'R')
    df.loc[:, 'rng'] = rng_arr

    # Dropping 'f_red' column
    if drop_fred:
        df.drop('f_red', axis=1, inplace=True)

    return df
def get_host_halo_mock(gals_df, mock):
    """
    Get host halo mass from mock catalog

    Parameters
    ----------
    gals_df: pandas dataframe
        Mock catalog
    mock: string
        'vishnu' reads halo mass from 'halo_mvir'; any other value reads
        it from 'loghalom' (survey mocks)

    Returns
    ---------
    cen_halos: array
        Array of central host halo masses
    sat_halos: array
        Array of satellite host halo masses
    """
    df = gals_df.copy()
    # The halo-mass column is the only difference between the two mocks,
    # so select it once instead of duplicating the selection loop.
    halo_col = 'halo_mvir' if mock == 'vishnu' else 'loghalom'
    halo_mass = df[halo_col].values
    # Centrals have cs_flag == 1; everything else is treated as a
    # satellite (matching the original if/else).
    cen_mask = df.cs_flag.values == 1
    cen_halos = np.array(halo_mass[cen_mask])
    sat_halos = np.array(halo_mass[~cen_mask])

    return cen_halos, sat_halos
def get_stellar_mock(gals_df, mock, randint=None):
    """
    Get stellar mass from mock catalog

    Parameters
    ----------
    gals_df: pandas dataframe
        Mock catalog
    mock: string
        'vishnu' reads stellar mass from the column named after `randint`;
        any other value reads 'logmstar' (survey mocks)
    randint: int, optional
        Mock number used to name the stellar-mass column (Vishnu only)

    Returns
    ---------
    cen_gals: array
        Array of central stellar masses
    sat_gals: array
        Array of satellite stellar masses
    """
    df = gals_df.copy()
    mass_col = '{0}'.format(randint) if mock == 'vishnu' else 'logmstar'
    masses = df[mass_col].values
    cs_flag = df.cs_flag.values
    # Only cs_flag values of exactly 1 (central) or 0 (satellite) are
    # kept, matching the original if/elif selection.
    cen_gals = np.array(masses[cs_flag == 1])
    sat_gals = np.array(masses[cs_flag == 0])

    return cen_gals, sat_gals
def diff_smf(mstar_arr, volume, h1_bool, colour_flag=False):
    """
    Calculates differential stellar mass function in units of h=1.0

    Relies on the module-level global ``survey`` ('eco', 'resolvea' or
    'resolveb') to choose the binning scheme.

    Parameters
    ----------
    mstar_arr: numpy array
        Array of stellar masses (log10 if h1_bool is False, linear h=1
        masses otherwise -- presumably; confirm against callers)
    volume: float
        Volume of survey or simulation
    h1_bool: boolean
        True if units of masses are h=1, False if units of masses are not h=1
    colour_flag: boolean or string, optional
        False for all galaxies; 'R' or 'B' selects the red/blue binning,
        which only changes the ECO bin edges/count

    Returns
    ---------
    maxis: array
        Array of x-axis mass values (bin centers)
    phi: array
        Array of y-axis values (log10 number density)
    err_tot: array
        Array of Poisson error values per bin
    bins: array
        Array of bin edge values
    counts: array
        Raw (unnormalized) galaxy counts per bin
    """
    if not h1_bool:
        # changing from h=0.7 to h=1 assuming h^-2 dependence
        logmstar_arr = np.log10((10**mstar_arr) / 2.041)
    else:
        logmstar_arr = np.log10(mstar_arr)
    if survey == 'eco' or survey == 'resolvea':
        bin_min = np.round(np.log10((10**8.9) / 2.041), 1)
        if survey == 'eco' and colour_flag == 'R':
            bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
            bin_num = 6
        elif survey == 'eco' and colour_flag == 'B':
            bin_max = np.round(np.log10((10**11) / 2.041), 1)
            bin_num = 6
        elif survey == 'resolvea':
            # different to avoid nan in inverse corr mat
            bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
            bin_num = 7
        else:
            bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
            bin_num = 7
        bins = np.linspace(bin_min, bin_max, bin_num)
    elif survey == 'resolveb':
        bin_min = np.round(np.log10((10**8.7) / 2.041), 1)
        bin_max = np.round(np.log10((10**11.8) / 2.041), 1)
        bins = np.linspace(bin_min, bin_max, 7)
    # Unnormalized histogram and bin edges
    counts, edg = np.histogram(logmstar_arr, bins=bins) # paper used 17 bins
    dm = edg[1] - edg[0] # Bin width
    maxis = 0.5 * (edg[1:] + edg[:-1]) # Mass axis i.e. bin centers
    # Normalized to volume and bin width
    err_poiss = np.sqrt(counts) / (volume * dm)
    err_tot = err_poiss
    phi = counts / (volume * dm) # not a log quantity
    phi = np.log10(phi)
    return maxis, phi, err_tot, bins, counts
def measure_all_smf(table, volume, data_bool, randint_logmstar=None):
    """
    Calculates differential stellar mass function for all, red and blue galaxies
    from mock/data

    Parameters
    ----------
    table: pandas Dataframe
        Dataframe of either mock or data
    volume: float
        Volume of simulation/survey
    data_bool: Boolean
        Data or mock
    randint_logmstar: int, optional
        Mock number naming the stellar-mass column (mocks only)

    Returns
    ---------
    3 lists of [mass axis, phi, error, counts] for all, red and blue
    galaxies respectively
    """
    colour_col = 'colour_label'
    # Data catalogs keep mass in 'logmstar' with h=0.7 units; Vishnu mocks
    # keep it under the stringified mock number in h=1 units.
    if data_bool:
        logmstar_col = 'logmstar'
        h1_bool = False
    else:
        logmstar_col = '{0}'.format(randint_logmstar)
        h1_bool = True

    results = []
    # False -> all galaxies; 'R'/'B' -> the red/blue subsamples
    for colour_flag in (False, 'R', 'B'):
        if colour_flag:
            masses = table[logmstar_col].loc[table[colour_col] == colour_flag]
        else:
            masses = table[logmstar_col]
        maxis, phi, err, bins, counts = diff_smf(masses, volume, h1_bool,
            colour_flag)
        results.append([maxis, phi, err, counts])

    return results[0], results[1], results[2]
def assign_colour_label_data(catl):
    """
    Assign colour label to data

    Uses the u-r colour dividers of Moffett et al. 2015 equation 1:
    a fixed cut of 1.457 for logmstar <= 9.1, a sloped divider
    0.24*logmstar - 0.7 for 9.1 < logmstar < 10.1, and a fixed cut
    of 1.7 for logmstar >= 10.1.

    Parameters
    ----------
    catl: pandas Dataframe
        Data catalog with 'logmstar' and 'modelu_rcorr' columns

    Returns
    ---------
    catl: pandas Dataframe
        Data catalog with colour label assigned as new column (the input
        dataframe is modified in place, as before)
    """
    logmstar_arr = catl.logmstar.values
    u_r_arr = catl.modelu_rcorr.values

    # Vectorized divider per galaxy (replaces the per-galaxy Python loop)
    divider = np.where(logmstar_arr <= 9.1, 1.457,
                       np.where(logmstar_arr >= 10.1, 1.7,
                                0.24 * logmstar_arr - 0.7))
    catl['colour_label'] = np.where(u_r_arr > divider, 'R', 'B')

    return catl
def read_data_catl(path_to_file, survey):
    """
    Reads survey catalog from file

    Applies the survey's group-cz and absolute-magnitude cuts. Depends on
    the module-level global ``mf_type`` ('smf' or 'bmf') to pick the cut
    branch.

    Parameters
    ----------
    path_to_file: `string`
        Path to survey catalog file

    survey: `string`
        Name of survey ('eco', 'resolvea' or 'resolveb')

    Returns
    ---------
    catl: `pandas.DataFrame`
        Survey catalog with grpcz, abs rmag and stellar mass limits

    volume: `float`
        Volume of survey

    z_median: `float`
        Median redshift of survey

    NOTE(review): if `survey` is none of the recognized values, `catl` and
    `volume` are never assigned and the return raises UnboundLocalError.
    """
    if survey == 'eco':
        columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag',
                    'logmstar', 'logmgas', 'grp', 'grpn', 'logmh', 'logmh_s',
                    'fc', 'grpmb', 'grpms','modelu_rcorr']

        # 13878 galaxies
        eco_buff = pd.read_csv(path_to_file,delimiter=",", header=0, \
            usecols=columns)

        if mf_type == 'smf':
            # 6456 galaxies
            catl = eco_buff.loc[(eco_buff.grpcz.values >= 3000) &
                (eco_buff.grpcz.values <= 7000) &
                (eco_buff.absrmag.values <= -17.33)]
        elif mf_type == 'bmf':
            # NOTE(review): identical cuts to the 'smf' branch above --
            # presumably a placeholder until a bmf-specific cut exists;
            # confirm
            catl = eco_buff.loc[(eco_buff.grpcz.values >= 3000) &
                (eco_buff.grpcz.values <= 7000) &
                (eco_buff.absrmag.values <= -17.33)]

        volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
        # cvar = 0.125
        z_median = np.median(catl.grpcz.values) / (3 * 10**5)

    elif survey == 'resolvea' or survey == 'resolveb':
        columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag',
                    'logmstar', 'logmgas', 'grp', 'grpn', 'grpnassoc', 'logmh',
                    'logmh_s', 'fc', 'grpmb', 'grpms', 'f_a', 'f_b']
        # 2286 galaxies
        resolve_live18 = pd.read_csv(path_to_file, delimiter=",", header=0, \
            usecols=columns)

        if survey == 'resolvea':
            if mf_type == 'smf':
                catl = resolve_live18.loc[(resolve_live18.f_a.values == 1) &
                    (resolve_live18.grpcz.values >= 4500) &
                    (resolve_live18.grpcz.values <= 7000) &
                    (resolve_live18.absrmag.values <= -17.33)]
            elif mf_type == 'bmf':
                catl = resolve_live18.loc[(resolve_live18.f_a.values == 1) &
                    (resolve_live18.grpcz.values >= 4500) &
                    (resolve_live18.grpcz.values <= 7000) &
                    (resolve_live18.absrmag.values <= -17.33)]

            volume = 13172.384  # Survey volume without buffer [Mpc/h]^3
            # cvar = 0.30
            # NOTE(review): unlike the ECO branch, z_median here uses the
            # full catalog rather than the cut `catl` -- confirm intended
            z_median = np.median(resolve_live18.grpcz.values) / (3 * 10**5)

        elif survey == 'resolveb':
            if mf_type == 'smf':
                # 487 - cz, 369 - grpcz
                catl = resolve_live18.loc[(resolve_live18.f_b.values == 1) &
                    (resolve_live18.grpcz.values >= 4500) &
                    (resolve_live18.grpcz.values <= 7000) &
                    (resolve_live18.absrmag.values <= -17)]
            elif mf_type == 'bmf':
                catl = resolve_live18.loc[(resolve_live18.f_b.values == 1) &
                    (resolve_live18.grpcz.values >= 4500) &
                    (resolve_live18.grpcz.values <= 7000) &
                    (resolve_live18.absrmag.values <= -17)]

            volume = 4709.8373 # *2.915 #Survey volume without buffer [Mpc/h]^3
            # cvar = 0.58
            z_median = np.median(resolve_live18.grpcz.values) / (3 * 10**5)

    return catl, volume, z_median
def std_func(bins, mass_arr, vel_arr):
    """
    Standard deviation of velocity offsets about a fixed mean of zero
    (i.e. the RMS), computed in bins of stellar mass.

    Parameters
    ----------
    bins: array
        Bin edges in stellar mass; N edges give N-1 bins
    mass_arr: array-like
        Stellar masses used for binning
    vel_arr: array-like
        Velocity offsets, same length as mass_arr

    Returns
    ---------
    std_arr: list
        RMS of vel_arr per mass bin; an empty bin yields NaN (matching the
        original behavior of np.mean over an empty list)
    """
    mass_arr = np.asarray(mass_arr)
    vel_arr = np.asarray(vel_arr)
    std_arr = []
    # Half-open bins [lo, hi), as in the original double loop, but with a
    # vectorized mask per bin instead of an O(bins*n) scan.
    for lo, hi in zip(bins[:-1], bins[1:]):
        vals = vel_arr[(mass_arr >= lo) & (mass_arr < hi)]
        if len(vals):
            # Mean is fixed at 0, so the "std" reduces to the RMS
            std_arr.append(np.sqrt(np.mean(vals**2)))
        else:
            std_arr.append(np.nan)
    return std_arr
def get_deltav_sigma_vishnu_qmcolour(gals_df, randint):
    """
    Calculate spread in velocity dispersion from Vishnu mock (logmstar already
    in h=1)

    Uses the module-level global ``survey`` for the sample cuts and binning.

    Parameters
    ----------
    gals_df: pandas Dataframe
        Vishnu mock catalog
    randint: int
        Mock number; names the (linear) stellar-mass column ('<randint>')
        and the group-type column ('g_galtype_<randint>')

    Returns
    ---------
    std_red: numpy array
        Spread in velocity dispersion for groups with a red central
    std_blue: numpy array
        Spread in velocity dispersion for groups with a blue central
    centers_red: numpy array
        Bin centers of central stellar mass for red centrals
    centers_blue: numpy array
        Bin centers of central stellar mass for blue centrals
    """
    mock_pd = gals_df.copy()
    # Survey-specific sample definition.
    # NOTE(review): mock_name, num_mocks, mag_limit and volume are assigned
    # below but never used in this function.
    if survey == 'eco':
        mock_name = 'ECO'
        num_mocks = 8
        min_cz = 3000
        max_cz = 7000
        mag_limit = -17.33
        mstar_limit = 8.9
        volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
    elif survey == 'resolvea':
        mock_name = 'A'
        num_mocks = 59
        min_cz = 4500
        max_cz = 7000
        mag_limit = -17.33
        mstar_limit = 8.9
        volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
    elif survey == 'resolveb':
        mock_name = 'B'
        num_mocks = 104
        min_cz = 4500
        max_cz = 7000
        mag_limit = -17
        mstar_limit = 8.7
        volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3

    logmstar_col = '{0}'.format(randint)
    g_galtype_col = 'g_galtype_{0}'.format(randint)
    # Using the same survey definition as in mcmc smf i.e excluding the
    # buffer except no M_r cut since vishnu mock has no M_r info
    # (mass column is linear, hence the 10**mstar_limit/2.041 conversion)
    mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
        (mock_pd.cz.values <= max_cz) & \
        (mock_pd[logmstar_col].values >= (10**mstar_limit/2.041))]

    # Group IDs whose central (g_galtype == 1) is red / blue
    red_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
        colour_label == 'R') & (mock_pd[g_galtype_col] == 1)].values)
    blue_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
        colour_label == 'B') & (mock_pd[g_galtype_col] == 1)].values)

    # Calculating spread in velocity dispersion for galaxies in groups
    # with a red central
    red_deltav_arr = []
    red_cen_stellar_mass_arr = []
    for key in red_subset_grpids:
        group = mock_pd.loc[mock_pd.groupid == key]
        # Central's (linear) stellar mass tags every member's deltav
        cen_stellar_mass = group['{0}'.format(randint)].loc[group[g_galtype_col].\
            values == 1].values[0]
        mean_cz_grp = np.round(np.mean(group.cz.values),2)
        deltav = group.cz.values - len(group)*[mean_cz_grp]
        for val in deltav:
            red_deltav_arr.append(val)
            red_cen_stellar_mass_arr.append(cen_stellar_mass)
    # print(max(red_cen_stellar_mass_arr))

    # Bin by log10 of the central stellar mass
    red_cen_stellar_mass_arr = np.log10(red_cen_stellar_mass_arr)

    if survey == 'eco' or survey == 'resolvea':
        # TODO : check if this is actually correct for resolve a
        red_stellar_mass_bins = np.linspace(8.6,11.2,6)
    elif survey == 'resolveb':
        red_stellar_mass_bins = np.linspace(8.4,11.0,6)
    std_red = std_func(red_stellar_mass_bins, red_cen_stellar_mass_arr,
        red_deltav_arr)
    std_red = np.array(std_red)

    # Calculating spread in velocity dispersion for galaxies in groups
    # with a blue central
    blue_deltav_arr = []
    blue_cen_stellar_mass_arr = []
    for key in blue_subset_grpids:
        group = mock_pd.loc[mock_pd.groupid == key]
        cen_stellar_mass = group['{0}'.format(randint)].loc[group[g_galtype_col]\
            .values == 1].values[0]
        mean_cz_grp = np.round(np.mean(group.cz.values),2)
        deltav = group.cz.values - len(group)*[mean_cz_grp]
        for val in deltav:
            blue_deltav_arr.append(val)
            blue_cen_stellar_mass_arr.append(cen_stellar_mass)
    # print(max(blue_cen_stellar_mass_arr))

    blue_cen_stellar_mass_arr = np.log10(blue_cen_stellar_mass_arr)

    if survey == 'eco' or survey == 'resolvea':
        # TODO : check if this is actually correct for resolve a
        blue_stellar_mass_bins = np.linspace(8.6,10.7,6)
    elif survey == 'resolveb':
        blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
    std_blue = std_func(blue_stellar_mass_bins, \
        blue_cen_stellar_mass_arr, blue_deltav_arr)
    std_blue = np.array(std_blue)

    centers_red = 0.5 * (red_stellar_mass_bins[1:] + \
        red_stellar_mass_bins[:-1])
    centers_blue = 0.5 * (blue_stellar_mass_bins[1:] + \
        blue_stellar_mass_bins[:-1])

    return std_red, std_blue, centers_red, centers_blue
def get_deltav_sigma_mocks_qmcolour(survey, path):
    """
    Calculate spread in velocity dispersion from survey mocks (logmstar converted
    to h=1 units before analysis)

    Every mock catalog is coloured with the hybrid quenching model (fixed,
    hard-coded parameters) and the spread in group velocity offsets is
    binned by central stellar mass, separately for groups with a red and a
    blue central.

    Parameters
    ----------
    survey: string
        Name of survey ('eco', 'resolvea' or 'resolveb')
    path: string
        Path to mock catalogs

    Returns
    ---------
    std_red_arr: numpy array
        Spread in velocity dispersion of red galaxies, one row per mock
    std_blue_arr: numpy array
        Spread in velocity dispersion of blue galaxies, one row per mock
    centers_red_arr: numpy array
        Bin centers of central stellar mass for red galaxies, one row per mock
    centers_blue_arr: numpy array
        Bin centers of central stellar mass for blue galaxies, one row per mock
    """
    if survey == 'eco':
        mock_name = 'ECO'
        num_mocks = 8
        min_cz = 3000
        max_cz = 7000
        mag_limit = -17.33
        mstar_limit = 8.9
        volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
    elif survey == 'resolvea':
        mock_name = 'A'
        num_mocks = 59
        min_cz = 4500
        max_cz = 7000
        mag_limit = -17.33
        mstar_limit = 8.9
        volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
    elif survey == 'resolveb':
        mock_name = 'B'
        num_mocks = 104
        min_cz = 4500
        max_cz = 7000
        mag_limit = -17
        mstar_limit = 8.7
        volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3

    std_red_arr = []
    centers_red_arr = []
    std_blue_arr = []
    centers_blue_arr = []
    # NOTE(review): assumes 8 simulation boxes (5001-5008) for every
    # survey; confirm this matches the RESOLVE mock directory layout
    box_id_arr = np.linspace(5001,5008,8)
    for box in box_id_arr:
        box = int(box)
        temp_path = path + '{0}/{1}_m200b_catls/'.format(box,
            mock_name)
        for num in range(num_mocks):
            filename = temp_path + '{0}_cat_{1}_Planck_memb_cat.hdf5'.format(
                mock_name, num)
            mock_pd = reading_catls(filename)

            # Using the same survey definition as in mcmc smf i.e excluding the
            # buffer
            mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
                (mock_pd.cz.values <= max_cz) & \
                (mock_pd.M_r.values <= mag_limit) & \
                (mock_pd.logmstar.values >= mstar_limit)]

            # Hard-coded quenching parameters (Zu & Mandelbaum 2015 values;
            # presumably the fiducial choice -- confirm)
            Mstar_q = 10.5 # Msun/h
            Mh_q = 13.76 # Msun/h
            mu = 0.69
            nu = 0.15

            theta = [Mstar_q, Mh_q, mu, nu]
            f_red_c, f_red_s = hybrid_quenching_model(theta, mock_pd, 'nonvishnu')
            mock_pd = assign_colour_label_mock(f_red_c, f_red_s, mock_pd)
            # Convert stellar masses from h=0.7 to h=1
            mock_pd.logmstar = np.log10((10**mock_pd.logmstar) / 2.041)

            # Group IDs whose central (g_galtype == 1) is red / blue
            red_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
                colour_label == 'R') & (mock_pd.g_galtype == 1)].values)
            blue_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
                colour_label == 'B') & (mock_pd.g_galtype == 1)].values)

            # Calculating spread in velocity dispersion for galaxies in groups
            # with a red central
            red_deltav_arr = []
            red_cen_stellar_mass_arr = []
            for key in red_subset_grpids:
                group = mock_pd.loc[mock_pd.groupid == key]
                cen_stellar_mass = group.logmstar.loc[group.g_galtype.\
                    values == 1].values[0]
                mean_cz_grp = np.round(np.mean(group.cz.values),2)
                deltav = group.cz.values - len(group)*[mean_cz_grp]
                for val in deltav:
                    red_deltav_arr.append(val)
                    red_cen_stellar_mass_arr.append(cen_stellar_mass)
            # print(max(red_cen_stellar_mass_arr))

            if survey == 'eco' or survey == 'resolvea':
                # TODO : check if this is actually correct for resolve a
                red_stellar_mass_bins = np.linspace(8.6,11.2,6)
            elif survey == 'resolveb':
                red_stellar_mass_bins = np.linspace(8.4,11.0,6)
            std_red = std_func(red_stellar_mass_bins, red_cen_stellar_mass_arr,
                red_deltav_arr)
            std_red = np.array(std_red)
            std_red_arr.append(std_red)

            # Calculating spread in velocity dispersion for galaxies in groups
            # with a blue central
            blue_deltav_arr = []
            blue_cen_stellar_mass_arr = []
            for key in blue_subset_grpids:
                group = mock_pd.loc[mock_pd.groupid == key]
                cen_stellar_mass = group.logmstar.loc[group.g_galtype\
                    .values == 1].values[0]
                mean_cz_grp = np.round(np.mean(group.cz.values),2)
                deltav = group.cz.values - len(group)*[mean_cz_grp]
                for val in deltav:
                    blue_deltav_arr.append(val)
                    blue_cen_stellar_mass_arr.append(cen_stellar_mass)
            # print(max(blue_cen_stellar_mass_arr))

            if survey == 'eco' or survey == 'resolvea':
                # TODO : check if this is actually correct for resolve a
                blue_stellar_mass_bins = np.linspace(8.6,10.7,6)
            elif survey == 'resolveb':
                blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
            std_blue = std_func(blue_stellar_mass_bins, \
                blue_cen_stellar_mass_arr, blue_deltav_arr)
            std_blue = np.array(std_blue)
            std_blue_arr.append(std_blue)

            # Bin centers are identical for every mock but are appended per
            # mock so these arrays stay parallel with std_*_arr
            centers_red = 0.5 * (red_stellar_mass_bins[1:] + \
                red_stellar_mass_bins[:-1])
            centers_blue = 0.5 * (blue_stellar_mass_bins[1:] + \
                blue_stellar_mass_bins[:-1])
            centers_red_arr.append(centers_red)
            centers_blue_arr.append(centers_blue)

    std_red_arr = np.array(std_red_arr)
    centers_red_arr = np.array(centers_red_arr)
    std_blue_arr = np.array(std_blue_arr)
    centers_blue_arr = np.array(centers_blue_arr)

    return std_red_arr, std_blue_arr, centers_red_arr, centers_blue_arr
def get_deltav_sigma_data(df):
    """
    Measure spread in velocity dispersion separately for red and blue galaxies
    by binning up central stellar mass (changes logmstar units from h=0.7 to h=1)

    Uses the module-level global ``survey`` for the mass-completeness cut
    and the binning scheme.

    Parameters
    ----------
    df: pandas Dataframe
        Data catalog

    Returns
    ---------
    std_red: numpy array
        Spread in velocity dispersion of red galaxies
    centers_red: numpy array
        Bin centers of central stellar mass for red galaxies
    std_blue: numpy array
        Spread in velocity dispersion of blue galaxies
    centers_blue: numpy array
        Bin centers of central stellar mass for blue galaxies

    NOTE(review): the return order here is (std_red, centers_red, std_blue,
    centers_blue), unlike the mock/Vishnu counterparts which return
    (std_red, std_blue, centers_red, centers_blue) -- callers must not
    assume the orders match.
    """
    catl = df.copy()
    # Survey completeness cut in stellar mass (still h=0.7 units here)
    if survey == 'eco' or survey == 'resolvea':
        catl = catl.loc[catl.logmstar >= 8.9]
    elif survey == 'resolveb':
        catl = catl.loc[catl.logmstar >= 8.7]
    # Convert stellar masses from h=0.7 to h=1
    catl.logmstar = np.log10((10**catl.logmstar) / 2.041)

    # Group IDs whose central (fc == 1) is red / blue
    red_subset_grpids = np.unique(catl.grp.loc[(catl.\
        colour_label == 'R') & (catl.fc == 1)].values)
    blue_subset_grpids = np.unique(catl.grp.loc[(catl.\
        colour_label == 'B') & (catl.fc == 1)].values)

    # Calculating spread in velocity dispersion for galaxies in groups with a
    # red central
    red_deltav_arr = []
    red_cen_stellar_mass_arr = []
    for key in red_subset_grpids:
        group = catl.loc[catl.grp == key]
        cen_stellar_mass = group.logmstar.loc[group.fc.\
            values == 1].values[0]
        mean_cz_grp = np.round(np.mean(group.cz.values),2)
        deltav = group.cz.values - len(group)*[mean_cz_grp]
        for val in deltav:
            red_deltav_arr.append(val)
            red_cen_stellar_mass_arr.append(cen_stellar_mass)

    if survey == 'eco' or survey == 'resolvea':
        # TODO : check if this is actually correct for resolve a
        red_stellar_mass_bins = np.linspace(8.6,11.2,6)
    elif survey == 'resolveb':
        red_stellar_mass_bins = np.linspace(8.4,11.0,6)
    std_red = std_func(red_stellar_mass_bins, red_cen_stellar_mass_arr,
        red_deltav_arr)
    std_red = np.array(std_red)

    # Calculating spread in velocity dispersion for galaxies in groups with a
    # blue central
    blue_deltav_arr = []
    blue_cen_stellar_mass_arr = []
    for key in blue_subset_grpids:
        group = catl.loc[catl.grp == key]
        cen_stellar_mass = group.logmstar.loc[group.fc\
            .values == 1].values[0]
        mean_cz_grp = np.round(np.mean(group.cz.values),2)
        deltav = group.cz.values - len(group)*[mean_cz_grp]
        for val in deltav:
            blue_deltav_arr.append(val)
            blue_cen_stellar_mass_arr.append(cen_stellar_mass)

    if survey == 'eco' or survey == 'resolvea':
        # TODO : check if this is actually correct for resolve a
        blue_stellar_mass_bins = np.linspace(8.6,10.7,6)
    elif survey == 'resolveb':
        blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
    std_blue = std_func(blue_stellar_mass_bins, blue_cen_stellar_mass_arr,
        blue_deltav_arr)
    std_blue = np.array(std_blue)

    centers_red = 0.5 * (red_stellar_mass_bins[1:] + \
        red_stellar_mass_bins[:-1])
    centers_blue = 0.5 * (blue_stellar_mass_bins[1:] + \
        blue_stellar_mass_bins[:-1])

    return std_red, centers_red, std_blue, centers_blue
def reading_catls(filename, catl_format='.hdf5'):
    """
    Read an ECO/RESOLVE galaxy/group catalogue from disk.

    Parameters
    ----------
    filename: string
        path and name of the ECO/RESOLVE catalogue to read

    catl_format: string, optional (default = '.hdf5')
        type of file to read.
        Options:
            - '.hdf5': Reads in a catalogue in HDF5 format

    Returns
    -------
    mock_pd: pandas DataFrame
        DataFrame with galaxy/group information

    Raises
    ------
    ValueError
        If `filename` does not exist, or `catl_format` is not supported.
    """
    # Guard clauses: fail fast on a missing file or unsupported format
    if not os.path.exists(filename):
        raise ValueError('`filename`: {0} NOT FOUND! Exiting..'.format(
            filename))
    if catl_format != '.hdf5':
        raise ValueError('`catl_format` ({0}) not supported! Exiting...'.format(
            catl_format))
    return pd.read_hdf(filename)
def get_err_data(survey, path):
    """
    Calculate error in data SMF from mocks

    For every mock catalog, measures the red/blue SMFs and the red/blue
    spread-in-deltav statistics, stacks the 20 resulting quantities per
    mock (5 red SMF bins, 5 blue SMF bins, 5 red deltav bins, 5 blue
    deltav bins) and derives their scatter/correlation across mocks.

    Parameters
    ----------
    survey: string
        Name of survey
    path: string
        Path to mock catalogs

    Returns
    ---------
    err_colour: array
        Standard deviation, across mocks, of each of the 20 combined
        statistics
    corr_mat_inv_colour: array
        Inverse of the correlation matrix of those 20 statistics
    """
    if survey == 'eco':
        mock_name = 'ECO'
        num_mocks = 8
        min_cz = 3000
        max_cz = 7000
        mag_limit = -17.33
        mstar_limit = 8.9
        volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
    elif survey == 'resolvea':
        mock_name = 'A'
        num_mocks = 59
        min_cz = 4500
        max_cz = 7000
        mag_limit = -17.33
        mstar_limit = 8.9
        volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
    elif survey == 'resolveb':
        mock_name = 'B'
        num_mocks = 104
        min_cz = 4500
        max_cz = 7000
        mag_limit = -17
        mstar_limit = 8.7
        volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3

    phi_arr_total = []
    phi_arr_red = []
    phi_arr_blue = []
    # logmstar_red_max_arr = []
    # logmstar_blue_max_arr = []
    # colour_err_arr = []
    # colour_corr_mat_inv = []
    # NOTE(review): assumes 8 simulation boxes (5001-5008) for every survey
    box_id_arr = np.linspace(5001,5008,8)
    for box in box_id_arr:
        box = int(box)
        temp_path = path + '{0}/{1}_m200b_catls/'.format(box,
            mock_name)
        for num in range(num_mocks):
            filename = temp_path + '{0}_cat_{1}_Planck_memb_cat.hdf5'.format(
                mock_name, num)
            mock_pd = reading_catls(filename)

            # Using the same survey definition as in mcmc smf i.e excluding the
            # buffer
            mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
                (mock_pd.cz.values <= max_cz) & (mock_pd.M_r.values <= mag_limit) &\
                (mock_pd.logmstar.values >= mstar_limit)]

            # Hard-coded quenching parameters (Zu & Mandelbaum 2015 values;
            # presumably the fiducial choice -- confirm)
            Mstar_q = 10.5 # Msun/h
            Mh_q = 13.76 # Msun/h
            mu = 0.69
            nu = 0.15

            theta = [Mstar_q, Mh_q, mu, nu]
            f_red_c, f_red_s = hybrid_quenching_model(theta, mock_pd, 'nonvishnu')
            mock_pd = assign_colour_label_mock(f_red_c, f_red_s, mock_pd)
            # logmstar_red_max = mock_pd.logmstar.loc[mock_pd.colour_label == 'R'].max()
            # logmstar_red_max_arr.append(logmstar_red_max)
            # logmstar_blue_max = mock_pd.logmstar.loc[mock_pd.colour_label == 'B'].max()
            # logmstar_blue_max_arr.append(logmstar_blue_max)
            logmstar_arr = mock_pd.logmstar.values

            #Measure SMF of mock using diff_smf function
            max_total, phi_total, err_total, bins_total, counts_total = \
                diff_smf(logmstar_arr, volume, False)
            max_red, phi_red, err_red, bins_red, counts_red = \
                diff_smf(mock_pd.logmstar.loc[mock_pd.colour_label.values == 'R'],
                volume, False, 'R')
            max_blue, phi_blue, err_blue, bins_blue, counts_blue = \
                diff_smf(mock_pd.logmstar.loc[mock_pd.colour_label.values == 'B'],
                volume, False, 'B')
            phi_arr_total.append(phi_total)
            phi_arr_red.append(phi_red)
            phi_arr_blue.append(phi_blue)

    phi_arr_total = np.array(phi_arr_total)
    phi_arr_red = np.array(phi_arr_red)
    phi_arr_blue = np.array(phi_arr_blue)

    # phi_arr_colour = np.append(phi_arr_red, phi_arr_blue, axis = 0)

    # Covariance matrix for total phi (all galaxies)
    # cov_mat = np.cov(phi_arr_total, rowvar=False) # default norm is N-1
    # err_total = np.sqrt(cov_mat.diagonal())
    # cov_mat_red = np.cov(phi_arr_red, rowvar=False) # default norm is N-1
    # err_red = np.sqrt(cov_mat_red.diagonal())
    # colour_err_arr.append(err_red)
    # cov_mat_blue = np.cov(phi_arr_blue, rowvar=False) # default norm is N-1
    # err_blue = np.sqrt(cov_mat_blue.diagonal())
    # colour_err_arr.append(err_blue)

    # corr_mat_red = cov_mat_red / np.outer(err_red , err_red)
    # corr_mat_inv_red = np.linalg.inv(corr_mat_red)
    # colour_corr_mat_inv.append(corr_mat_inv_red)
    # corr_mat_blue = cov_mat_blue / np.outer(err_blue , err_blue)
    # corr_mat_inv_blue = np.linalg.inv(corr_mat_blue)
    # colour_corr_mat_inv.append(corr_mat_inv_blue)

    # deltav statistics per mock (one row per mock, 5 bins each)
    deltav_sig_red, deltav_sig_blue, deltav_sig_cen_red, deltav_sig_cen_blue = \
        get_deltav_sigma_mocks_qmcolour(survey, path)

    # Flatten the per-mock statistics into one column per bin so the
    # covariance across mocks can be taken with DataFrame.corr()/cov()
    phi_red_0 = phi_arr_red[:,0]
    phi_red_1 = phi_arr_red[:,1]
    phi_red_2 = phi_arr_red[:,2]
    phi_red_3 = phi_arr_red[:,3]
    phi_red_4 = phi_arr_red[:,4]

    phi_blue_0 = phi_arr_blue[:,0]
    phi_blue_1 = phi_arr_blue[:,1]
    phi_blue_2 = phi_arr_blue[:,2]
    phi_blue_3 = phi_arr_blue[:,3]
    phi_blue_4 = phi_arr_blue[:,4]

    dv_red_0 = deltav_sig_red[:,0]
    dv_red_1 = deltav_sig_red[:,1]
    dv_red_2 = deltav_sig_red[:,2]
    dv_red_3 = deltav_sig_red[:,3]
    dv_red_4 = deltav_sig_red[:,4]

    dv_blue_0 = deltav_sig_blue[:,0]
    dv_blue_1 = deltav_sig_blue[:,1]
    dv_blue_2 = deltav_sig_blue[:,2]
    dv_blue_3 = deltav_sig_blue[:,3]
    dv_blue_4 = deltav_sig_blue[:,4]

    combined_df = pd.DataFrame({'phi_red_0':phi_red_0, 'phi_red_1':phi_red_1,\
        'phi_red_2':phi_red_2, 'phi_red_3':phi_red_3, 'phi_red_4':phi_red_4, \
        'phi_blue_0':phi_blue_0, 'phi_blue_1':phi_blue_1,
        'phi_blue_2':phi_blue_2, 'phi_blue_3':phi_blue_3,
        'phi_blue_4':phi_blue_4, \
        'dv_red_0':dv_red_0, 'dv_red_1':dv_red_1, 'dv_red_2':dv_red_2, \
        'dv_red_3':dv_red_3, 'dv_red_4':dv_red_4, \
        'dv_blue_0':dv_blue_0, 'dv_blue_1':dv_blue_1, 'dv_blue_2':dv_blue_2, \
        'dv_blue_3':dv_blue_3, 'dv_blue_4':dv_blue_4})

    # Correlation matrix of phi and deltav colour measurements combined
    corr_mat_colour = combined_df.corr()
    corr_mat_inv_colour = np.linalg.inv(corr_mat_colour.values)
    err_colour = np.sqrt(np.diag(combined_df.cov()))

    # deltav_sig_colour = np.append(deltav_sig_red, deltav_sig_blue, axis = 0)
    # cov_mat_colour = np.cov(phi_arr_colour,deltav_sig_colour, rowvar=False)
    # err_colour = np.sqrt(cov_mat_colour.diagonal())
    # corr_mat_colour = cov_mat_colour / np.outer(err_colour, err_colour)
    # corr_mat_inv_colour = np.linalg.inv(corr_mat_colour)

    # cov_mat_colour = np.cov(phi_arr_red,phi_arr_blue, rowvar=False)
    # err_colour = np.sqrt(cov_mat_colour.diagonal())
    # corr_mat_colour = cov_mat_colour / np.outer(err_colour, err_colour)
    # corr_mat_inv_colour = np.linalg.inv(corr_mat_colour)

    return err_colour, corr_mat_inv_colour
def debug_within_outside_1sig(emcee_table, grp_keys, Mstar_q, Mhalo_q, mu, nu, chi2):
    """Diagnostic: compare one MCMC sample drawn from within 1-sigma of the
    chi2 distribution (at chain iteration 600) against one drawn from outside
    it, by re-running the quenching model with both parameter sets and
    plotting chain traces, SMFs and velocity-dispersion measurements.

    Parameters
    ----------
    emcee_table : pandas.DataFrame with columns iteration_id, chi2, Mstar_q,
        Mhalo_q, mu, nu (one row per walker per iteration).
    grp_keys : iteration numbers (x-axis of the trace plots).
    Mstar_q, Mhalo_q, mu, nu, chi2 : 2-row arrays of (mean, std) per iteration.

    NOTE(review): this function also reads several module-level globals
    ('survey', the various model/measurement helpers, 'cwpaths', plus
    'max_total'/'phi_total' in the second figure) -- see inline notes.
    """
    # Zu & Mandelbaum reference values for (Mstar_q, Mhalo_q, mu, nu).
    zumandelbaum_param_vals = [10.5, 13.76, 0.69, 0.15]
    iteration = 600.0
    emcee_table_it600 = emcee_table.loc[emcee_table.iteration_id == iteration]
    chi2_std_it600 = np.std(emcee_table_it600.chi2)
    chi2_mean_it600 = np.mean(emcee_table_it600.chi2)
    # selecting value from within one sigma
    df_within_sig = emcee_table_it600.loc[(emcee_table_it600.chi2 < chi2_mean_it600 + chi2_std_it600)&(emcee_table_it600.chi2 > chi2_mean_it600 - chi2_std_it600)]
    # Hard-coded row index 3: picks an arbitrary sample from each subset.
    chi2_within_sig = df_within_sig.chi2.values[3]
    mstar_within_sig = df_within_sig.Mstar_q.values[3]
    mhalo_within_sig = df_within_sig.Mhalo_q.values[3]
    mu_within_sig = df_within_sig.mu.values[3]
    nu_within_sig = df_within_sig.nu.values[3]
    # # selecting value from outside one sigma
    df_outside_sig = emcee_table_it600.loc[emcee_table_it600.chi2 > chi2_mean_it600 + chi2_std_it600]
    chi2_outside_sig = df_outside_sig.chi2.values[3]
    mstar_outside_sig = df_outside_sig.Mstar_q.values[3]
    mhalo_outside_sig = df_outside_sig.Mhalo_q.values[3]
    mu_outside_sig = df_outside_sig.mu.values[3]
    nu_outside_sig = df_outside_sig.nu.values[3]
    # Figure 1: chain traces of the four parameters and chi2, with the chosen
    # within-1sig (circle) and outside-1sig (star) samples overplotted.
    fig1, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1, sharex=True, \
        figsize=(10,10))
    ax1.plot(grp_keys, Mstar_q[0],c='#941266',ls='--', marker='o')
    ax1.axhline(zumandelbaum_param_vals[0],color='lightgray')
    ax1.scatter(iteration, mstar_outside_sig, marker='*', c='k', s=70)
    ax1.scatter(iteration, mstar_within_sig, marker='o', c='k', s=70)
    ax2.plot(grp_keys, Mhalo_q[0], c='#941266',ls='--', marker='o')
    ax2.axhline(zumandelbaum_param_vals[1],color='lightgray')
    ax2.scatter(iteration, mhalo_outside_sig, marker='*', c='k', s=70)
    ax2.scatter(iteration, mhalo_within_sig, marker='o', c='k', s=70)
    ax3.plot(grp_keys, mu[0], c='#941266',ls='--', marker='o')
    ax3.axhline(zumandelbaum_param_vals[2],color='lightgray')
    ax3.scatter(iteration, mu_outside_sig, marker='*', c='k', s=70)
    ax3.scatter(iteration, mu_within_sig, marker='o', c='k', s=70)
    ax4.plot(grp_keys, nu[0], c='#941266',ls='--', marker='o')
    ax4.axhline(zumandelbaum_param_vals[3],color='lightgray')
    ax4.scatter(iteration, nu_outside_sig, marker='*', c='k', s=70)
    ax4.scatter(iteration, nu_within_sig, marker='o', c='k', s=70)
    ax5.plot(grp_keys, chi2[0], c='#941266',ls='--', marker='o')
    ax5.scatter(iteration, chi2_outside_sig, marker='*', c='k', s=70)
    ax5.scatter(iteration, chi2_within_sig, marker='o', c='k', s=70)
    # Shade the +/- 1 std band around each trace.
    ax1.fill_between(grp_keys, Mstar_q[0]-Mstar_q[1], Mstar_q[0]+Mstar_q[1],
        alpha=0.3, color='#941266')
    ax2.fill_between(grp_keys, Mhalo_q[0]-Mhalo_q[1], Mhalo_q[0]+Mhalo_q[1], \
        alpha=0.3, color='#941266')
    ax3.fill_between(grp_keys, mu[0]-mu[1], mu[0]+mu[1], \
        alpha=0.3, color='#941266')
    ax4.fill_between(grp_keys, nu[0]-nu[1], nu[0]+nu[1], \
        alpha=0.3, color='#941266')
    ax5.fill_between(grp_keys, chi2[0]-chi2[1], chi2[0]+chi2[1], \
        alpha=0.3, color='#941266')
    ax1.set_ylabel(r"$\mathbf{log_{10}\ M^{q}_{*}}$")
    ax2.set_ylabel(r"$\mathbf{log_{10}\ M^{q}_{h}}$")
    ax3.set_ylabel(r"$\boldsymbol{\mu}$")
    # ax4.set_ylabel(r"$\mathbf{log_{10}} \boldsymbol{\ \nu}$")
    ax4.set_ylabel(r"$\boldsymbol{\nu}$")
    ax5.set_ylabel(r"$\mathbf{log_{10}} \boldsymbol{{\ \chi}^2}$")
    # ax5.set_ylabel(r"$\boldsymbol{{\chi}^2}$")
    # ax1.set_yscale('log')
    # ax2.set_yscale('log')
    ax1.annotate(zumandelbaum_param_vals[0], (0.95,0.85), xycoords='axes fraction',
        bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
    ax2.annotate(zumandelbaum_param_vals[1], (0.95,0.85), xycoords='axes fraction',
        bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
    ax3.annotate(zumandelbaum_param_vals[2], (0.95,0.85), xycoords='axes fraction',
        bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
    ax4.annotate(zumandelbaum_param_vals[3], (0.95,0.85), xycoords='axes fraction',
        bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
    plt.xlabel(r"$\mathbf{iteration\ number}$")
    plt.show()
    # Resolve project paths and load the mock/group catalogs.
    dict_of_paths = cwpaths.cookiecutter_paths()
    path_to_raw = dict_of_paths['raw_dir']
    path_to_proc = dict_of_paths['proc_dir']
    path_to_interim = dict_of_paths['int_dir']
    path_to_figures = dict_of_paths['plot_dir']
    path_to_data = dict_of_paths['data_dir']
    catl_file = path_to_raw + "eco/eco_all.csv"
    path_to_mocks = path_to_data + 'mocks/m200b/eco/'
    # NOTE(review): machine-specific absolute path -- only works on the
    # original author's machine.
    randint_logmstar_file = pd.read_csv("/Users/asadm2/Desktop/randint_logmstar.txt",
        header=None)
    mock_num = randint_logmstar_file[0].values[int(iteration)-1]
    gals_df_ = reading_catls(path_to_proc + "gal_group.hdf5")
    # Run the hybrid quenching model with the within-1sig parameter set.
    theta_within = [mstar_within_sig, mhalo_within_sig, mu_within_sig, nu_within_sig]
    f_red_cen, f_red_sat = hybrid_quenching_model(theta_within, gals_df_, 'vishnu', \
        mock_num)
    gals_df = assign_colour_label_mock(f_red_cen, f_red_sat, gals_df_)
    v_sim = 130**3
    total_model, red_model, blue_model = measure_all_smf(gals_df, v_sim
    , False, mock_num)
    sig_red_within, sig_blue_within, cen_red_within, cen_blue_within = \
        get_deltav_sigma_vishnu_qmcolour(gals_df, mock_num)
    total_model_within, red_model_within, blue_model_within = total_model, \
        red_model, blue_model
    # Repeat with the outside-1sig parameter set.
    theta_outside = [mstar_outside_sig, mhalo_outside_sig, mu_outside_sig, \
        nu_outside_sig]
    f_red_cen, f_red_sat = hybrid_quenching_model(theta_outside, gals_df_, 'vishnu', \
        mock_num)
    gals_df = assign_colour_label_mock(f_red_cen, f_red_sat, gals_df_)
    v_sim = 130**3
    total_model, red_model, blue_model = measure_all_smf(gals_df, v_sim
    , False, mock_num)
    sig_red_outside, sig_blue_outside, cen_red_outside, cen_blue_outside = \
        get_deltav_sigma_vishnu_qmcolour(gals_df, mock_num)
    total_model_outside, red_model_outside, blue_model_outside = total_model, \
        red_model, blue_model
    # NOTE(review): 'survey' is not a parameter of this function -- it must
    # exist as a module-level global; confirm.
    catl, volume, z_median = read_data_catl(catl_file, survey)
    catl = assign_colour_label_data(catl)
    total_data, red_data, blue_data = measure_all_smf(catl, volume, True)
    std_red, centers_red, std_blue, centers_blue = get_deltav_sigma_data(catl)
    sigma, corr_mat_inv = get_err_data(survey, path_to_mocks)
    # Figure 2: SMFs of both model runs against the data with mock errors.
    plt.clf()
    plt.plot(total_model_within[0], total_model_within[1], c='k', linestyle='-', \
        label='total within 1sig')
    plt.plot(total_model_outside[0], total_model_outside[1], c='k', linestyle='--',\
        label='total outside 1sig')
    plt.plot(red_model_within[0], red_model_within[1], color='maroon',
        linestyle='--', label='within 1sig')
    plt.plot(blue_model_within[0], blue_model_within[1], color='mediumblue',
        linestyle='--', label='within 1sig')
    plt.plot(red_model_outside[0], red_model_outside[1], color='indianred',
        linestyle='--', label='outside 1sig')
    plt.plot(blue_model_outside[0], blue_model_outside[1], color='cornflowerblue',
        linestyle='--', label='outside 1sig')
    plt.errorbar(x=red_data[0], y=red_data[1], yerr=sigma[0:5], xerr=None,
        color='r', label='data')
    plt.errorbar(x=blue_data[0], y=blue_data[1], yerr=sigma[5:10], xerr=None,
        color='b', label='data')
    plt.xlabel(r'\boldmath$\log_{10}\ M_\star \left[\mathrm{M_\odot}\, \mathrm{h}^{-1} \right]$', fontsize=20)
    plt.ylabel(r'\boldmath$\Phi \left[\mathrm{dex}^{-1}\,\mathrm{Mpc}^{-3}\,\mathrm{h}^{3} \right]$', fontsize=20)
    plt.legend(loc='best')
    plt.title('ECO SMF')
    plt.show()
    # NOTE(review): 'max_total'/'phi_total' are defined neither in this
    # function nor in its arguments -- this block raises NameError unless
    # they happen to exist as globals; verify before use.
    plt.clf()
    plt.plot(max_total, phi_total, c='k')
    plt.xlabel(r'\boldmath$\log_{10}\ M_\star \left[\mathrm{M_\odot}\, \mathrm{h}^{-1} \right]$', fontsize=20)
    plt.ylabel(r'\boldmath$\Phi \left[\mathrm{dex}^{-1}\,\mathrm{Mpc}^{-3}\,\mathrm{h}^{3} \right]$', fontsize=20)
    plt.legend(loc='best')
    plt.title('ECO SMF')
    plt.show()
    # Figure 3: velocity dispersion (spread in delta-v) comparison.
    plt.clf()
    plt.scatter(cen_red_within, sig_red_within, c='maroon', label='within 1sig')
    plt.scatter(cen_red_outside, sig_red_outside, c='indianred', label='outside 1sig')
    plt.scatter(cen_blue_within, sig_blue_within, c='mediumblue', label='within 1sig')
    plt.scatter(cen_blue_outside, sig_blue_outside, c='cornflowerblue', \
        label='outside 1sig')
    plt.errorbar(x=centers_red, y=std_red, yerr=sigma[10:15], xerr=None, color='r',\
        label='data', fmt='')
    plt.errorbar(x=centers_blue, y=std_blue, yerr=sigma[15:20], xerr=None, \
        color='b', label='data', fmt='')
    plt.xlabel(r'\boldmath$\log_{10}\ M_\star \left[\mathrm{M_\odot}\, \mathrm{h}^{-1} \right]$', fontsize=20)
    plt.ylabel(r'$\sigma$')
    plt.legend(loc='best')
    plt.title(r'ECO spread in $\delta v$')
    plt.show()
| 2.296875 | 2 |
api/app/controllers/api/__init__.py | riszkymf/RESTKnot | 0 | 12772109 | from flask import Blueprint
from flask_restful import Api
from .user import *
from .auth import *
from .zone import *
from .type import *
from .ttl import *
from .record import *
from .ttldata import *
from .content import *
from .content_serial import *
from .dns.create import *
from .command_rest import *
from .admin.auth import *
from .admin.create import *
from .cs_master import *
from .cs_slave_node import *
from .cluster import *
from .check_on import *
from .health import *
# API blueprint: every REST resource is mounted under the /api prefix.
api_blueprint = Blueprint("api", __name__, url_prefix='/api')
api = Api(api_blueprint)

# (resource class, URL rule) pairs. Registration order matches the original
# declaration order so Flask's rule matching behaviour is unchanged; default
# endpoint names (derived from each class name) are also unchanged.
_ROUTES = (
    # user management
    (UserdataResource, '/user'),
    (UserdataResourceById, '/user/<userdata_id>'),
    (UserdataInsert, '/user'),
    (UserdataUpdate, '/user/<userdata_id>'),
    (UserdataResourceByProjectId, '/user/project/<project_id>'),
    (UserdataResourceByUserId, '/user/id/<user_id>'),
    (UserdataRemove, '/user/<userdata_id>'),
    (Usersignin, "/login"),
    (UserDataZoneInsert, "/userzone"),
    (UserDataZoneResource, "/userzone"),
    # DNS API
    (ZoneName, '/zone'),
    (Type, '/type'),
    (TtlName, '/ttl'),
    (Record, '/record'),
    (TtlData, '/ttldata'),
    (Content, '/content'),
    (ContentSerial, '/content_serial'),
    (SendCommandRest, '/sendcommand'),
    (CreateDNS, '/user/dnscreate'),
    # admin auth
    (AdminAuth, '/admin/login'),
    (CreateDNSAdminRole, '/admin/dnscreate'),
    # clustering
    (CsMaster, '/master'),
    (RefreshZoneMaster, '/master/refresh/<id_refresh>'),
    (CsSlave, '/slave_node'),
    (RefreshZoneSlave, "/slave_node/refresh/<id_refresh>"),
    # cluster
    (ClusterCheckMaster, '/cluster/master/<id_master>'),
    (ClusterCheckSlave, '/cluster/slave/<id_slave>'),
    (ClusterUnsetCheckMaster, '/cluster/unset/master/<id_master>'),
    (ClusterUnsetCheckSlave, '/cluster/unset/slave/<id_slave>'),
    # check on
    (NotifyOnAgent, "/agent/check"),
    (ChekcLogSyncOnMaster, '/agent/master/<id_logs>'),
    (CheckLogSyncOnSlave, '/agent/slave/<id_logs>'),
    # health check
    (HealthCheck, "/health"),
)

for _resource, _rule in _ROUTES:
    api.add_resource(_resource, _rule)
| 1.960938 | 2 |
demo/check_cluster_data_structure_consistency.py | spyke/spyke | 22 | 12772110 | <reponame>spyke/spyke<gh_stars>10-100
# code to run in IPython shell to test whether clustering info in spikes struct array and in
# the neurons dict is consistent:
# 'self' here is the spyke sort-window object available in the IPython shell
# namespace. For each neuron id, check that the neuron's cached spike ids
# (sids) equal the indices of rows in the spikes struct array whose 'nid'
# field carries that neuron id; prints True per neuron when consistent.
for nid in sorted(self.sort.neurons):
    print(nid, (self.sort.neurons[nid].sids == np.where(self.sort.spikes['nid'] == nid)[0]).all())
| 2.25 | 2 |
twitch_online.py | lbatalha/twitch_online.py | 0 | 12772111 | #!/bin/env python3
import os, sys, argparse
import requests, yaml
# Default locations: cached OAuth token in /tmp, credentials under
# $XDG_CONFIG_HOME (raises KeyError if that env var is unset).
default_temp_file = '/tmp/twitch_online.token'
default_auth_file = os.path.join(os.environ['XDG_CONFIG_HOME'], 'twitch_online.creds')

parser = argparse.ArgumentParser(description="CLI Utility to check if a twitch channel is streaming", \
    epilog="The credentials file should be a simple yaml file with `client_id` and `client_secret` variables")
parser.add_argument('-a', '--auth-file', default=default_auth_file, help="File used to store credentials")
parser.add_argument('-t', '--temp-file', default=default_temp_file, help="File used to store temporary auth token")
parser.add_argument('CHANNEL', help="Channel to check")
args = vars(parser.parse_args())

token_file = args['temp_file']

# Init temp token file
if not os.path.isfile(token_file):
    with open(token_file, 'w') as f:
        f.write('')
    # only the calling user should be able to read this
    os.chmod(token_file, 0o600)

# Handle reading config yaml
with open(args['auth_file'], 'r') as f:
    try:
        config = yaml.safe_load(f)
        client_id = config['client_id']
        client_secret = config['client_secret']
        # NOTE(review): this re-chmods the *token* file, which was already
        # chmodded above -- possibly a copy-paste slip intended for the
        # credentials file; confirm intent.
        os.chmod(token_file, 0o600)
    except yaml.YAMLError as pe:
        print("Failed to parse credentials file", args['auth_file'])
        print(pe)
        sys.exit(128)
def check_status():
    """Query the Twitch Helix streams endpoint for the requested channel.

    Reads the cached bearer token from ``token_file`` and returns the raw
    ``requests.Response`` (status handling is left to the caller).
    """
    with open(token_file, 'r') as f:
        token = f.read(256).rstrip('\n')
    headers = {
        'Authorization': 'Bearer ' + token,
        'Client-Id': client_id,
    }
    return requests.get(
        'https://api.twitch.tv/helix/streams',
        params={'user_login': args['CHANNEL']},
        headers=headers,
    )
def authenticate():
    """Obtain an app access token via the client-credentials grant and
    cache it in ``token_file`` for subsequent API calls."""
    payload = {
        'client_id': client_id,
        'client_secret': client_secret,
        'grant_type': 'client_credentials',
    }
    response = requests.post('https://id.twitch.tv/oauth2/token', params=payload)
    response.raise_for_status()
    with open(token_file, 'w') as f:
        f.write(response.json()['access_token'])
def main():
    """Check whether the channel is live and exit with a status code.

    Exit codes: 0 = live, 1 = offline, 2 = unhandled API response
    (128 is used earlier for credential-file errors).
    """
    response = check_status()
    if response.status_code == 401:
        # Token missing/expired: fetch a fresh one and retry once.
        # BUG FIX: the original used elif, so after re-authenticating the new
        # response was never evaluated and the function fell through,
        # exiting 0 ("live") regardless of the actual stream state.
        sys.stderr.write("Authentication failed, re-authenticating\n")
        authenticate()
        response = check_status()
    if response.status_code == 200:
        data = response.json()['data']
        if data and data[0]['type'] == 'live':
            sys.exit(0)
        else:
            sys.exit(1)
    else:
        # BUG FIX: status_code is an int; the original str + int
        # concatenation raised TypeError instead of printing the error.
        sys.stderr.write("Unhandled API status code: %d\n" % response.status_code)
        sys.stderr.write(response.text)
        sys.exit(2)
if __name__ == "__main__":
main()
| 2.71875 | 3 |
tprmp/envs/gym.py | anindex/tp-rmp | 7 | 12772112 | <gh_stars>1-10
from os.path import join, dirname, realpath
import tempfile
import logging
import gym
import numpy as np
import pybullet as p
import time
from tprmp.demonstrations.manifold import Manifold
from tprmp.demonstrations.quaternion import q_convert_wxyz, q_convert_xyzw
from tprmp.utils.threading import threaded
# Bundled URDF assets live two directory levels above this module.
_path_file = dirname(realpath(__file__))
ASSETS_PATH = join(_path_file, '..', '..', 'data', 'assets')
UR5_URDF_PATH = join(ASSETS_PATH, 'ur5', 'ur5.urdf')
UR5_WORKSPACE_URDF_PATH = join(ASSETS_PATH, 'ur5', 'workspace.urdf')
PLANE_URDF_PATH = join(ASSETS_PATH, 'plane', 'plane.urdf')
# UR5 arm constants used by Environment below.
NUM_JOINTS = 6   # number of revolute joints
NUM_LINKS = 10   # link id NUM_LINKS is treated as the end-effector tip
VEL_LIMIT = 10   # bound used in observation/action spaces -- units assumed rad/s and m/s, TODO confirm
ACCEL_LIMIT = 5  # acceleration bound for the action space
class Environment(gym.Env):
    """OpenAI Gym-style environment class."""

    logger = logging.getLogger(__name__)

    def __init__(self, **kwargs):
        """Creates OpenAI Gym-style environment with PyBullet.

        Parameters
        ----------
        :param task: the task to use. If None, the user must call set_task for the
                     environment to work properly.
        :param disp: show environment with PyBullet's built-in display viewer.
        :param shared_memory: run with shared memory.
        :param manifold: manifold of task space
        :param sampling_hz: Sampling freq.

        Raises:
        -------
        RuntimeError: if pybullet cannot load fileIOPlugin.
        ValueError: from reset() when no task was provided.
        """
        task = kwargs.get('task', None)
        disp = kwargs.get('disp', False)
        shared_memory = kwargs.get('shared_memory', False)
        self.sampling_hz = kwargs.get('sampling_hz', 100)
        self.real_time_step = kwargs.get('real_time_step', False)
        self.manifold = kwargs.get('manifold', None)
        if self.manifold is None:
            self.manifold = Manifold.get_manifold_from_name('R^3 x S^3')  # 6-DoFs
        self.home_joint = np.array([-1, -0.5, 0.5, -0.5, -0.5, 0], dtype=np.float32) * np.pi
        self.moving = False  # this flag for recording trajectory
        # BUG FIX: reset() is called unconditionally below, but self.task was
        # only ever assigned when a task kwarg was given, so constructing
        # Environment() without a task crashed with AttributeError instead of
        # the documented ValueError. Initialize it explicitly.
        self.task = None
        self.observation_space = gym.spaces.Dict({
            'ee_pose':  # position and normalized quaternion
                gym.spaces.Box(low=np.array([0., 0., 0., -1., -1., -1., -1.], dtype=np.float32),
                               high=np.array([5., 5., 5., 1., 1., 1., 1.], dtype=np.float32),
                               shape=(7,),
                               dtype=np.float32),
            'ee_vel':
                gym.spaces.Box(low=-VEL_LIMIT * np.ones(6, dtype=np.float32),
                               high=VEL_LIMIT * np.ones(6, dtype=np.float32),
                               shape=(6,),
                               dtype=np.float32),
            'config':  # working with UR5
                gym.spaces.Box(low=-np.ones(6, dtype=np.float32) * np.pi,
                               high=np.ones(6, dtype=np.float32) * np.pi,
                               shape=(6,),
                               dtype=np.float32),
            'config_vel':
                gym.spaces.Box(low=-VEL_LIMIT * np.ones(6, dtype=np.float32),
                               high=VEL_LIMIT * np.ones(6, dtype=np.float32),
                               shape=(6,),
                               dtype=np.float32),
            'ft':
                gym.spaces.Discrete(6)  # TODO: implement FT sensor later
        })
        self.position_bounds = gym.spaces.Box(low=np.array([0., 0., 0.], dtype=np.float32),
                                              high=np.array([5., 5., 5.], dtype=np.float32),
                                              shape=(3,),
                                              dtype=np.float32)
        self.action_space = gym.spaces.Box(low=-ACCEL_LIMIT * np.ones(6, dtype=np.float32),
                                           high=ACCEL_LIMIT * np.ones(6, dtype=np.float32),
                                           shape=(6,),
                                           dtype=np.float32)
        # Start PyBullet.
        disp_option = p.DIRECT
        if disp:
            disp_option = p.GUI
            if shared_memory:
                disp_option = p.SHARED_MEMORY
        client = p.connect(disp_option)
        file_io = p.loadPlugin('fileIOPlugin', physicsClientId=client)
        if file_io < 0:
            raise RuntimeError('pybullet: cannot load FileIO!')
        if file_io >= 0:
            p.executePluginCommand(file_io,
                                   textArgument=ASSETS_PATH,
                                   intArgs=[p.AddFileIOAction],
                                   physicsClientId=client)
        p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
        p.setPhysicsEngineParameter(enableFileCaching=0)
        p.setAdditionalSearchPath(ASSETS_PATH)
        p.setAdditionalSearchPath(tempfile.gettempdir())
        p.setTimeStep(1. / self.sampling_hz)
        # If using --disp, move default camera closer to the scene.
        if disp:
            if self.real_time_step:
                p.setRealTimeSimulation(1)
            target = p.getDebugVisualizerCamera()[11]
            p.resetDebugVisualizerCamera(cameraDistance=1.1,
                                         cameraYaw=90,
                                         cameraPitch=-25,
                                         cameraTargetPosition=target)
        if task:
            self.set_task(task)
        self.reset()

    def get_joint_states(self, np_array=False):
        """Return (positions, velocities, applied torques) of the arm joints."""
        joint_states = p.getJointStates(self.ur5, self.joints)
        j = [state[0] for state in joint_states]
        j_vel = [state[1] for state in joint_states]
        j_torque = [state[3] for state in joint_states]
        if np_array:
            j, j_vel, j_torque = np.array(j), np.array(j_vel), np.array(j_torque)
        return j, j_vel, j_torque

    def compute_ee_jacobian(self, joint_states=None):
        """Return the (linear, angular) Jacobian at the end-effector tip.

        joint_states, if given, is a (positions, velocities, torques) triple
        as produced by get_joint_states(); otherwise the current state is read.
        """
        if joint_states is None:
            j, j_vel, j_torque = self.get_joint_states()
        else:
            j, j_vel, j_torque = joint_states
        return p.calculateJacobian(self.ur5, self.ee_tip, np.zeros(3), j, j_vel, j_torque)

    def forward_kinematics(self):
        """Return the current end-effector pose as [x, y, z, qx, qy, qz, qw]."""
        ee_state = p.getLinkState(self.ur5, self.ee_tip, computeForwardKinematics=True)
        ee = np.array(ee_state[0] + ee_state[1])  # [x, y, z, x, y, z, w]
        return ee

    def set_task(self, task):
        """Attach the task object driving reset/reward/done."""
        self.task = task

    def add_object(self, urdf, pose, category='rigid'):
        """List of (fixed, rigid, or deformable) objects in env. Assuming abs urdf path"""
        fixed_base = 1 if category == 'fixed' else 0
        obj_id = p.loadURDF(urdf, pose[:3], pose[3:], useFixedBase=fixed_base)
        self.obj_ids[category].append(obj_id)
        return obj_id

    def movej(self, targj, speed=0.01, timeout=5, direct=False, wait=0.):
        """Move UR5 to target joint configuration.

        With direct=True a single position-control command is issued (intended
        for use with real-time simulation); otherwise the motion is linearly
        interpolated and stepped, aborting after `timeout` seconds. `wait`
        pauses at the end so recorded trajectories include stable points.
        Returns True on success, False on timeout.
        """
        self.moving = True
        if direct:  # should work with RealTimeSimulation
            gains = np.ones(len(self.joints))
            p.setJointMotorControlArray(bodyIndex=self.ur5,
                                        jointIndices=self.joints,
                                        controlMode=p.POSITION_CONTROL,
                                        targetPositions=targj,
                                        positionGains=gains)
            self.moving = False
            return True
        else:
            t0 = time.time()
            currj, _, _ = self.get_joint_states(np_array=True)
            # Linear interpolation from current to target configuration.
            alpha = np.linspace(0., 1., int(1 / speed))
            points = np.outer(alpha, targj) + np.outer((1 - alpha), currj)
            gains = np.ones(len(self.joints))
            steps = len(points)
            i = 0
            while i < steps:
                if ((time.time() - t0) > timeout):
                    Environment.logger.warn(f'movej exceeded {timeout} second timeout. Skipping.')
                    self.moving = False
                    return False
                p.setJointMotorControlArray(bodyIndex=self.ur5,
                                            jointIndices=self.joints,
                                            controlMode=p.POSITION_CONTROL,
                                            targetPositions=points[i],
                                            positionGains=gains)
                if not self.real_time_step:
                    p.stepSimulation()
                i += 1
            if wait > 0.:  # to record stable points
                now = time.time()
                while (time.time() - now) < wait:
                    pass
            self.moving = False
            return True

    def movep(self, pose, speed=0.01, timeout=5, direct=False, wait=0.):
        """Move UR5 to target end effector pose."""
        targj = self.solve_ik(pose)
        return self.movej(targj, speed, timeout, direct, wait=wait)

    def setp(self, pose):
        """This should work with p.stepSimulation()"""
        targj = self.solve_ik(pose)
        self.setj(targj)

    def setj(self, targj):
        """This should work with p.stepSimulation()"""
        for i in range(len(self.joints)):
            p.resetJointState(self.ur5, self.joints[i], targj[i])
        if not self.real_time_step:
            p.stepSimulation()

    def solve_ik(self, pose):
        """Calculate joint configuration with inverse kinematics."""
        joints = p.calculateInverseKinematics(
            bodyUniqueId=self.ur5,
            endEffectorLinkIndex=self.ee_tip,
            targetPosition=pose[:3],
            targetOrientation=pose[3:],
            lowerLimits=[-3 * np.pi / 2, -2.3562, -17, -17, -17, -17],
            upperLimits=[-np.pi / 2, 0, 17, 17, 17, 17],
            jointRanges=[np.pi, 2.3562, 34, 34, 34, 34],  # * 6,
            restPoses=np.float32(self.home_joint).tolist(),
            maxNumIterations=100,
            residualThreshold=1e-5)
        joints = np.float32(joints)
        # Wrap all but the first two joints into [-pi, pi).
        joints[2:] = (joints[2:] + np.pi) % (2 * np.pi) - np.pi
        return joints

    def seed(self, seed=None):
        """Seed the RNG used for camera noise in render_camera()."""
        self._random = np.random.RandomState(seed)
        return seed

    def reset(self):
        """Performs common reset functionality for all supported tasks."""
        if not self.task:
            raise ValueError('environment task must be set. Call set_task or pass '
                             'the task arg in the environment constructor.')
        self.obj_ids = {'fixed': [], 'rigid': [], 'deformable': []}
        p.resetSimulation(p.RESET_USE_DEFORMABLE_WORLD)
        p.setGravity(0, 0, -9.8)
        # Temporarily disable rendering to load scene faster.
        p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
        p.loadURDF(PLANE_URDF_PATH, [0, 0, -0.001])
        p.loadURDF(UR5_WORKSPACE_URDF_PATH, [0.5, 0, 0])
        # Load UR5 robot arm equipped with suction end effector.
        self.ur5 = p.loadURDF(UR5_URDF_PATH)
        self.ee = self.task.ee(self.ur5, NUM_LINKS - 1, self.obj_ids)
        self.ee_tip = NUM_LINKS  # Link ID of suction cup.
        # Get revolute joint indices of robot (skip fixed joints).
        n_joints = p.getNumJoints(self.ur5)
        joints = [p.getJointInfo(self.ur5, i) for i in range(n_joints)]
        self.joints = [j[0] for j in joints if j[2] == p.JOINT_REVOLUTE]
        # Move robot to home joint configuration.
        for i in range(len(self.joints)):
            p.resetJointState(self.ur5, self.joints[i], self.home_joint[i])
        # Reset end effector.
        self.ee.release()
        # Reset task.
        self.task.reset(self)
        # Re-enable rendering.
        p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
        self.home_pose = self.forward_kinematics()
        # a hack to bypass computing Jacobian at step(), these variables are perfect state tracking
        self._ee_pose = self.forward_kinematics()
        self._ee_vel = np.zeros(6, dtype=np.float32)
        self._config = self.home_joint
        self._config_vel = np.zeros(6, dtype=np.float32)

    def step(self, action=None, return_data=True, config_space=False):
        """Execute action with acceleration (deterministic).

        Parameters
        ----------
        :param action: acceleration action to execute (joint space when
            config_space=True, task space otherwise). None is a no-op.
        :param return_data: when False, skip reward computation and return None.
        :param config_space: integrate the action in joint space instead of
            end-effector (task) space.

        Returns:
        --------
        (obs, reward, done, info) tuple containing MDP step data.
        """
        if action is None:
            return self.robot_state, 0, False, {}
        dt = 1 / self.sampling_hz
        if config_space:
            # Euler-integrate joint accelerations, then set the configuration.
            j, j_vel = self.config, self.config_vel
            j_vel += action * dt
            j += j_vel * dt
            self._config = j
            self._config_vel = j_vel
            self.setj(j)
            ee = self.forward_kinematics()
            zeros_v = [0.] * len(j)
            joint_states = (j.tolist(), zeros_v, zeros_v)
            J_pos, J_rot = self.compute_ee_jacobian(joint_states)
            J_pos, J_rot = np.array(J_pos), np.array(J_rot)
            self._ee_pose = ee
            self._ee_vel = np.append(J_pos @ j_vel, J_rot @ j_vel)
        else:
            ee, ee_vel = self.ee_pose, self.ee_vel  # NOTE: self.ee_vel is a hack to bypass computing Jacobian, hence config_vel is not updated
            ee_vel += action * dt
            # Integrate the pose on the manifold (quaternion stored wxyz there).
            ee_wxyz = np.append(ee[:3], q_convert_wxyz(ee[3:]))
            ee_wxyz = self.manifold.exp_map(ee_vel * dt, base=ee_wxyz)
            ee = np.append(ee_wxyz[:3], q_convert_xyzw(ee_wxyz[3:]))
            self.setp(ee)
            self._ee_pose = ee
            self._ee_vel = ee_vel
        if return_data:
            # Get task rewards.
            reward, info = self.task.reward() if action is not None else (0, {})
            done = self.task.done()
            # Add ground truth robot state into info.
            info.update(self.info)
            obs = self.robot_state
            return obs, reward, done, info

    def render(self, mode='rgb_array'):
        # Render only the color image from the first camera.
        # Only support rgb_array for now.
        # NOTE(review): relies on self.agent_cams, which is not set anywhere
        # in this class -- presumably provided by the task; confirm.
        if mode != 'rgb_array':
            raise NotImplementedError('Only rgb_array implemented')
        color, _, _ = self.render_camera(self.agent_cams[0])
        return color

    def render_camera(self, config):
        """Render RGB-D image with specified camera configuration."""
        # OpenGL camera settings.
        lookdir = np.float32([0, 0, 1]).reshape(3, 1)
        updir = np.float32([0, -1, 0]).reshape(3, 1)
        rotation = p.getMatrixFromQuaternion(config['rotation'])
        rotm = np.float32(rotation).reshape(3, 3)
        lookdir = (rotm @ lookdir).reshape(-1)
        updir = (rotm @ updir).reshape(-1)
        lookat = config['position'] + lookdir
        focal_len = config['intrinsics'][0]
        znear, zfar = config['zrange']
        viewm = p.computeViewMatrix(config['position'], lookat, updir)
        fovh = (config['image_size'][0] / 2) / focal_len
        fovh = 180 * np.arctan(fovh) * 2 / np.pi
        # Notes: 1) FOV is vertical FOV 2) aspect must be float
        aspect_ratio = config['image_size'][1] / config['image_size'][0]
        projm = p.computeProjectionMatrixFOV(fovh, aspect_ratio, znear, zfar)
        # Render with OpenGL camera settings.
        _, _, color, depth, segm = p.getCameraImage(
            width=config['image_size'][1],
            height=config['image_size'][0],
            viewMatrix=viewm,
            projectionMatrix=projm,
            shadow=1,
            flags=p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX,
            renderer=p.ER_BULLET_HARDWARE_OPENGL)
        # Get color image.
        color_image_size = (config['image_size'][0], config['image_size'][1],
                            4)
        color = np.array(color, dtype=np.uint8).reshape(color_image_size)
        color = color[:, :, :3]  # remove alpha channel
        if config['noise']:
            # NOTE(review): self._random only exists after seed() was called.
            color = np.int32(color)
            color += np.int32(self._random.normal(0, 3, config['image_size']))
            color = np.uint8(np.clip(color, 0, 255))
        # Get depth image.
        depth_image_size = (config['image_size'][0], config['image_size'][1])
        zbuffer = np.array(depth).reshape(depth_image_size)
        depth = (zfar + znear - (2. * zbuffer - 1.) * (zfar - znear))
        depth = (2. * znear * zfar) / depth
        if config['noise']:
            depth += self._random.normal(0, 0.003, depth_image_size)
        # Get segmentation image.
        segm = np.uint8(segm).reshape(depth_image_size)
        return color, depth, segm

    @threaded
    def record_trajectory(self):
        '''This function works with self.movep()'''
        traj = []
        traj_vel = []
        # wait for robot to move
        while not self.moving:
            pass
        while self.moving:
            state = self.robot_state
            traj.append(state['ee_pose'])
            traj_vel.append(state['ee_vel'])
            time.sleep(1 / self.sampling_hz)
        traj = np.vstack(traj).T
        traj_vel = np.vstack(traj_vel).T
        return traj, traj_vel

    @property
    def is_static(self):
        """Return true if objects are no longer moving."""
        v = [np.linalg.norm(p.getBaseVelocity(i)[0]) for i in self.obj_ids['rigid']]
        return all(np.array(v) < 5e-3)

    @property
    def info(self):
        """Environment info variable with object poses, dimensions, and colors."""
        info = {}  # object id : (position, rotation, dimensions)
        for obj_ids in self.obj_ids.values():
            for obj_id in obj_ids:
                pos, rot = p.getBasePositionAndOrientation(obj_id)
                dim = p.getVisualShapeData(obj_id)[0][3]
                info[obj_id] = (pos, rot, dim)
        return info

    @property
    def robot_state(self):
        '''Simulation state'''
        ee_state = p.getLinkState(self.ur5, self.ee_tip, computeLinkVelocity=True, computeForwardKinematics=True)  # index of gripper is NUM_LINKS
        ee = np.array(ee_state[0] + ee_state[1])  # [x, y, z, x, y, z, w]
        ee_vel = np.array(ee_state[6] + ee_state[7])  # ee velocity
        j, j_vel, _ = self.get_joint_states()
        state = {'ee_pose': ee, 'ee_vel': ee_vel, 'config': j, 'config_vel': j_vel}
        return state

    @property
    def ee_pose(self):
        """Tracked end-effector pose [x, y, z, qx, qy, qz, qw]."""
        return self._ee_pose

    @property
    def ee_vel(self):
        """Tracked end-effector twist (linear + angular)."""
        return self._ee_vel

    @property
    def config(self):
        """Tracked joint configuration."""
        return self._config

    @property
    def config_vel(self):
        """Tracked joint velocities."""
        return self._config_vel
| 2.09375 | 2 |
def sum67(nums):
    """Sum the numbers in nums, except that any section starting with a 6
    and extending up to (and including) the next 7 contributes nothing."""
    total = 0
    in_section = False
    for value in nums:
        if in_section:
            # Inside a 6..7 section: drop everything; a 7 closes the section.
            if value == 7:
                in_section = False
        elif value == 6:
            in_section = True
        else:
            total += value
    return total
| 3.515625 | 4 |
curator/validators/config_file.py | rprabhat/curator | 1 | 12772114 | from voluptuous import *
from ..defaults import client_defaults
def client():
return Schema(
{
Optional('client'): client_defaults.config_client(),
Optional('logging'): client_defaults.config_logging(),
}
)
| 1.625 | 2 |
app/model/authtoken.py | Stanford-PERTS/neptune | 0 | 12772115 | <filename>app/model/authtoken.py
"""AuthToken: Temporary code for a user to authenticate, confirm
email, or change their password from a link.
"""
from google.appengine.ext import ndb
import datetime
import logging
from gae_models import DatastoreModel
class AuthToken(DatastoreModel):
    """Acts as a one-time-pass for a user to take some sensitive action.

    Children of a user. The "token" is just the uid of a AuthToken entity.
    """
    # Token lifetime in hours.
    duration = ndb.IntegerProperty(default=48)  # number of hours
    # Derived: the token string users see is the entity's short uid.
    token = ndb.ComputedProperty(lambda self: self.short_uid)

    @classmethod
    def create(klass, user_id, duration=None):
        """Create a fresh token for the user, invalidating all existing ones."""
        klass.clear_all_tokens_for_user(user_id)
        user_key = DatastoreModel.id_to_key(user_id)
        if duration is None:
            duration = klass.duration._default
        return super(klass, klass).create(parent=user_key, duration=duration)

    @classmethod
    def create_or_renew(klass, user_id, duration=None):
        """Attempt to find and renew a valid token, else create one."""
        logging.info("AuthToken.create_or_renew()")
        valid_token = None
        for t in klass.get_all_tokens_for_user(user_id):
            if not t.is_expired():
                # Renew by resetting the creation time; keep the same token.
                valid_token = t
                valid_token.created = datetime.datetime.utcnow()
                valid_token.put()
                break
        return valid_token or klass.create(user_id, duration)

    @classmethod
    def check_token_string(klass, token_string):
        """Validate a token in a password reset URL.

        Returns: tuple as (user, error)
        * user is a User entity or None
        * error is a string:
            - 'not found' if the token doesn't exist
            - 'used' if the token does exist but has deleted = True
            - 'expired' if the token exists, is not delete, but has expired
        """
        # Bypass DatastoreModel.get_by_id to make sure we pick up any entites that have
        # deleted = True.
        user = None
        error = None
        # The token string is user input; treat with caution b/c it may not
        # be a valid/interpretable uid.
        key = AuthToken.id_to_key(token_string)
        if not key:
            error = 'not found'
            return (user, error)
        token = key.get()
        if not token:
            error = 'not found'
        elif token.deleted:
            error = 'used'
        elif token.is_expired():
            error = 'expired'
        else:
            user = token.get_user()
        return (user, error)

    @classmethod
    def mark_as_used(klass, token_string):
        """Soft-delete a token so it cannot be used again.

        NOTE(review): assumes the token exists -- raises AttributeError on an
        unknown token_string; callers should validate first.
        """
        t = AuthToken.get_by_id(token_string)
        t.deleted = True
        t.put()

    @classmethod
    def clear_all_tokens_for_user(klass, user_id):
        """Delete all tokens for a given user."""
        # "Delete" here means soft-delete: flip the deleted flag on each.
        tokens_to_put = []
        for t in klass.get_all_tokens_for_user(user_id):
            t.deleted = True
            tokens_to_put.append(t)
        ndb.put_multi(tokens_to_put)

    @classmethod
    def get_all_tokens_for_user(klass, user_id):
        """Return every AuthToken entity parented under the given user."""
        user_key = DatastoreModel.id_to_key(user_id)
        results = AuthToken.get(n=float('inf'), ancestor=user_key)
        return results

    @classmethod
    def get_long_uid(klass, short_uid):
        """Expand a short uid, resolving against both AuthToken and User kinds."""
        return super(klass, klass).get_long_uid(
            short_uid, kinds=(klass.__name__, 'User'))

    def is_expired(self):
        """True when this token is older than its configured duration."""
        duration = datetime.timedelta(hours=self.duration)
        # Use utcnow() b/c our unit tests use local python runtimes, which may
        # vary in timezone setting, while the production App Engine runtime is
        # always in UTC. This forces everything to UTC.
        return datetime.datetime.utcnow() - self.created > duration

    def get_user(self):
        """Return the User entity this token belongs to (its datastore parent)."""
        return self.key.parent().get()
| 3.1875 | 3 |
1/DayOne.py | samhallam18/aoc-2019 | 1 | 12772116 | import math
def fuel_for(mass):
    """Return the total fuel needed for `mass`, including fuel-for-fuel.

    Each stage needs mass // 3 - 2 fuel; stages continue until the
    requirement drops to zero or below (Advent of Code 2019, day 1 part 2).

    BUG FIX: the original added the *first* stage's value unconditionally,
    so small masses (< 9) contributed a negative amount to the total; the
    spec treats a non-positive requirement as zero.
    """
    total = 0
    fuel = mass // 3 - 2
    while fuel > 0:
        total += fuel
        fuel = fuel // 3 - 2
    return total


def main():
    # Sum the fuel requirement over every module mass listed in fuel.txt,
    # one integer per line; the file is closed automatically.
    with open("fuel.txt") as f:
        print(sum(fuel_for(int(line)) for line in f))


if __name__ == "__main__":
    main()
| 3.546875 | 4 |
receives/helper.py | felixsch/receives | 0 | 12772117 | <gh_stars>0
import inspect
def current_module():
    """Return the module object of this function's caller."""
    # stack()[1] is the caller's frame record; element 0 is the frame itself.
    caller_frame = inspect.stack()[1][0]
    return inspect.getmodule(caller_frame)
| 1.3125 | 1 |
question/urls.py | RohanDukare/OnlineVoting | 7 | 12772118 | <filename>question/urls.py
from question.views import QA,review
from django.conf.urls import url
# URL routes for the question app.
urlpatterns = [
    url(r'^qa/$', QA),          # question & answer page
    url(r'^review/$', review),  # review page
]
| 1.765625 | 2 |
Server/django-utils-master/djutils/utils/http.py | TheInventorMan/IntelligentTrafficManagement | 3 | 12772119 | <reponame>TheInventorMan/IntelligentTrafficManagement
import httplib2
import re
import socket
from urllib import urlencode
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect
from django.utils import simplejson
def fetch_url(url, parameters=None, http_method="GET", follow_redirects=True,
              timeout=4, user_agent='python-httplib2'):
    """
    Fetch data at the given URL, returning the response body.

    Raises ValueError on socket timeout or any non-200 status.
    """
    http = httplib2.Http(timeout=timeout)
    http.follow_redirects = follow_redirects
    headers_to_send = {'User-Agent': user_agent}
    try:
        if http_method in ('GET', 'HEAD'):
            # GET/HEAD carry parameters in the query string.
            if parameters:
                url = '%s?%s' % (url, urlencode(parameters))
            response_headers, body = http.request(url, headers=headers_to_send)
        else:
            # Methods with a request body send a form-encoded payload.
            headers_to_send['Content-type'] = 'application/x-www-form-urlencoded'
            encoded = urlencode(parameters or {})
            response_headers, body = http.request(
                url,
                http_method,
                encoded,
                headers=headers_to_send
            )
    except socket.timeout:
        raise ValueError('Socket timed out')
    if response_headers['status'] not in ('200', 200):
        raise ValueError('Returned status: %s' % (response_headers['status']))
    return body
def json_response(context_dictionary):
    """
    Convert a python dictionary in a JSON HttpResponse
    """
    # Serve as text/javascript while DEBUG is on so browsers render it inline.
    if settings.DEBUG:
        mimetype = 'text/javascript'
    else:
        mimetype = 'application/json'
    return HttpResponse(simplejson.dumps(context_dictionary), mimetype=mimetype)
def next_redirect(request, fallback='/'):
    """Redirect to the 'next' request parameter, using *fallback* when unsafe."""
    target = request.REQUEST.get('next', '')
    # Reject empty values and values containing spaces outright.
    if not target or ' ' in target:
        target = fallback
    # Heavier security check -- redirects to http://example.com should
    # not be allowed, but things like /view/?param=http://example.com
    # should be allowed. This regex checks if there is a '//' *before* a
    # question mark.
    elif '//' in target and re.match(r'[^\?]*//', target):
        target = fallback
    return HttpResponseRedirect(target)
| 2.421875 | 2 |
evaluation.py | Djmon91/3D-Unet | 12 | 12772120 | #coding:utf-8
"""
@auther tk0103
@date 2018-07-04
"""
import os, time, sys, copy
import numpy as np
import chainer
import chainer.links as L
import chainer.functions as F
from chainer.dataset import concat_examples
class Unet3DEvaluator(chainer.training.extensions.Evaluator):
    """Trainer extension reporting validation loss and Dice score for a 3-D U-Net."""
    def __init__(self, iterator, unet, number_of_label, converter=concat_examples, device=None, eval_hook=None):
        # Accept either a bare iterator or a dict of iterators keyed by name.
        if isinstance(iterator, chainer.dataset.Iterator):
            iterator = {'main': iterator}
        self._iterators = iterator
        self._targets = {'unet':unet}
        self.converter = converter
        self.device = device
        self.eval_hook = eval_hook
        # Number of segmentation labels; label 0 is treated as background below.
        self._max_label = number_of_label
    def loss_softmax_cross_entropy(self, predict, ground_truth):
        """Cross entropy of predicted probabilities against the ground-truth mask."""
        eps = 1e-16  # guards against log(0)
        cross_entropy = -F.mean(F.log(predict+eps) * ground_truth)
        return cross_entropy
    def dice_coefficent(self, predict, ground_truth):
        '''
        Assume label 0 is background
        '''
        dice_numerator = 0.0
        dice_denominator = 0.0
        eps = 1e-16  # avoids division by zero when both volumes are empty
        # Drop the background channel (axis 1) before flattening; assumes
        # 5-D inputs of shape (batch, label, d0, d1, d2) -- TODO confirm.
        predict = F.flatten(predict[:,1:self._max_label,:,:,:])
        ground_truth = F.flatten(ground_truth[:,1:self._max_label,:,:,:].astype(np.float32))
        dice_numerator = F.sum(predict * ground_truth)
        dice_denominator =F.sum(predict+ ground_truth)
        dice = 2*dice_numerator/(dice_denominator+eps)
        return dice
    def evaluate(self):
        """Run one pass over the validation iterator and return mean metrics."""
        iterator = self._iterators['main']
        unet = self._targets['unet']
        #eval_func = self.eval_func or target
        it = copy.copy(iterator)#shallow copy
        summary = chainer.reporter.DictSummary()
        for batch in it:
            observation = {}
            with chainer.reporter.report_scope(observation):
                ground_truth, data = self.converter(batch, self.device)
                # Inference mode: no train-time behavior and no gradient graph.
                with chainer.using_config("train", False):
                    with chainer.no_backprop_mode():
                        predict = unet(data)
                        observation['unet/val/loss'] = self.loss_softmax_cross_entropy(predict, ground_truth)
                        observation['unet/val/dice'] = self.dice_coefficent(predict, ground_truth)
            summary.add(observation)
        return summary.compute_mean()
| 2.40625 | 2 |
TCS/problem1.py | Akash671/coding | 0 | 12772121 | <reponame>Akash671/coding
# author : <NAME>
# problem link:
# https://prepinsta.com/tcs-coding-question-1/
# Walk a spiral-like path: the move pattern repeats (+x, +y, -x, -y, +x) and
# the step length grows by 10 after every move.  Reads the number of moves
# from stdin and prints the final coordinates.
x, y = 0, 0
step = 10
directions = ((1, 0), (0, 1), (-1, 0), (0, -1), (1, 0))  # note: +x twice per cycle
for move in range(int(input())):
    dx, dy = directions[move % 5]
    x += dx * step
    y += dy * step
    step += 10
print(x, y)
# Time complexity T(n) = O(n)
| 3.453125 | 3 |
Wisapp/app/migrations/0036_auto_20181104_0033.py | AliVillegas/Wisapp | 0 | 12772122 | # Generated by Django 2.1.2 on 2018-11-04 06:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0035_userwithprofile_followedcategories'),
]
operations = [
migrations.AddField(
model_name='story',
name='image',
field=models.FileField(blank=True, default='pic_folder/None/no-img.jpg', null=True, upload_to='pic_folder/'),
),
migrations.AlterField(
model_name='userwithprofile',
name='writtenStories',
field=models.ManyToManyField(blank=True, related_name='WrittenStories', to='app.Story', verbose_name='Historias escritas'),
),
]
| 1.773438 | 2 |
openks/models/pytorch/kg_modules/metrics.py | OpenCKS/OpenKS | 0 | 12772123 | <reponame>OpenCKS/OpenKS<filename>openks/models/pytorch/kg_modules/metrics.py
import math
def getP(ranklist, gtItems):
    """Precision: fraction of ranked items that appear in the ground truth."""
    hits = sum(1 for item in ranklist if item in gtItems)
    return hits * 1.0 / len(ranklist)
def getR(ranklist, gtItems):
    """Recall: fraction of ground-truth items that appear in the ranked list."""
    hits = sum(1 for item in ranklist if item in gtItems)
    return hits * 1.0 / len(gtItems)
def getHitRatio(ranklist, gtItem):
    """Count how many ranked items appear in the ground-truth collection."""
    return sum(1 for item in ranklist if item in gtItem)
def getDCG(ranklist, gtItems):
    """Discounted cumulative gain: each hit at rank r contributes 1/ln(r+2)."""
    return sum(1.0 / math.log(rank + 2)
               for rank, item in enumerate(ranklist)
               if item in gtItems)
def getIDCG(ranklist, gtItems):
    """Ideal DCG: the DCG of a perfect ranking placing every hit at the top.

    Bug fix: the rank counter previously advanced on *every* item, which made
    getIDCG identical to getDCG, so getNDCG could only ever be 0 or 1.  The
    ideal ranking compacts hits to the top ranks, so the counter must advance
    only when a hit is placed.
    """
    idcg = 0.0
    i = 0  # rank the next hit would occupy in the ideal ordering
    for item in ranklist:
        if item in gtItems:
            idcg += 1.0 / math.log(i + 2)
            i += 1
    return idcg
def getNDCG(ranklist, gtItems):
    """Normalized DCG: DCG divided by ideal DCG, or 0 when IDCG is zero."""
    idcg = getIDCG(ranklist, gtItems)
    if idcg == 0:
        return 0
    return getDCG(ranklist, gtItems) / idcg
data/plot.py | ZiyangTian/tsp | 0 | 12772124 | import numpy as np
import matplotlib.pyplot as plt
def plot_tsp(parameters, rank):
    """Draw the city points (red dots) and the closed tour through them (blue)."""
    # Append the first stop to the end of the tour so the path closes.
    tour = np.concatenate([rank, rank[0:1]], axis=0)
    xs = parameters[:, 0]
    ys = parameters[:, 1]
    plt.figure()
    plt.plot(xs, ys, 'ro', color='red')
    plt.plot(xs[tour], ys[tour], 'r-', color='blue')
    plt.show()
| 2.546875 | 3 |
challenges/8.3.Function_Documentation_Strings/lesson_tests.py | pradeepsaiu/python-coding-challenges | 141 | 12772125 | import unittest
from main import *
class FunctionDocumentationStringsTests(unittest.TestCase):
    """Checks that docstring_function (imported from main) carries a docstring."""
    def test_main(self):
        # The function must run (returning None) and expose a string __doc__.
        self.assertIsNone(docstring_function())
        self.assertIsNotNone(docstring_function.__doc__)
        self.assertIsInstance(docstring_function.__doc__, str)
| 2.796875 | 3 |
examples/pylab_examples/pie_demo2.py | SoftwareDev/mat-plot-lib | 16 | 12772126 | """
Make a pie charts of varying size - see
http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.pie for the docstring.
This example shows a basic pie charts with labels optional features,
like autolabeling the percentage, offsetting a slice with "explode"
and adding a shadow, in different sizes.
"""
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
# Some data
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
fracs = [15, 30, 45, 10]
explode=(0, 0.05, 0, 0)  # offsets only the second slice
# Make square figures and axes
the_grid = GridSpec(2, 2)
# Top-left: basic pie with one-decimal percentage labels and a shadow.
plt.subplot(the_grid[0, 0], aspect=1)
plt.pie(fracs, labels=labels, autopct='%1.1f%%', shadow=True)
# Top-right: same data with the second slice exploded.
plt.subplot(the_grid[0, 1], aspect=1)
plt.pie(fracs, explode=explode, labels=labels, autopct='%.0f%%', shadow=True)
# Bottom-left: half-radius pie; pie() returns the wedge patches plus the
# label and autopct Text objects so they can be restyled below.
plt.subplot(the_grid[1, 0], aspect=1)
patches, texts, autotexts = plt.pie(fracs, labels=labels,
                                    autopct='%.0f%%',
                                    shadow=True, radius=0.5)
# Make the labels on the small plot easier to read.
for t in texts:
    t.set_size('smaller')
for t in autotexts:
    t.set_size('x-small')
autotexts[0].set_color('y')
plt.subplot(the_grid[1, 1], aspect=1)
patches, texts, autotexts = plt.pie(fracs, explode=explode,
                                    labels=labels, autopct='%.0f%%',
                                    shadow=False, radius=0.5)
# Turn off shadow for tiny plot
# with exploded slice.
for t in texts:
    t.set_size('smaller')
for t in autotexts:
    t.set_size('x-small')
autotexts[0].set_color('y')
plt.show()
| 3.8125 | 4 |
unity/build_test_dataset.py | rubind/host_unity | 0 | 12772127 | """build_test_dataset.py -- The functions to build simulated data sets.
"""
import pickle
import numpy as np
from scipy import stats
# import matplotlib.pyplot as plt
# import corner
# --- Run configuration ------------------------------------------------------
# Later assignments win: the effective values are DATA_NAME='3_gaus' and
# MB_HOST='direct'; the earlier lines document the other supported choices.
DATA_NAME = 'simple' # default
DATA_NAME = '3_gaus'
MB_HOST = 'indirect' # default
MB_HOST = 'step' # todo implement this
MB_HOST = 'direct'
np.random.seed(13048293)
N_SNE = 300
YOUNG_FRAC = 0.95
N_YOUNG = int(N_SNE*YOUNG_FRAC)
N_OLD = N_SNE - N_YOUNG
# TRUE VALUES
# Latent (noise-free) supernova properties: color c, host mass, stretch x1,
# host age, and magnitude mb.  NOTE: draw order matters for reproducibility
# with the fixed seed above.
c_true = np.random.randn(N_SNE)*0.1
mass_young = np.random.randn(N_YOUNG) + 11 - np.random.exponential(0.5, N_YOUNG)
mass_old = np.random.randn(N_OLD)*0.75 + 11
mass_true = np.concatenate((mass_young, mass_old))
# x1 statistics switch at a host-mass step of 10 (different width and mean).
x1_true = np.random.randn(N_SNE)*((mass_true>10)*0.75 + (mass_true<=10)*0.9) + ((mass_true>10)*-0.5 + (mass_true<=10)*0.1)
age_young = (np.random.triangular(0.25, 0.5, 6, size=N_YOUNG)*(mass_young/4)
             + np.random.exponential(size=N_YOUNG)*x1_true[:N_YOUNG]/3)
age_old = np.random.randn(N_OLD)*0.75 + 10
age_true = np.append(age_young, age_old)
# COFF: coefficients applied to [x1, c, mass, age] when building mb_true --
# presumably a Tripp-like relation (alpha, beta, mass step, age slope); confirm.
COFF = [-0.1, 3, 0.05/0.5, 0.05/2]
if MB_HOST == 'direct':
    mb_true = COFF[0]*x1_true + COFF[1]*c_true + COFF[2]*mass_true + COFF[3]*age_true - 20
else:
    mb_true = COFF[0]*x1_true + COFF[1]*c_true - 20
# corner.corner(np.array([x1_true, c_true, mass_true, age_true, mb_true]).T,
#               labels=['x1', 'c', 'mass', 'age', 'M'])
# plt.show()
# OBSERVATIONAL
# Add per-property measurement noise to each latent value.
x1_obs = x1_true + np.random.randn(N_SNE)*0.3
c_obs = c_true + np.random.randn(N_SNE)*0.04
mass_obs = mass_true + np.random.randn(N_SNE)*0.5
# todo add obs systematic to ages
if DATA_NAME == '3_gaus':
    AGE_STD = 0.2
    # each should be shape (N_SNE, 3) for the 3_gaus model
    # tile works if the input array is shape (N_SNE, 1)
    age_gaus_mean = np.abs(np.tile(age_true.reshape(N_SNE, 1), 3) +
                           np.random.randn(N_SNE, 3)*AGE_STD*np.tile(age_true.reshape(N_SNE, 1), 3))
    age_gaus_mean = np.expand_dims(age_gaus_mean, 0)
    # only apply 1/3 of the uncertainty to each Gaussian
    age_gaus_std = np.random.randn(N_SNE, 3)*(AGE_STD*np.tile(age_true.reshape(N_SNE, 1), 3))/3
    age_gaus_std = np.expand_dims(age_gaus_std, 0)
    # it just works, test it with .sum(axis=1).
    age_gaus_A = np.random.dirichlet((1, 1, 1), (N_SNE))
    age_gaus_A = np.expand_dims(age_gaus_A, 0)
else:
    # defaults to simple model
    AGE_STD = 0.2
    age_obs = np.abs(age_true + np.random.randn(N_SNE)*AGE_STD*age_true)
    age_gaus_std = [np.array([AGE_STD*np.abs(age_true)]).T]
    age_gaus_A = np.ones((1, N_SNE, 1), dtype=np.float)
mb_obs = mb_true + np.random.randn(N_SNE)*0.15
# corner.corner(np.array([x1_obs, c_obs, mass_obs, age_obs, mb_obs]).T,
#               labels=['x1', 'c', 'mass', 'age', 'M'], show_titles=True)
# plt.show()
# SAVE DATA
# Number of Gaussian mixture components used to describe each age estimate.
if DATA_NAME == '3_gaus':
    n_age_mix = 3
else:
    n_age_mix = 1
pickle.dump(dict(  # general properties
    n_sne=N_SNE, n_props=5, n_non_gaus_props=1, n_sn_set=1,
    sn_set_inds=[0]*N_SNE,
    # redshifts
    z_helio=[0.1]*N_SNE, z_CMB=[0.1]*N_SNE,
    # Gaussian defined properties
    obs_mBx1c=[[mb_obs[i], x1_obs[i], c_obs[i], mass_obs[i]] for i in range(N_SNE)],
    obs_mBx1c_cov=[np.diag([0.05**2, 0.3**2, 0.04**2, 0.3**2])]*N_SNE,
    # Non-Gaussian properties, aka age
    n_age_mix=n_age_mix, age_gaus_mean=age_gaus_mean,
    age_gaus_std=age_gaus_std, age_gaus_A=age_gaus_A,
    # Other stuff that does not really need to change
    do_fullDint=0, outl_frac_prior_lnmean=-4.6, outl_frac_prior_lnwidth=1,
    lognormal_intr_prior=0, allow_alpha_S_N=0),
    open(f'test_{DATA_NAME}_{N_SNE}_obs.pkl', 'wb'))
pickle.dump({'x1': x1_true, 'c': c_true, 'mass': mass_true,
             'age': age_true, 'mb': mb_true},
            open(f'test_{DATA_NAME}_{N_SNE}_true.pkl', 'wb'))
| 3.0625 | 3 |
tests/generator/test_vode.py | yuanz271/PyDSTool | 2 | 12772128 | <reponame>yuanz271/PyDSTool
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
from numpy import (
all,
allclose,
array,
linspace,
sin,
sort,
)
from numpy.testing import assert_almost_equal
import pytest
from PyDSTool import (
Events,
args,
makeMultilinearRegrFn,
)
from PyDSTool.Generator import (
Euler_ODEsystem,
InterpolateTable,
Vode_ODEsystem,
)
def test_vode_events_compare_with_euler():
    """
    Test terminal and non-terminal event testing with VODE integrator,
    including some comparisons and tests of Euler integrator too.
    """
    # Simple forced linear ODE with one non-terminal ('monitor') and one
    # terminal ('threshold') zero-crossing event.
    DSargs = args(varspecs={'w': 'k*sin(2*t) - w'}, name='ODEtest')
    DSargs.tdomain = [0, 10]
    DSargs.pars = {'k': 1, 'p_thresh': -0.25}
    DSargs.algparams = {'init_step': 0.001, 'atol': 1e-12, 'rtol': 1e-13}
    DSargs.checklevel = 2
    DSargs.ics = {'w': -1.0}
    DSargs.tdata = [0, 10]
    ev_args_nonterm = {'name': 'monitor',
                       'eventtol': 1e-4,
                       'eventdelay': 1e-5,
                       'starttime': 0,
                       'active': True,
                       'term': False,
                       'precise': True}
    thresh_ev_nonterm = Events.makeZeroCrossEvent('w', 0,
                                                  ev_args_nonterm, varnames=['w'])
    ev_args_term = {'name': 'threshold',
                    'eventtol': 1e-4,
                    'eventdelay': 1e-5,
                    'starttime': 0,
                    'active': True,
                    'term': True,
                    'precise': True}
    thresh_ev_term = Events.makeZeroCrossEvent('w-p_thresh',
                                               -1, ev_args_term, varnames=['w'], parnames=['p_thresh'])
    DSargs.events = [thresh_ev_nonterm, thresh_ev_term]
    testODE = Vode_ODEsystem(DSargs)
    # diagnostics and other possible user-defined python functions
    # for python solvers only (currently only Euler)
    # def before_func(euler):
    #     print(euler.algparams['init_step'])
    #
    # def after_func(euler):
    #     print(euler._solver.y)
    #
    ##DSargs.user_func_beforestep = before_func
    ##DSargs.user_func_afterstep = after_func
    testODE_Euler = Euler_ODEsystem(DSargs)
    traj = testODE.compute('traj')
    traj2 = testODE_Euler.compute('traj')
    pts = traj.sample()
    testODE.diagnostics.showWarnings()
    mon_evs_found = testODE.getEvents('monitor')
    term_evs_found = testODE.getEvents('threshold')
    # test Euler
    # Both integrators must locate the same non-terminal event times
    # (loose tolerance because Euler is first-order).
    assert allclose(array(testODE.getEventTimes('monitor')),
                    array(traj2.getEventTimes('monitor')), atol=1e-3)
    assert all(traj.getEvents('monitor') == mon_evs_found)
    assert all(traj.getEventTimes('threshold')
               == testODE.getEventTimes('threshold'))
    term_evs_found.info()
    # Alternative way to extract events: they are labelled in the
    # pointset! These return dictionaries indexing into the pointset.
    mon_evs_dict = pts.labels.by_label['Event:monitor']
    mon_ev_points = pts[sort(list(mon_evs_dict.keys()))]
    assert len(mon_evs_found) == len(mon_ev_points) == 2
    assert all(mon_evs_found == mon_ev_points)
@pytest.fixture
def my_input():
    """Linear-interpolation table of sin(20*t) sampled at 20 points on [0, 10]."""
    times = linspace(0, 10, 20)
    table = InterpolateTable({
        'tdata': times,
        'ics': {'example_input': sin(20 * times)},
        'name': 'interp1d',
        'method': 'linear',
        'checklevel': 1,
        'abseps': 1e-5
    })
    return table.compute('interp')
def test_vode_events_with_external_input(my_input):
    """
    Test Vode_ODEsystem with events involving external inputs.
    <NAME>, September 2006.
    """
    # Piecewise-linear auxiliary function breakpoints for pcwfn.
    xs = ['x1', 'x2', 'x3']
    ys = [0, 0.5, 1]
    fvarspecs = {"w": "k*w + pcwfn(sin(t)) + myauxfn1(t)*myauxfn2(w)",
                 'aux_wdouble': 'w*2 + globalindepvar(t)',
                 'aux_other': 'myauxfn1(2*t) + initcond(w)'}
    fnspecs = {'myauxfn1': (['t'], '2.5*cos(3*t)'),
               'myauxfn2': (['w'], 'w/2'),
               'pcwfn': makeMultilinearRegrFn('x', xs, ys)}
    # targetlang is optional if the default python target is desired
    DSargs = args(fnspecs=fnspecs, name='ODEtest')
    DSargs.varspecs = fvarspecs
    DSargs.tdomain = [0.1, 2.1]
    DSargs.pars = {'k': 2, 'a': -0.5, 'x1': -3, 'x2': 0.5, 'x3': 1.5}
    DSargs.vars = 'w'
    # Wire the fixture's interpolated signal in as external input 'in'.
    DSargs.inputs = {'in': my_input.variables['example_input']}
    DSargs.algparams = {'init_step': 0.01}
    DSargs.checklevel = 2
    testODE = Vode_ODEsystem(DSargs)
    assert not testODE.defined
    testODE.set(ics={'w': 3.0},
                tdata=[0.11, 2.1])
    traj1 = testODE.compute('traj1')
    assert testODE.defined
    # Regression values for the solution and auxiliary variable.
    assert_almost_equal(traj1(0.5, 'w'), 8.9771499, 5)
    assert not testODE.diagnostics.hasWarnings()
    assert_almost_equal(traj1(0.2, ['aux_other']), 3.905819936, 5)
    print("\nNow adding a terminating co-ordinate threshold event")
    print("  and non-terminating timer event")
    # Show off the general-purpose, language-independent event creator:
    # 'makeZeroCrossEvent'
    ev_args_nonterm = {'name': 'monitor',
                       'eventtol': 1e-4,
                       'eventdelay': 1e-5,
                       'starttime': 0,
                       'active': True,
                       'term': False,
                       'precise': True}
    thresh_ev_nonterm = Events.makeZeroCrossEvent('in', 0,
                                                  ev_args_nonterm, inputnames=['in'])
    # Now show use of the python-target specific creator:
    # 'makePythonStateZeroCrossEvent', which is also only
    # able to make events for state variable threshold crossings
    ev_args_term = {'name': 'threshold',
                    'eventtol': 1e-4,
                    'eventdelay': 1e-5,
                    'starttime': 0,
                    'active': True,
                    'term': True,
                    'precise': True}
    thresh_ev_term = Events.makePythonStateZeroCrossEvent('w',
                                                          20, 1, ev_args_term)
    testODE.eventstruct.add([thresh_ev_nonterm, thresh_ev_term])
    print("Recomputing trajectory:")
    print("traj2 = testODE.compute('traj2')")
    traj2 = testODE.compute('traj2')
    print("\ntestODE.diagnostics.showWarnings() => ")
    testODE.diagnostics.showWarnings()
    print("\ntraj2.indepdomain.get() => ", traj2.indepdomain.get())
    # The terminal event must truncate the trajectory inside (1.16, 1.17).
    indep1 = traj2.indepdomain[1]
    assert indep1 < 1.17 and indep1 > 1.16
    mon_evs_found = testODE.getEvents('monitor')
    assert len(mon_evs_found) == 1
| 2.15625 | 2 |
tables/table-alter/parse-turk-info.py | yash-srivastava19/sempre | 812 | 12772129 | <filename>tables/table-alter/parse-turk-info.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, shutil, re, argparse, json
from codecs import open
from itertools import izip
from collections import defaultdict
def main():
    """Classify turked-table records from a TSV file and report/dump/filter.

    NOTE: Python 2 code (print statements, stream redirection syntax).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('infile')
    parser.add_argument('-p', '--plot', action='store_true',
            help='Plot number of classes vs number of denotations')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-s', '--summarize', action='store_true',
            help='Summarize the number of classes')
    group.add_argument('-d', '--dump', action='store_true',
            help='Dump the list of examples with at least one agreed classes')
    group.add_argument('-D', '--dataset-file',
            help='Take a dataset file and print a filtered list of only examples'
            ' with at least one agreed classes')
    args = parser.parse_args()
    # Read the TSV: first line is the header; each record becomes a dict.
    data = []
    with open(args.infile) as fin:
        print >> sys.stderr, 'Reading from', args.infile
        header = fin.readline().rstrip('\n').split('\t')
        for line in fin:
            data.append(dict(zip(header, line.rstrip('\n').split('\t'))))
    print >> sys.stderr, 'Read', len(data), 'records.'
    # ['id', 'numDerivs', 'allTurkedTables', 'agreedTurkedTables',
    #  'origTableTarget', 'origTableTurkedTarget', 'origTableFlag',
    #  'numClassesMatched', 'numDerivsMatched']
    # Classify examples
    no_derivs = []
    orig_table_mismatch = []
    no_classes_matched = []
    classes_matched = defaultdict(list)
    plt_num_classes, plt_num_derivs = [], []
    for record in data:
        if record['numDerivs'] == '0':
            no_derivs.append(record)
            assert record['numDerivsMatched'] == '0'
            continue
        if record['origTableFlag'] == 'mismatched':
            orig_table_mismatch.append(record)
            #assert record['numDerivsMatched'] == '0'
            continue
        if record['numClassesMatched'] == '0':
            no_classes_matched.append(record)
            assert record['numDerivsMatched'] == '0'
            continue
        assert record['numDerivsMatched'] != '0'
        num_classes = int(record['numClassesMatched'])
        plt_num_classes.append(num_classes)
        plt_num_derivs.append(int(record['numDerivsMatched']))
        # Bucket by number of matched classes; 10+ goes to a catch-all key.
        if num_classes < 10:
            classes_matched[num_classes].append(record)
        else:
            classes_matched['> 10'].append(record)
    if args.summarize:
        print 'No derivs:', len(no_derivs)
        print 'Original table mismatched:', len(orig_table_mismatch)
        print 'No classes matched:', len(no_classes_matched)
        print 'Classes matched:'
        total = 0
        for key in sorted(classes_matched):
            num_matches = len(classes_matched[key])
            total += num_matches
            print '  {}: {} (cum = {})'.format(key, num_matches, total)
    if args.plot:
        # Imported lazily so matplotlib is only required for plotting runs.
        import matplotlib.pyplot as plt
        plt.scatter(plt_num_classes, plt_num_derivs)
        plt.show()
    if args.dump:
        for key in sorted(classes_matched):
            for x in classes_matched[key]:
                print x['id']
    if args.dataset_file:
        # Keep only dataset lines whose 0-based index matches a matched id.
        indices = set(int(x['id'].replace('nt-', ''))
                for y in classes_matched.values() for x in y)
        count_all, count_filtered = 0, 0
        with open(args.dataset_file, 'r', 'utf8') as fin:
            for i, line in enumerate(fin):
                count_all += 1
                if i in indices:
                    print line.rstrip('\n')
                    count_filtered += 1
        print >> sys.stderr, 'Printed {} / {} lines'.format(count_filtered, count_all)
if __name__ == '__main__':
main()
| 2.640625 | 3 |
src/pudl/metadata/dfs.py | Wheelspawn/pudl | 0 | 12772130 | """Static database tables."""
import pandas as pd
FERC_ACCOUNTS: pd.DataFrame = pd.DataFrame(
columns=['row_number', 'ferc_account_id', 'ferc_account_description'],
data=[
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment (Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional Transmission and Market Operation Plant'),
(84, 'subtotal_regional_transmission',
'Subtotal: Transmission and Market Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')
]
)
"""
FERC electric plant account IDs with associated row numbers and descriptions.
From FERC Form 1 pages 204-207, Electric Plant in Service.
Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
"""
FERC_DEPRECIATION_LINES: pd.DataFrame = pd.DataFrame(
columns=['row_number', 'line_id', 'ferc_account_description'],
data=[
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement',
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others',
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing', 'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified', 'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year',
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year', 'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)'),
],
)
"""
Row numbers, FERC account IDs, and FERC account descriptions.
From FERC Form 1 page 219, Accumulated Provision for Depreciation of electric
utility plant (Account 108).
"""
| 2.703125 | 3 |
simple/game_loop_global.py | loyalgarlic/snakepit-game | 124 | 12772131 | import asyncio
from aiohttp import web
async def handle(request):
    """Serve the static index.html page as HTML.

    Fix: the original opened the file without ever closing it (no context
    manager), leaking the handle if read() raised; `with` guarantees closure.
    """
    with open("index.html", 'rb') as index:
        content = index.read()
    return web.Response(body=content, content_type='text/html')
async def wshandler(request):
    """Handle one websocket client: lazily start the shared game loop, echo
    received key codes back, and cancel the loop when the last client leaves."""
    app = request.app
    ws = web.WebSocketResponse()
    await ws.prepare(request)
    # (Re)start the single shared game-loop task if it is not running.
    if app["game_loop"] is None or \
            app["game_loop"].cancelled():
        app["game_loop"] = asyncio.ensure_future(game_loop(app))
        # this is required to propagate exceptions
        app["game_loop"].add_done_callback(lambda t: t.result()
                                           if not t.cancelled() else None)
    app["sockets"].append(ws)
    # Receive until the client closes or an error arrives.
    while 1:
        msg = await ws.receive()
        if msg.tp == web.MsgType.text:
            ws.send_str("Pressed key code: {}".format(msg.data))
            print("Got message %s" % msg.data)
        elif msg.tp == web.MsgType.close or\
                msg.tp == web.MsgType.error:
            break
    # Unregister this socket; cancel the loop when nobody is connected.
    app["sockets"].remove(ws)
    if len(app["sockets"]) == 0:
        print("Stopping game loop")
        app["game_loop"].cancel()
    print("Closed connection")
    return ws
async def game_loop(app):
    """Broadcast a heartbeat message to every connected socket every 2 seconds."""
    print("Game loop started")
    while True:
        for socket in app["sockets"]:
            socket.send_str("game loop passed")
        await asyncio.sleep(2)
# Application wiring: a shared list of open sockets plus a single
# lazily-started background game-loop task (created inside wshandler).
app = web.Application()
app["sockets"] = []
app["game_loop"] = None
app.router.add_route('GET', '/connect', wshandler)
app.router.add_route('GET', '/', handle)
web.run_app(app)
| 2.921875 | 3 |
merge_sorted_array_88/main.py | dhanraj-vedanth/Leetcode | 0 | 12772132 | from typing import List
class Solution:
    """In-place merge of two sorted arrays (LeetCode 88)."""

    def merge(
        self, nums1: List[int], m: int, nums2: List[int], n: int
    ) -> None:
        """
        Do not return anything, modify nums1 in-place instead.
        """
        # Fill nums1 from the back so its valid prefix is never overwritten.
        i, j = m - 1, n - 1
        for write in range(m + n - 1, -1, -1):
            if j < 0:
                # nums2 exhausted: the remaining nums1 prefix is already placed.
                break
            if i >= 0 and nums1[i] > nums2[j]:
                nums1[write] = nums1[i]
                i -= 1
            else:
                nums1[write] = nums2[j]
                j -= 1
        return nums1
| 3.9375 | 4 |
mkconfig/tests/test_cement_cli.py | mcfongtw/MkConfig | 1 | 12772133 | import os
from mkconfig.conf.utils import Utils
from mkconfig.env import setup_logging_with_details, Configurations
import logging
from cement.utils import test
setup_logging_with_details()
logger = logging.getLogger(__name__)
from mkconfig.core.cli import MkConfigApp
class TestMkConfigApp(test.CementTestCase):
app_class = MkConfigApp
example_dir = Configurations.getProjectRootDir() + '/examples/'
    def setUp(self):
        # Mark the start of each test in the log for easier CI debugging.
        logger.info('Unit Test [{}] Start'.format(self.id()))
def tearDown(self):
folder = Configurations.getTmpTemplateDir()
logger.info('Removing all files under %s', folder)
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception as e:
print(e)
logger.info('Removing all files under %s ---- DONE', folder)
logger.info('Unit Test [{}] Stop'.format(self.id()))
#########################################################################################
# Default Behavior
#########################################################################################
def test_normal_with_default_template(self):
config_control_string = """
app_list :
- cassandra
app_conf_dir : """ + self.example_dir
app = self.make_app(argv=['-d'+ config_control_string, '-i ', '-otest.output'])
app.setup()
app.run()
self.assertEqual(app.pargs.transf_desc_file, ' ')
self.assertEqual(app.pargs.transf_desc_string, config_control_string)
self.assertEqual(app.pargs.type, 'collectd_genericjmx')
self.assertEqual(app.pargs.output, 'test.output')
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.mbean.inc.stub')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.connection.inc.stub')))
app.close()
def test_file_not_found_1(self):
app1 = self.make_app(argv=['-o1', '-t1', '-i1'])
app1.setup()
with self.assertRaises(IOError):
app1.run()
app1.close()
#########################################################################################
# Collectd-GenericJmx specific
#########################################################################################
def test_normal_start_and_stop_on_jenkins_with_genericjmx(self):
config_control_string = """
app_list :
- jenkins
app_conf_dir : """ + self.example_dir
app = self.make_app(
argv=['-tcollectd_genericjmx', '-otest.output', '-i ', '-d'+ config_control_string,])
app.setup()
app.run()
self.assertEqual(app.pargs.transf_desc_file, ' ')
self.assertEqual(app.pargs.transf_desc_string, config_control_string)
self.assertEqual(app.pargs.type, 'collectd_genericjmx')
self.assertEqual(app.pargs.output, 'test.output')
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.mbean.inc.stub')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.connection.inc.stub')))
app.close()
def test_normal_start_and_stop_on_cassandra_with_genericjmx(self):
config_control_string = """
app_list :
- cassandra
app_conf_dir : """ + self.example_dir
app = self.make_app(
argv=['-tcollectd_genericjmx', '-otest.output', '-i ', '-d'+ config_control_string,])
app.setup()
app.run()
self.assertEqual(app.pargs.transf_desc_file, ' ')
self.assertEqual(app.pargs.transf_desc_string, config_control_string)
self.assertEqual(app.pargs.type, 'collectd_genericjmx')
self.assertEqual(app.pargs.output, 'test.output')
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.mbean.inc.stub')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.connection.inc.stub')))
app.close()
def test_normal_start_and_stop_with_apps_list_with_genericjmx(self):
config_control_string = """
app_list :
- cassandra
- jenkins
app_conf_dir : """ + self.example_dir
app = self.make_app(
argv=['-tcollectd_genericjmx', '-otest.output', '-i ', '-d'+ config_control_string,])
app.setup()
app.run()
self.assertEqual(app.pargs.transf_desc_file, ' ')
self.assertEqual(app.pargs.transf_desc_string, config_control_string)
self.assertEqual(app.pargs.output, 'test.output')
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.mbean.inc.stub')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.connection.inc.stub')))
app.close()
def test_normal_start_and_stop_with_all_exampl_apps_with_genericjmx(self):
config_control_string = """
app_list :
- cassandra
- jenkins
- jira
app_conf_dir : """ + self.example_dir
app = self.make_app(
argv=['-tcollectd_genericjmx', '-otest.output', '-i ', '-d'+ config_control_string])
app.setup()
app.run()
self.assertEqual(app.pargs.transf_desc_file, ' ')
self.assertEqual(app.pargs.transf_desc_string, config_control_string)
self.assertEqual(app.pargs.output, 'test.output')
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jira.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jira.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.mbean.inc.stub')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.connection.inc.stub')))
app.close()
#########################################################################################
# Collectd-FastJmx specific
#########################################################################################
def test_normal_start_and_stop_on_jenkins_with_fastjmx(self):
config_control_string = """
app_list :
- jenkins
app_conf_dir : """ + self.example_dir
app = self.make_app(
argv=['-tcollectd_fastjmx', '-otest.output', '-i ', '-d'+ config_control_string])
app.setup()
app.run()
self.assertEqual(app.pargs.transf_desc_file, ' ')
self.assertEqual(app.pargs.transf_desc_string, config_control_string)
self.assertEqual(app.pargs.type, 'collectd_fastjmx')
self.assertEqual(app.pargs.output, 'test.output')
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_fastjmx.mbean.inc.stub')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_fastjmx.connection.inc.stub')))
app.close()
def test_normal_start_and_stop_on_cassandra_with_fastjmx(self):
config_control_string = """
app_list :
- cassandra
app_conf_dir : """ + self.example_dir
app = self.make_app(
argv=['-tcollectd_fastjmx', '-otest.output', '-i ', '-d'+ config_control_string])
app.setup()
app.run()
self.assertEqual(app.pargs.transf_desc_file, ' ')
self.assertEqual(app.pargs.transf_desc_string, config_control_string)
self.assertEqual(app.pargs.type, 'collectd_fastjmx')
self.assertEqual(app.pargs.output, 'test.output')
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_fastjmx.mbean.inc.stub')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_fastjmx.connection.inc.stub')))
app.close()
def test_normal_start_and_stop_with_apps_list_with_fastjmx(self):
config_control_string = """
app_list :
- cassandra
- jenkins
app_conf_dir : """ + self.example_dir
app = self.make_app(
argv=['-tcollectd_fastjmx', '-otest.output', '-i ', '-d'+ config_control_string])
app.setup()
app.run()
self.assertEqual(app.pargs.transf_desc_file, ' ')
self.assertEqual(app.pargs.transf_desc_string, config_control_string)
self.assertEqual(app.pargs.output, 'test.output')
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_fastjmx.mbean.inc.stub')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_fastjmx.connection.inc.stub')))
app.close()
def test_normal_start_and_stop_with_all_exampl_apps_with_fastjmx(self):
config_control_string = """
app_list :
- cassandra
- jenkins
- jira
app_conf_dir : """ + self.example_dir
app = self.make_app(
argv=['-tcollectd_fastjmx', '-otest.output', '-i ', '-d'+ config_control_string])
app.setup()
app.run()
self.assertEqual(app.pargs.transf_desc_file, ' ')
self.assertEqual(app.pargs.transf_desc_string, config_control_string)
self.assertEqual(app.pargs.output, 'test.output')
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jira.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jira.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_fastjmx.mbean.inc.stub')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_fastjmx.connection.inc.stub')))
app.close()
| 2.234375 | 2 |
opentargets/__init__.py | machbio/opentargets-py | 0 | 12772134 | """
This module communicate with the Open Targets REST API with a simple client, and requires not knowledge of the API.
"""
import logging
from opentargets.conn import Connection, IterableResult
logger = logging.getLogger(__name__)
# NOTE(review): forcing DEBUG on a library logger at import time overrides
# the consuming application's logging configuration — confirm intentional.
logger.setLevel(logging.DEBUG)
class OpenTargetsClient(object):
    '''
    Main class to get data from the Open Targets REST API available at
    targetvalidation.org, or a private instance.
    '''

    _search_endpoint = '/public/search'
    _filter_associations_endpoint = '/public/association/filter'
    _get_associations_endpoint = '/public/association'
    _filter_evidence_endpoint = '/public/evidence/filter'
    _get_evidence_endpoint = '/public/evidence'
    _stats_endpoint = '/public/utils/stats'

    def __init__(self,
                 **kwargs
                 ):
        '''
        :param kwargs: all params forwarded to :class:`opentargets.conn.Connection` object
        '''
        self.conn = Connection(**kwargs)

    def __enter__(self):
        # Bug fix: previously `pass` returned None, so
        # ``with OpenTargetsClient() as client:`` bound None instead of
        # the client instance.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        # Closing the underlying connection is currently a no-op;
        # kept for API symmetry with __exit__.
        # self.conn.close()
        pass

    def _request(self, endpoint, params=None):
        '''Execute *endpoint* with *params* and return the IterableResult.'''
        result = IterableResult(self.conn)
        result(endpoint, params=params)
        return result

    def _resolve_id(self, query, filter_type, error_template):
        '''Resolve free-text *query* to an id via the search endpoint.

        :raises AttributeError: when the search yields an empty result.
        '''
        search_result = next(self.search(query, size=1, filter=filter_type))
        if not search_result:
            raise AttributeError(error_template.format(query))
        resolved_id = search_result['id']
        logger.debug('{} resolved to id {}'.format(query, resolved_id))
        return resolved_id

    def search(self, query, **kwargs):
        '''Free-text search over targets and diseases.'''
        kwargs['q'] = query
        return self._request(self._search_endpoint, kwargs)

    def get_association(self, association_id, **kwargs):
        '''Fetch a single association by its id.'''
        kwargs['id'] = association_id
        return self._request(self._get_associations_endpoint, kwargs)

    def filter_associations(self, **kwargs):
        '''Filter associations by arbitrary REST parameters.'''
        return self._request(self._filter_associations_endpoint, kwargs)

    def get_associations_for_target(self, target, **kwargs):
        '''Return associations for *target* (gene symbol or Ensembl id).'''
        if not isinstance(target, str):
            raise AttributeError('target must be of type str')
        if not target.startswith('ENSG'):
            target_id = self._resolve_id(
                target, 'target',
                'cannot find an ensembl gene id for target {}')
        else:
            target_id = target
        return self.filter_associations(target=target_id, **kwargs)

    def get_associations_for_disease(self, disease, **kwargs):
        '''Return associations for *disease* (name or ontology id).'''
        if not isinstance(disease, str):
            raise AttributeError('disease must be of type str')
        # Bug fix: **kwargs were previously dropped; forward them.
        results = self.filter_associations(disease=disease, **kwargs)
        if not results:
            disease_id = self._resolve_id(
                disease, 'disease',
                'cannot find an disease id for disease {}')
            results = self.filter_associations(disease=disease_id, **kwargs)
        return results

    def get_evidence(self, evidence_id, **kwargs):
        '''Fetch a single evidence string by its id.'''
        kwargs['id'] = evidence_id
        return self._request(self._get_evidence_endpoint, kwargs)

    def filter_evidence(self, **kwargs):
        '''Filter evidence strings by arbitrary REST parameters.'''
        return self._request(self._filter_evidence_endpoint, kwargs)

    def get_evidence_for_target(self, target, **kwargs):
        '''Return evidence for *target* (gene symbol or Ensembl id).'''
        if not isinstance(target, str):
            raise AttributeError('target must be of type str')
        if not target.startswith('ENSG'):
            target_id = self._resolve_id(
                target, 'target',
                'cannot find an ensembl gene id for target {}')
        else:
            target_id = target
        return self.filter_evidence(target=target_id, **kwargs)

    def get_evidence_for_disease(self, disease, **kwargs):
        '''Return evidence for *disease* (name or ontology id).'''
        if not isinstance(disease, str):
            raise AttributeError('disease must be of type str')
        # Bug fix: **kwargs were previously dropped; forward them.
        results = self.filter_evidence(disease=disease, **kwargs)
        if not results:
            disease_id = self._resolve_id(
                disease, 'disease',
                'cannot find an disease id for disease {}')
            results = self.filter_evidence(disease=disease_id, **kwargs)
        return results

    def get_stats(self):
        '''Return API-wide statistics (no parameters sent).'''
        result = IterableResult(self.conn)
        result(self._stats_endpoint)
        return result
| 2.53125 | 3 |
build/lib.linux-x86_64-2.7/moca/mrs/admin.py | satvikdhandhania/vit-11 | 1 | 12772135 | <reponame>satvikdhandhania/vit-11
import logging
from django.contrib import admin
from moca.mrs.util import enable_logging
#from requestlog import util
from moca.mrs.models import Patient, Procedure, BinaryResource, SavedProcedure, Notification, QueueElement, Client
from moca.mrs import api
class ProcedureAdmin(admin.ModelAdmin):
    """Admin changelist for Procedure: shows title and creation timestamp."""
    list_display = ('title', 'created')
class NotificationAdmin(admin.ModelAdmin):
    """Admin changelist for Notification, including delivery status."""
    list_display = ('client', 'patient_id', 'procedure_id', 'message', 'delivered')
class SavedProcedureAdmin(admin.ModelAdmin):
    """Admin for SavedProcedure with a bulk "upload to EMR" action."""
    list_display = ('guid', 'procedure_guid', 'client', 'uploaded', 'created',)
    actions = ['upload_to_emr']

    @enable_logging
    def upload_to_emr(self, request, queryset):
        """Attempt to upload each selected saved procedure to the MRS.

        Collects the (success, message) pair returned by
        ``api.maybe_upload_procedure`` for every case and reports an
        aggregate singular/plural status message back to the admin user.
        """
        logging.info("User manually requested upload of cases: %s" % ', '.join([case.guid for case in queryset]))
        results = []
        for case in queryset:
            logging.info("Attempting to upload case %s." % case.guid)
            results.append(api.maybe_upload_procedure(case))

        total = len(queryset)
        # Success only when every upload reported True.
        if all(ok for ok, _ in results):
            if total > 1:
                message = "%d cases uploaded successfully to the MRS: " % total
            else:
                message = "1 case uploaded successfully: "
        else:
            if total > 1:
                message = "Failed to upload some cases to the MRS: "
            else:
                message = "Failed to upload case to the MRS: "
        message += ', '.join([m for _, m in results])
        self.message_user(request, message)
    upload_to_emr.short_description = "Upload selected procedures to Medical Records System"
class BinaryResourceAdmin(admin.ModelAdmin):
    """Admin changelist for BinaryResource: upload progress and metadata."""
    list_display = ('guid', 'procedure', 'element_id', 'content_type', 'upload_progress', 'total_size', 'uploaded', 'created')
class ClientAdmin(admin.ModelAdmin):
    """Admin changelist for Client: name and last contact time."""
    list_display = ('name', 'last_seen',)
# Register all MRS models with the Django admin site; models registered
# without an explicit ModelAdmin fall back to the default admin options.
admin.site.register(Client, ClientAdmin)
admin.site.register(Procedure, ProcedureAdmin)
admin.site.register(Patient)
admin.site.register(BinaryResource, BinaryResourceAdmin)
admin.site.register(SavedProcedure, SavedProcedureAdmin)
admin.site.register(Notification, NotificationAdmin)
admin.site.register(QueueElement)
| 1.984375 | 2 |
examples/adspygoogle/dfa/v1_20/create_spotlight_activity.py | cherry-wb/googleads-python-lib | 0 | 12772136 | <gh_stars>0
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates a spotlight activity in a given activity group. To
create an activity group, run create_spotlight_activity_group.py. To get tag
methods types, run get_tag_methods.py. To get activity type IDs, run
get_activity_types.py.
Tags: spotlight.saveSpotlightActivity
"""
__author__ = '<EMAIL> (<NAME>)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfaClient
# Placeholders the user must fill in before running this example.
ACTIVITY_GROUP_ID = 'INSERT_ACTIVITY_GROUP_ID_HERE'  # from create_spotlight_activity_group.py
ACTIVITY_TYPE_ID = 'INSERT_ACTIVITY_TYPE_ID_HERE'  # from get_activity_types.py
TAG_METHOD_TYPE_ID = 'INSERT_TAG_METHOD_TYPE_ID_HERE'  # from get_tag_methods.py
URL = 'INSERT_EXPECTED_URL_HERE'
ACTIVITY_NAME = 'INSERT_ACTIVITY_NAME_HERE'
def main(client, activity_group_id, activity_type_id, tag_method_type_id, url,
         activity_name):
  """Creates and saves a spotlight activity via the DFA Spotlight service.

  NOTE(review): this example is Python 2 (print statement); left as-is.
  """
  # Initialize appropriate service.
  spotlight_service = client.GetSpotlightService(
      'https://advertisersapitest.doubleclick.net', 'v1.20')

  # Construct and save spotlight activity.
  spotlight_activity = {
      'name': activity_name,
      'activityGroupId': activity_group_id,
      'activityTypeId': activity_type_id,
      'tagMethodTypeId': tag_method_type_id,
      'expectedUrl': url
  }
  result = spotlight_service.SaveSpotlightActivity(spotlight_activity)[0]

  # Display results.
  print 'Spotlight activity with ID \'%s\' was created.' % result['id']
if __name__ == '__main__':
  # Initialize client object (credentials resolved relative to repo root).
  client = DfaClient(path=os.path.join('..', '..', '..', '..'))
  main(client, ACTIVITY_GROUP_ID, ACTIVITY_TYPE_ID, TAG_METHOD_TYPE_ID, URL,
       ACTIVITY_NAME)
| 2.265625 | 2 |
appengine/src/greenday_core/models/event.py | meedan/montage | 6 | 12772137 | """
Defines the model to hold application events
"""
from django.conf import settings
from django.db import models
from django.forms.models import model_to_dict
from ..constants import EventKind, EventModel, EventCommonCodes, CODES_PER_MODEL
from .user import get_sentinel_user
class Event(models.Model):
"""
Represents an application event
"""
class Meta:
ordering = ['-timestamp', '-pk']
timestamp = models.DateTimeField()
kind = models.IntegerField(EventKind.choices)
object_kind = models.IntegerField()
event_kind = models.IntegerField()
object_id = models.IntegerField(null=True)
project_id = models.IntegerField(null=True)
video_id = models.IntegerField(null=True)
meta = models.TextField(null=True, blank=True)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True,
blank=True,
related_name="actions",
on_delete=models.SET(get_sentinel_user),
db_constraint=False)
@property
def object_type(self):
"""
Gets the :class:`greenday_core.constants.EventModel <greenday_core.constants.EventModel>`
enum type of this object
"""
return EventModel(self.kind/CODES_PER_MODEL).name
@property
def event_type(self):
"""
Gets the :class:`greenday_core.constants.EventKind <greenday_core.constants.EventKind>`
enum type of this object
"""
code = self.kind % CODES_PER_MODEL
try:
common_code = EventCommonCodes(code)
except ValueError:
return EventKind(self.kind).name
else:
return common_code.name
def to_dict(self):
"""
Return dict of the object's field data
"""
d = model_to_dict(self)
d.update({
"object_type": self.object_type,
"event_type": self.event_type
})
return d
def save(self, *args, **kwargs):
"""
Override save to get the `object_kind` and `event_kind` from the compound `kind` field
"""
self.object_kind, self.event_kind = divmod(self.kind, CODES_PER_MODEL)
return super(Event, self).save(*args, **kwargs)
def __repr__(self):
return unicode(self)
def __unicode__(self):
return "{kind}(id={id}, project_id={project_id}, \
by {email} at {timestamp}".format(
kind=EventKind(self.kind).name,
id=self.object_id,
project_id=self.project_id,
email=self.user.email if self.user else "<none>",
timestamp=self.timestamp
)
| 2.5 | 2 |
torchFI/modules/eltwise.py | bfgoldstein/torchfi | 6 | 12772138 | ###############################################################
# This file was created using part of Distiller project developed by:
# NervanaSystems https://github.com/NervanaSystems/distiller
#
# Changes were applied to satisfy torchFI project needs
###############################################################
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from operator import setitem
from functools import reduce
import torch
import torch.nn as nn
import distiller.modules as dist
class FIEltwiseAdd(dist.EltwiseAdd):
    """Element-wise add layer that can inject a fault into its first input."""

    def __init__(self, fi, name, inplace=False):
        super(FIEltwiseAdd, self).__init__(inplace)
        self.fi = fi
        self.name = name
        self.id = fi.addNewLayer(name, FIEltwiseAdd)

    def forward(self, *input):
        # Delegate untouched unless this layer is the injection target.
        if not (self.fi.injectionMode and self.id == self.fi.injectionLayer):
            return super(FIEltwiseAdd, self).forward(*input)
        acc = input[0].clone()
        indices, faulty_val = self.fi.inject(acc.data)
        acc.data[tuple(indices)] = faulty_val
        for tensor in input[1:]:
            if self.inplace:
                acc += tensor
            else:
                acc = acc + tensor
        return acc

    @staticmethod
    def from_pytorch_impl(fi, name, eltwiseadd: dist.EltwiseAdd):
        """Wrap an existing distiller EltwiseAdd in a fault-injecting copy."""
        return FIEltwiseAdd(fi, name, eltwiseadd.inplace)

    def __repr__(self):
        return "%s(name=%s, inplace=%s, id=%d)" % (
            self.__class__.__name__,
            self.name,
            str(self.inplace),
            self.id)
class FIEltwiseMult(dist.EltwiseMult):
    """Element-wise multiply layer that can inject a fault into its first input."""

    def __init__(self, fi, name, inplace=False):
        super(FIEltwiseMult, self).__init__(inplace)
        self.fi = fi
        self.name = name
        self.id = fi.addNewLayer(name, FIEltwiseMult)

    def forward(self, *input):
        # Delegate untouched unless this layer is the injection target.
        if not (self.fi.injectionMode and self.id == self.fi.injectionLayer):
            return super(FIEltwiseMult, self).forward(*input)
        acc = input[0].clone()
        indices, faulty_val = self.fi.inject(acc.data)
        acc.data[tuple(indices)] = faulty_val
        for tensor in input[1:]:
            if self.inplace:
                acc *= tensor
            else:
                acc = acc * tensor
        return acc

    @staticmethod
    def from_pytorch_impl(fi, name, eltwisemult: dist.EltwiseMult):
        """Wrap an existing distiller EltwiseMult in a fault-injecting copy."""
        return FIEltwiseMult(fi, name, eltwisemult.inplace)

    def __repr__(self):
        return "%s(name=%s, inplace=%s, id=%d)" % (
            self.__class__.__name__,
            self.name,
            str(self.inplace),
            self.id)
#TODO: Check for fault injection on Y value
class FIEltwiseDiv(dist.EltwiseDiv):
    """Element-wise divide layer; faults are injected into the numerator only."""

    def __init__(self, fi, name, inplace=False):
        super(FIEltwiseDiv, self).__init__(inplace)
        self.fi = fi
        self.name = name
        self.id = fi.addNewLayer(name, FIEltwiseDiv)

    def forward(self, x: torch.Tensor, y):
        # Delegate untouched unless this layer is the injection target.
        if not (self.fi.injectionMode and self.id == self.fi.injectionLayer):
            return super(FIEltwiseDiv, self).forward(x, y)
        numerator = x.clone()
        indices, faulty_val = self.fi.inject(numerator.data)
        numerator.data[tuple(indices)] = faulty_val
        return numerator.div_(y) if self.inplace else numerator.div(y)

    @staticmethod
    def from_pytorch_impl(fi, name, eltwisediv: dist.EltwiseDiv):
        """Wrap an existing distiller EltwiseDiv in a fault-injecting copy."""
        return FIEltwiseDiv(fi, name, eltwisediv.inplace)

    def __repr__(self):
        return "%s(name=%s, inplace=%s, id=%d)" % (
            self.__class__.__name__,
            self.name,
            str(self.inplace),
            self.id)
| 1.9375 | 2 |
class dummy_model:
    """Stand-in model: answers any query with a report of its length."""

    def query(self, q):
        """Return a short message describing the length of *q*."""
        length = len(q)
        return "Query had length {}".format(length)
| 2.703125 | 3 |
protonfixes/gamefixes/280200.py | Citiroller/protonfixes | 213 | 12772140 | <reponame>Citiroller/protonfixes<filename>protonfixes/gamefixes/280200.py
""" Game fix for Eterium
"""
#pylint: disable=C0103
from protonfixes import util
def main():
    """Apply the Eterium game fix: install the XNA 4.0 runtime via protontricks."""
    util.protontricks('xna40')
| 1.242188 | 1 |
Chapter39.Decorators/3-calltracer_descr-for-method-1.py | mindnhand/Learning-Python-5th | 0 | 12772141 | #!/usr/bin/env python3
#encoding=utf-8
#--------------------------------------------
# Usage: python3 3-calltracer_descr-for-method.py
# Description: make descriptor class as decorator to decorate class method
#--------------------------------------------
class Tracer:  # a decorator + descriptor
    """Decorator that counts calls; descriptor protocol rebinds methods."""

    def __init__(self, func):  # triggered by the @ decoration
        print('in property descriptor __init__')
        self.func = func
        self.calls = 0

    def __call__(self, *args, **kwargs):  # intercepts calls to the wrapped func
        print('in property descriptor __call__')
        self.calls += 1
        print('call %s to %s' % (self.calls, self.func.__name__))
        return self.func(*args, **kwargs)

    def __get__(self, instance, owner):  # intercepts method attribute fetch
        print('in property descriptor __get__')
        def bound(*args, **kwargs):  # carries state in both self and instance
            print('in enclosing method wrapper')
            return self(instance, *args, **kwargs)  # re-enters __call__
        return bound
# apply to normal function
@Tracer
def spam(a, b, c):
    """Demo function: prints the sum of its three arguments."""
    print('in original function spam')
    total = a + b + c
    print('<', total, '>')
@Tracer
def eggs(x, y):
    """Demo function: prints x raised to the power y."""
    print('in original function eggs')
    power = x ** y
    print('<', power, '>')
# apply to class method
class Person:
    """Demo class whose methods are traced through the Tracer descriptor."""

    def __init__(self, name, pay):
        print('in original class Person __init__')
        self.name = name
        self.pay = pay

    @Tracer
    def giveRaise(self, percent):
        """Increase pay by *percent* (e.g. .10 for 10%)."""
        print('in decorated class giveRaise method')
        self.pay = self.pay * (1.0 + percent)

    @Tracer
    def lastName(self):
        """Return the final whitespace-separated word of the name."""
        print('in decorated class lastName method')
        return self.name.split()[-1]
if __name__ == '__main__':
    print('\n\033[1;36mEntrance\033[0m\n')
    # Trace plain function calls: each Tracer instance keeps its own counter.
    print('\n\033[1;37mApply to simple function\033[0m\n')
    spam(1, 2, 3)
    spam(a=4, b=5, c=6)
    # Trace bound-method calls: Tracer.__get__ supplies the instance.
    print('\n\033[1;37mApply to class method\033[0m\n')
    bob = Person('<NAME>', 50000)
    sue = Person('<NAME>', 100000)
    print('<', bob.name, sue.name, '>')
    sue.giveRaise(.10)
    print(int(sue.pay))
    print('<', bob.lastName(), sue.lastName(), '>')
'''
Execution results:
Chapter39.Decorators]# python3 3-calltracer_descr-for-method-1.py
in property descriptor __init__
in property descriptor __init__
in property descriptor __init__
in property descriptor __init__
[1;36mEntrance[0m
[1;37mApply to simple function[0m
in property descriptor __call__
call 1 to spam
in original function spam
< 6 >
in property descriptor __call__
call 2 to spam
in original function spam
< 15 >
[1;37mApply to class method[0m
in original class Person __init__
in original class Person __init__
< <NAME> >
in property descriptor __get__
in enclosing method wrapper
in property descriptor __call__
call 1 to giveRaise
in decorated class giveRaise method
110000
in property descriptor __get__
in enclosing method wrapper
in property descriptor __call__
call 1 to lastName
in decorated class lastName method
in property descriptor __get__
in enclosing method wrapper
in property descriptor __call__
call 2 to lastName
in decorated class lastName method
< <NAME> >
'''
| 3.421875 | 3 |
src/tests/datastructures/test_mergefindsets.py | DavidLlorens/algoritmia | 6 | 12772142 | <reponame>DavidLlorens/algoritmia
#coding: latin1
import unittest
from algoritmia.datastructures.mergefindsets import (NaiveMergeFindSet, RankUnionMFset,
PathCompressionMFset, MergeFindSet)
from algoritmia.datastructures.maps import IntKeyMap
class TestNaiveMFset(unittest.TestCase):
    """Exercises NaiveMergeFindSet with default and IntKeyMap-backed storage."""

    def setUp(self):
        self.mf1 = NaiveMergeFindSet()
        self.mf2 = NaiveMergeFindSet(((i,) for i in range(10)), createMap=lambda nodes: IntKeyMap(capacity=max(nodes)+1))

    def _partition(self, mfset):
        """Return the partition of 0..9 induced by *mfset* as a set of blocks."""
        groups = {}
        for i in range(10):
            groups.setdefault(mfset.find(i), set()).add(i)
        return set(frozenset(v) for v in groups.values())

    def test_mfsets(self):
        """Adds, merges and checks representative consistency on both sets."""
        for i in range(10):
            self.mf1.add(i)
        for i in range(10):
            self.assertEqual(self.mf1.find(i), i)
            self.assertEqual(self.mf2.find(i), i)
        for i in range(0, 10, 2):
            self.mf1.merge(i, i+1)
            self.mf2.merge(i, i+1)
        for i in range(0, 10, 2):
            self.assertEqual(self.mf1.find(i), self.mf1.find(i+1))
            self.assertEqual(self.mf2.find(i), self.mf2.find(i+1))
        for i in range(0, 10-3, 4):
            self.mf1.merge(i, i+3)
            self.mf2.merge(i, i+3)
        for i in range(0, 10-4, 4):
            for offset in (1, 2, 3):
                self.assertEqual(self.mf1.find(i), self.mf1.find(i+offset))
                self.assertEqual(self.mf2.find(i), self.mf2.find(i+offset))

    def test_repr(self):
        """repr() must evaluate back to an equivalent merge-find set.

        Bug fix: the original recomputed the second partition from self.mf2
        instead of the reconstructed object, so the round-trip was never
        actually tested (it also shadowed the builtin ``all``).
        """
        before = self._partition(self.mf2)
        reconstructed = eval(repr(self.mf2))
        after = self._partition(reconstructed)
        self.assertEqual(before, after)
class TestRankUnionMFset(unittest.TestCase):
    """Exercises RankUnionMFset with default and IntKeyMap-backed storage."""

    def setUp(self):
        self.mf1 = RankUnionMFset()
        self.mf2 = RankUnionMFset(((i,) for i in range(10)), createMap=lambda nodes: IntKeyMap(capacity=max(nodes)+1))

    def _partition(self, mfset):
        """Return the partition of 0..9 induced by *mfset* as a set of blocks."""
        groups = {}
        for i in range(10):
            groups.setdefault(mfset.find(i), set()).add(i)
        return set(frozenset(v) for v in groups.values())

    def test_mfsets(self):
        """Adds, merges and checks representative consistency on both sets."""
        for i in range(10):
            self.mf1.add(i)
        for i in range(10):
            self.assertEqual(self.mf1.find(i), i)
            self.assertEqual(self.mf2.find(i), i)
        for i in range(0, 10, 2):
            self.mf1.merge(i, i+1)
            self.mf2.merge(i, i+1)
        for i in range(0, 10, 2):
            self.assertEqual(self.mf1.find(i), self.mf1.find(i+1))
            self.assertEqual(self.mf2.find(i), self.mf2.find(i+1))
        for i in range(0, 10-3, 4):
            self.mf1.merge(i, i+3)
            self.mf2.merge(i, i+3)
        for i in range(0, 10-4, 4):
            for offset in (1, 2, 3):
                self.assertEqual(self.mf1.find(i), self.mf1.find(i+offset))
                self.assertEqual(self.mf2.find(i), self.mf2.find(i+offset))

    def test_repr(self):
        """repr() must evaluate back to an equivalent merge-find set.

        Bug fix: the original recomputed the second partition from self.mf2
        instead of the reconstructed object, so the round-trip was never
        actually tested (it also shadowed the builtin ``all``).
        """
        before = self._partition(self.mf2)
        reconstructed = eval(repr(self.mf2))
        after = self._partition(reconstructed)
        self.assertEqual(before, after)
class TestPathCompressionMFset(unittest.TestCase):
    """Exercises PathCompressionMFset with default and IntKeyMap-backed storage."""

    def setUp(self):
        self.mf1 = PathCompressionMFset()
        self.mf2 = PathCompressionMFset(((i,) for i in range(10)), createMap=lambda nodes: IntKeyMap(capacity=max(nodes)+1))

    def _partition(self, mfset):
        """Return the partition of 0..9 induced by *mfset* as a set of blocks."""
        groups = {}
        for i in range(10):
            groups.setdefault(mfset.find(i), set()).add(i)
        return set(frozenset(v) for v in groups.values())

    def test_mfsets(self):
        """Adds, merges and checks representative consistency on both sets."""
        for i in range(10):
            self.mf1.add(i)
        for i in range(10):
            self.assertEqual(self.mf1.find(i), i)
            self.assertEqual(self.mf2.find(i), i)
        for i in range(0, 10, 2):
            self.mf1.merge(i, i+1)
            self.mf2.merge(i, i+1)
        for i in range(0, 10, 2):
            self.assertEqual(self.mf1.find(i), self.mf1.find(i+1))
            self.assertEqual(self.mf2.find(i), self.mf2.find(i+1))
        for i in range(0, 10-3, 4):
            self.mf1.merge(i, i+3)
            self.mf2.merge(i, i+3)
        for i in range(0, 10-4, 4):
            for offset in (1, 2, 3):
                self.assertEqual(self.mf1.find(i), self.mf1.find(i+offset))
                self.assertEqual(self.mf2.find(i), self.mf2.find(i+offset))

    def test_repr(self):
        """repr() must evaluate back to an equivalent merge-find set.

        Bug fix: the original recomputed the second partition from self.mf2
        instead of the reconstructed object, so the round-trip was never
        actually tested (it also shadowed the builtin ``all``).
        """
        before = self._partition(self.mf2)
        reconstructed = eval(repr(self.mf2))
        after = self._partition(reconstructed)
        self.assertEqual(before, after)
class TestMFset(unittest.TestCase):
    """Exercise the generic MergeFindSet front-end in two configurations."""

    def setUp(self):
        self.mf1 = MergeFindSet()
        self.mf2 = MergeFindSet(((i,) for i in range(10)), createMap=lambda nodes: IntKeyMap(capacity=max(nodes)+1))

    def test_mfsets(self):
        """Singleton creation, pair merges, then quadruple merges."""
        for item in range(10):
            self.mf1.add(item)
        # Fresh elements must be their own representatives.
        for item in range(10):
            self.assertEqual(self.mf1.find(item), item)
            self.assertEqual(self.mf2.find(item), item)
        # Join neighbours pairwise: (0,1), (2,3), ...
        for base in range(0, 10, 2):
            self.mf1.merge(base, base + 1)
            self.mf2.merge(base, base + 1)
        for base in range(0, 10, 2):
            self.assertEqual(self.mf1.find(base), self.mf1.find(base + 1))
            self.assertEqual(self.mf2.find(base), self.mf2.find(base + 1))
        # Join the pairs into blocks of four and verify every member.
        for base in range(0, 10 - 3, 4):
            self.mf1.merge(base, base + 3)
            self.mf2.merge(base, base + 3)
        for base in range(0, 10 - 4, 4):
            for offset in (1, 2, 3):
                self.assertEqual(self.mf1.find(base), self.mf1.find(base + offset))
            for offset in (1, 2, 3):
                self.assertEqual(self.mf2.find(base), self.mf2.find(base + offset))
if __name__ == "__main__":
    # Run the whole test suite when executed as a script.
    unittest.main()
examples/get_screenshot.py | Alsan/pySphere | 0 | 12772143 | #!/usr/bin/env python
from pysphere import VIServer

# Connect to the ESX host (hostname and credentials are placeholders).
server = VIServer()
server.connect("my.esx.host.example.org", "username", "secret")
# Locate the VM by its datastore path and wait until the guest tools
# are running, which is required before in-guest operations.
vm = server.get_vm_by_path("[datastore] path/to/file.vmx")
vm.wait_for_tools()
# Authenticate inside the guest OS, then capture a screenshot of the
# console to a local PNG, replacing any existing file.
vm.login_in_guest("Administrator", "secret")
vm.get_screenshot("vm_screenshot.png", overwrite=True)
server.disconnect()
| 2.203125 | 2 |
to_nwb/mworks/data.py | mpompolas/to_nwb | 1 | 12772144 | import os
import shutil
from ._mworks import ReservedEventCode, _MWKFile, _MWKStream
class IndexingException(IOError):
    """Raised when an (un)indexing operation is attempted in an invalid
    state, e.g. re-indexing a file that has never been indexed."""
    pass
class MWKFile(_MWKFile):
    """High-level wrapper around the native ``_MWKFile`` event reader.

    Adds codec caching, event-iteration helpers, context-manager support
    and index-directory management on top of the low-level API.
    """

    def __init__(self, file_name):
        super(MWKFile, self).__init__(file_name)
        # Lazily-built codec caches; invalidated on close().
        self._codec = None
        self._reverse_codec = None

    def close(self):
        super(MWKFile, self).close()
        # Drop cached codecs so a re-opened file is re-read from disk.
        self._codec = None
        self._reverse_codec = None

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, type, value, tb):
        self.close()

    @property
    def exists(self):
        """True if the underlying file (or index directory) exists."""
        return os.path.exists(self.file)

    def _prepare_events_iter(self, codes=(), time_range=(None, None)):
        """Translate code names to numbers and select the event window.

        ``codes`` may mix variable tag names and raw numeric codes; an
        empty selection means "all codes".  ``time_range`` endpoints of
        None default to the file's minimum/maximum time.
        """
        if not codes:
            codes = []
        else:
            # Accept either tag names or raw numeric codes.
            reverse_codec = self.reverse_codec
            codes = [reverse_codec.get(c, c) for c in codes]
        min_time, max_time = time_range
        if min_time is None:
            min_time = self.minimum_time
        if max_time is None:
            max_time = self.maximum_time
        self._select_events(codes, min_time, max_time)

    def get_events_iter(self, **kwargs):
        """Yield matching events one at a time (see _prepare_events_iter)."""
        self._prepare_events_iter(**kwargs)
        while True:
            evt = self._get_next_event()
            if evt.empty:
                break
            yield evt

    def get_events(self, **kwargs):
        """Return all matching events at once (see _prepare_events_iter)."""
        self._prepare_events_iter(**kwargs)
        return self._get_events()

    @property
    def codec(self):
        """Mapping of numeric event code -> variable tag name (cached)."""
        if self._codec is not None:
            return self._codec
        self._select_events([ReservedEventCode.RESERVED_CODEC_CODE],
                            self.minimum_time,
                            self.maximum_time)
        e = self._get_next_event()
        if e.empty:
            # No codec event present in the file.
            self._codec = {}
            return self._codec
        raw_codec = e.value
        codec = dict((key, raw_codec[key]["tagname"]) for key in raw_codec)
        self._codec = codec
        return codec

    @property
    def reverse_codec(self):
        """Mapping of variable tag name -> numeric event code (cached)."""
        if self._reverse_codec is not None:
            return self._reverse_codec
        rc = dict((v, k) for k, v in self.codec.items())
        self._reverse_codec = rc
        return rc

    def reindex(self):
        """Discard the existing index and rebuild it by re-opening."""
        self.close()
        self.unindex()
        self.open()

    def _empty_dir(self):
        """Collapse the index directory back to the bare .mwk file.

        Indexing turns the original file into a directory that contains
        the original .mwk plus index data; this undoes that layout by
        moving the directory aside, extracting the original file, and
        deleting the aside directory with everything else in it.
        """
        if not os.path.isdir(self.file):
            raise IndexingException("Attempt to re-index a file that has not yet been indexed")
        # Bug fix: the parent directory was previously derived with
        # os.pathsep.join(), which only produced the right result by
        # accident on the 2-tuple returned by os.path.split().
        parent_path, file_name = os.path.split(self.file)
        aside_path = os.path.join(parent_path, file_name + ".aside")
        os.rename(self.file, aside_path)
        os.rename(os.path.join(aside_path, file_name),
                  os.path.join(parent_path, file_name))
        shutil.rmtree(aside_path, True)  # delete the tree, ignoring errors

    def unindex(self, empty_dir=True):
        """Remove index data; return True if anything was removed.

        With ``empty_dir=True`` (default) the whole index directory is
        collapsed back to the original .mwk file; otherwise only the
        .idx file inside the directory is deleted.
        """
        if empty_dir:  # erase all files except .mwk
            self._empty_dir()
            return True
        if not os.path.isdir(self.file):
            return False
        # Only erase the .idx file.
        file_name = os.path.basename(self.file)
        idx_file = os.path.join(self.file, file_name + '.idx')
        if os.path.isfile(idx_file):
            os.remove(idx_file)
            return True
        else:
            return False
class MWKStream(_MWKStream):
    """Convenience layer over ``_MWKStream``: alternate constructors,
    context-manager support and iteration over stored events."""

    @classmethod
    def _create_file(cls, filename):
        # Create the on-disk file, then hand back a writable stream on it.
        super(MWKStream, cls)._create_file(filename)
        return cls.open_file(filename, _writable=True)

    @classmethod
    def open_file(cls, filename, _writable=False):
        """Open *filename* as a stream, read-only unless ``_writable``."""
        # The URI scheme selects read-only vs. writable access.
        scheme = 'ldobinary:file' if _writable else 'ldobinary:file_readonly'
        stream = cls('%s://%s' % (scheme, filename))
        stream.open()
        return stream

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        self.close()

    def __iter__(self):
        # Yield events until the underlying stream signals end-of-file.
        while True:
            try:
                yield self._read_event()
            except EOFError:
                return

    def read_event(self):
        """Return the next event, or None at end of stream."""
        try:
            return self._read_event()
        except EOFError:
            return None
| 2.390625 | 2 |
modules/currency_token/mint.py | Arpan-206/Web3-CLI | 4 | 12772145 | # Importing modules
from decimal import Decimal
from PyInquirer import prompt
from termcolor import colored
from .get import get
# The function to mint tokens
def mint(currency_module) -> None:
    """Interactively mint tokens on the given currency contract module.

    Prompts for an amount (in whole tokens), scales it by the token's
    decimals, asks for confirmation, and reports success or failure in
    colored terminal output.
    """
    mint_data = prompt([
        {
            'type': 'input',
            'name': 'amount',
            'message': 'Enter the amount of tokens to mint',
            'default': "1",
        },
        {
            'type': 'confirm',
            'name': 'confirmation',
            'message': 'Do you want to mint the selected tokens?',
            'default': False,
        },
    ])
    # Convert the human-readable amount into the token's base units.
    amount = Decimal(mint_data['amount']) * \
        (10 ** get(currency_module)['decimals'])
    if mint_data['confirmation']:
        try:
            currency_module.mint(int(amount))
            print(colored('Tokens minted successfully!', 'green'))
        except Exception as e:
            # Bug fix: concatenating a str with an Exception object raises
            # TypeError; format the exception explicitly instead.
            print(colored('Tokens could not be minted. \n' + str(e), 'red'))
    else:
        print(colored('Tokens not minted!', 'blue'))
| 3.328125 | 3 |
gSed/WordSplitter.py | elfgirl/gSed | 0 | 12772146 | import re
import wordMaps
class WordSplitter:
    """Walks a text block sentence-by-sentence and word-by-word, passing
    each word through WordMaps.swap() while preserving all punctuation
    and whitespace, and recording which swaps occurred and in which
    sentence (context) they happened."""

    def __init__(self):
        self.word_mapper = wordMaps.WordMaps()
        # Matches runs of letters (no digits/underscores).  This pattern
        # also produces zero-length matches, which swap() skips explicitly.
        self.word_breaker = re.compile(r"([^\W\d]*)", re.MULTILINE)
        # Splits the text into sentence-like spans ending near ./!/? —
        # NOTE(review): the lookahead reads `(?!=|\!|\.|\?)`, i.e. it
        # excludes `=` rather than an escaped `.`; confirm the pattern
        # matches the intended sentence boundaries.
        self.sentence_breaker = re.compile(r"((?!=|\!|\.|\?).)+.\b", re.MULTILINE)
        # word -> replacement actually applied during swap()
        self.swaps = {}
        # original sentence -> list of {'source': ..., 'flip': ...} records
        self.contexts = {}

    def swap(self, text_block=""):
        """Return text_block with every word passed through the word
        mapper; inter-sentence and inter-word text is copied verbatim."""
        new_text_block = []
        sentence_end_position = 0
        for sentence_match in self.sentence_breaker.finditer(text_block):
            # Run through word based swaps first
            new_sentence = []
            sentence_span = sentence_match.span()
            # Collect Punctuation and whitespace till next sentence
            if sentence_span[0] != sentence_end_position:
                new_text_block.append(text_block[sentence_end_position:sentence_span[0]])
            # sentence_end_position = sentence_span[0]
            source_sentence = text_block[sentence_span[0]:sentence_span[1]]
            last_word_position = 0
            for word_match in self.word_breaker.finditer(source_sentence):
                word_span = word_match.span()
                if word_span[0] == word_span[1]:
                    # blank match. TODO improve regex
                    continue
                # Collect Whitespace till next match
                if word_span[0] != last_word_position:
                    new_sentence.append(source_sentence[last_word_position:word_span[0]])
                source_word = source_sentence[word_span[0]:word_span[1]]
                word = self.word_mapper.swap(source_word)
                new_sentence.append(word)
                last_word_position = word_span[1]
                # Record only the words that were actually changed.
                if word != source_word:
                    self.swaps[source_word] = word
                    if source_sentence not in self.contexts:
                        self.contexts[source_sentence] = []
                    self.contexts[source_sentence].append({'source': source_word, 'flip': word, })
            new_text_block.append("".join(new_sentence))
            sentence_end_position = sentence_span[1]
        # Collect final punctuations
        if sentence_end_position != len(text_block):
            new_text_block.append(text_block[sentence_end_position:])
        return "".join(new_text_block)
| 2.828125 | 3 |
cloud_init_gen/part_type.py | sammck/cloud-init-gen | 1 | 12772147 | #
# Copyright (c) 2022 <NAME>
#
# MIT License - See LICENSE file accompanying this package.
#
"""Mapping between cloud-init mime types and '#' comment header conventions
"""
from typing import Optional, List, Dict
class CloudInitPartType:
    """Associates a cloud-init MIME type with its comment-header line.

    E.g. "Content-Type: text/cloud-config" corresponds to the
    "#cloud-config" header; the renderer uses this correlation to pick
    the optimal rendering of a part.
    """

    # Full MIME type, e.g. 'text/cloud-boothook'.
    mime_type: str
    # The MIME type without the leading "text/".
    mime_subtype: str
    # Comment tag after '#' (just '!' for shebangs), or None when the
    # MIME type has no associated comment header.
    comment_tag: Optional[str]=None
    # Full comment-header marker (e.g. '#cloud-config'; just '#!' for
    # shebang scripts), or None when there is no comment header.
    comment_line: Optional[str]=None

    def __init__(self, mime_subtype: str, comment_tag: Optional[str]=None):
        """Build the descriptor from a MIME subtype and optional tag.

        Args:
            mime_subtype (str): The MIME type without the leading "text/".
            comment_tag (Optional[str], optional): The comment tag without
                the leading "#" (just "!" for shebang types), or None when
                no comment header is associated with the MIME type.
                Defaults to None.
        """
        self.mime_type = 'text/' + mime_subtype
        self.mime_subtype = mime_subtype
        self.comment_tag = comment_tag
        self.comment_line = ('#' + comment_tag) if comment_tag is not None else None
_part_type_list: List[CloudInitPartType] = [
    CloudInitPartType('cloud-boothook', 'boothook'), # A script with a shebang header
    CloudInitPartType('cloud-config', 'cloud-config'), # A YAML doc with rich config data
    CloudInitPartType('cloud-config-archive', 'cloud-config-archive'), # a YAML doc that contains a list of docs, like multipart mime
    CloudInitPartType('cloud-config-jsonp', 'cloud-config-jsonp'), # fine-grained merging with vendor-provided cloud-config
    CloudInitPartType('jinja2', "# template: jinja"), # expand jinja2 template. 2nd line is comment describing actual part type
    CloudInitPartType('part-handler', 'part-handler'), # part contains python code that can process custom mime types for subsequent parts
    CloudInitPartType('upstart-job', 'upstart-job'), # content plated into a file under /etc/init, to be consumed by upstart
    CloudInitPartType('x-include-once-url', 'include-once'), # List of urls that are read one at a time and processed as any item, but only once
    CloudInitPartType('x-include-url', 'include'), # list of urls that are read one at a time and processed as any item
    CloudInitPartType('x-shellscript', '!'), # simple userdata shell script (comment line has variable chars)
    CloudInitPartType('x-shellscript-per-boot'), # shell script run on every boot
    CloudInitPartType('x-shellscript-per-instance'), # shell script run once per unique instance
    CloudInitPartType('x-shellscript-per-once'), # shell script run only once
  ]
"""A list of MIME types that are pre-known to cloud-init"""

# Lookup tables derived from the list above; dict comprehensions replace
# the former dict(<generator>) calls (ruff C402).
mime_to_cloud_init_part_type: Dict[str, CloudInitPartType] = {x.mime_type: x for x in _part_type_list}
"""A map from full MIME type to associated CloudInitPartType"""

comment_to_cloud_init_part_type: Dict[str, CloudInitPartType] = {x.comment_line: x for x in _part_type_list if x.comment_line is not None}
"""A map from comment header line (Just "#!" for shebang lines) to associated CloudInitPartType"""
| 2.46875 | 2 |
poptox/leslie_probit/leslie_probit_parameters.py | quanted/poptox | 1 | 12772148 | <filename>poptox/leslie_probit/leslie_probit_parameters.py
#*********************************************************#
# @@ScriptName: leslie_probit_parameters.py
# @@Author: <NAME>
# @@Create Date: 2013-09-23
# @@Modify Date: 2013-09-24
#*********************************************************#
import os
os.environ['DJANGO_SETTINGS_MODULE']='settings'
from django import forms
from django.db import models
from django.utils.safestring import mark_safe
# Choices for the application-target dropdown: (stored value, display label).
app_target_choices=(('Short Grass','Short Grass'),('Tall Grass','Tall Grass'))
# Choices for the number-of-age-classes dropdown; first entry is a placeholder.
S_select =(('','Please choose'), ('2','2'),('3','3'),('4','4'),('5','5'),('6','6'),('7','7'))
class leslie_probit_chem(forms.Form):
    # Chemical and application inputs for the Leslie-probit model.
    animal_name = forms.CharField(widget=forms.Textarea (attrs={'cols': 17, 'rows': 2}), initial='C. dubia')
    chemical_name = forms.CharField(widget=forms.Textarea (attrs={'cols': 17, 'rows': 2}), initial='Spinosad')
    app_target = forms.ChoiceField(required=True, choices=app_target_choices, initial='Short Grass')
    # Percent active ingredient of the formulation.
    ai = forms.FloatField(required=True, label='% a.i. (%)', initial=41.9)
    hl = forms.FloatField(required=True, label='Chemical half life (days)',initial=2)
    sol = forms.FloatField(required=True, label=mark_safe('Solubility (in water @25°C; mg/L)'), initial=70)
    t = forms.FloatField(required=True, label='Simulation durations (days)',initial=10)
class leslie_probit_dose(forms.Form):
    # Dose-response inputs: probit slope, tested-species toxicity data,
    # and the assessed species the LD50 is extrapolated to.
    b = forms.FloatField(required=True, label=mark_safe('Probit dose response slope (b)'), initial=4.5)
    test_species = forms.CharField(widget=forms.Textarea (attrs={'cols': 17, 'rows': 1}), label='Tested animal', initial='Quail')
    ld50_test = forms.FloatField(required=True, label=mark_safe('LD<sub>50</sub> of tested animal (mg/kg-bw)'),initial=783)
    bw_test=forms.FloatField(required=True, label='Body weight of the tested animal (g)', initial=178)
    ass_species = forms.CharField(widget=forms.Textarea (attrs={'cols': 17, 'rows': 1}), label='Assessed animal', initial='Turkey')
    bw_ass = forms.FloatField(required=True, label='Body weight of assessed animal (g)', initial=20)
    # Cross-species body-weight scaling factor (Mineau et al.).
    mineau_scaling_factor = forms.FloatField(required=True, label='Mineau scaling factor', initial=1.15)
class leslie_probit_popu(forms.Form):
    # Population-model inputs: density-dependence intensity and the
    # number of age classes in the Leslie matrix.
    c = forms.FloatField(required=True,label=mark_safe('Intensity of the density dependence (γ)'),initial=0.00548)
    s = forms.ChoiceField(required=True,choices=S_select, label='Number of age class', initial='Please choose')
| 2.0625 | 2 |
python/problem-088.py | mbuhot/mbuhot-euler-solutions | 1 | 12772149 | #! /usr/bin/env python3
import prime
from memo import memoize
description = '''
Next
Product-sum numbers
Problem 88
A natural number, N, that can be written as the sum and product of a given set of at least two natural numbers, {a1, a2, ... , ak} is called a product-sum number: N = a1 + a2 + ... + ak = a1 × a2 × ... × ak.
For example, 6 = 1 + 2 + 3 = 1 × 2 × 3.
For a given set of size, k, we shall call the smallest N with this property a minimal product-sum number. The minimal product-sum numbers for sets of size, k = 2, 3, 4, 5, and 6 are as follows.
k=2: 4 = 2 × 2 = 2 + 2
k=3: 6 = 1 × 2 × 3 = 1 + 2 + 3
k=4: 8 = 1 × 1 × 2 × 4 = 1 + 1 + 2 + 4
k=5: 8 = 1 × 1 × 2 × 2 × 2 = 1 + 1 + 2 + 2 + 2
k=6: 12 = 1 × 1 × 1 × 1 × 2 × 6 = 1 + 1 + 1 + 1 + 2 + 6
Hence for 2≤k≤6, the sum of all the minimal product-sum numbers is 4+6+8+12 = 30; note that 8 is only counted once in the sum.
In fact, as the complete set of minimal product-sum numbers for 2≤k≤12 is {4, 6, 8, 12, 15, 16}, the sum is 61.
What is the sum of all the minimal product-sum numbers for 2≤k≤12000?
'''
def firstFactor(n):
    """Return the smallest prime factor of n (n > 1)."""
    return next(p for p in prime.primes() if n % p == 0)
def replaceTupleElement(tup, idx, val):
    """Return a copy of *tup* with the item at position *idx* replaced by *val*.

    An index that matches no position (e.g. out of range) leaves the
    tuple unchanged.
    """
    return tuple(val if position == idx else item
                 for position, item in enumerate(tup))
@memoize
def factorizations(n):
    """Return the set of all factorizations of n as sorted tuples.

    Each factorization is a multiset of integers whose product is n
    (1 and n itself appear only in the trivial one-element case).
    Memoized, since sub-factorizations recur across different n.
    """
    # inner generator, yields factorizations, but some may be duplicates
    def facgen(n):
        # base case: prime or 1, only 1 factorization
        if n == 1 or prime.isPrime(n):
            yield (n,)
            return

        # recursive case: peel off the first factor of n,
        # then combine with all factorizations of n/factor
        # Use memoization to avoid repeated factorization of previously seen numbers
        factor = firstFactor(n)
        for subfac in factorizations(n // factor):
            # combine by appending
            yield (factor,) + subfac

            # combine by multiplying each element of subfactor by the first factor
            for i in range(0, len(subfac)):
                yield replaceTupleElement(subfac, i, subfac[i] * factor)

    # outer function collects all generated factorizations, returns the unique set
    return set(tuple(sorted(fac)) for fac in facgen(n))
def sumMinProductSums(maxK):
    """Return the sum of all distinct minimal product-sum numbers for
    set sizes 2 <= k <= maxK (Project Euler problem 88)."""
    # the minimum N that is a product-sum number for set size K
    minimums = [0] * (maxK + 1)

    # min product-sum number for K cannot be greater than 2K
    for n in range(2, 2*maxK):
        # Test each factorization of n, for potential sum-product solutions
        for factorization in factorizations(n):
            # sum(factorization) = product(factorization), use 1's to pad the sum until both sides equal
            # sum(factorization) + padding1s = n
            # => padding1s = n - sum(factorization)
            # => k = len(factorization) + padding1s
            #      = len(factorization) + n - sum(factorization)
            k = len(factorization) + n - sum(factorization)

            # test and update the minimum
            if k <= 1 or k >= len(minimums): continue
            if minimums[k] == 0 or n < minimums[k]:
                minimums[k] = n

    # sum the unique n's that were minimum product-sum numbers
    return sum(set(minimums))
print('sum:', sumMinProductSums(12000))
| 4.21875 | 4 |
napari_pyclesperanto_assistant/_statistics_of_labeled_pixels.py | jo-mueller/napari_pyclesperanto_assistant | 16 | 12772150 | import warnings
import numpy as np
from magicgui.widgets import Table
from napari_plugin_engine import napari_hook_implementation
from napari.types import ImageData, LabelsData, LayerDataTuple
from napari import Viewer
from pandas import DataFrame
from qtpy.QtCore import QTimer
from qtpy.QtWidgets import QTableWidget, QTableWidgetItem, QWidget, QGridLayout, QPushButton, QFileDialog
import pyclesperanto_prototype as cle
import napari
from napari_tools_menu import register_function
@register_function(menu="Measurement > Statistics of labeled pixels (clEsperanto)")
def statistics_of_labeled_pixels(image: ImageData, labels_layer: napari.layers.Labels, napari_viewer : Viewer, measure_background=False):
    """
    Adds a table widget to a given napari viewer with quantitative analysis results derived from an image-labelimage pair.

    Results are written into labels_layer.properties and shown as a table.
    If measure_background is True, background statistics are included.
    """
    # Bug fix: guard against labels_layer being None *before* touching
    # .data, which previously raised AttributeError instead of warning.
    if image is None or labels_layer is None:
        warnings.warn("Image and labels must be set.")
        return

    labels = labels_layer.data
    if labels is None:
        warnings.warn("Image and labels must be set.")
        return

    # quantitative analysis using clEsperanto's statistics_of_labelled_pixels
    if measure_background:
        table = cle.statistics_of_background_and_labelled_pixels(image, labels)
    else:
        table = cle.statistics_of_labelled_pixels(image, labels)

    # Store results in the properties dictionary:
    labels_layer.properties = table

    # turn table into a widget
    from napari_skimage_regionprops import add_table
    add_table(labels_layer, napari_viewer)
| 2.078125 | 2 |