| max_stars_repo_path (string, length 3–269) | max_stars_repo_name (string, length 4–119) | max_stars_count (int64, 0–191k) | id (string, length 1–7) | content (string, length 6–1.05M) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
course_api/migrations/0017_auto_20180404_2343.py
|
dragonbone81/bobcat-courses-backend
| 3
|
12783151
|
<gh_stars>1-10
# Generated by Django 2.0.3 on 2018-04-05 06:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_api', '0016_subjectcourse_course_description'),
]
operations = [
migrations.RenameField(
model_name='course',
old_name='discussion_crn',
new_name='attached_crn',
),
]
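# Illustrative note (not part of the generated migration): RenameField only renames
# the model field/column, so the data stored under the old `discussion_crn` column is
# preserved under the new `attached_crn` name. A minimal sketch of applying it
# programmatically, assuming a configured Django project:
#
#     from django.core.management import call_command
#     call_command("migrate", "course_api", "0017_auto_20180404_2343")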
| 1.648438
| 2
|
index_documents.py
|
nilleb/adb-expertise-locator
| 0
|
12783152
|
<filename>index_documents.py
import logging
import os
import sys
import re
from backends.es import IndexingException, create_indexer
from common.constants import SETS_FILEPATH
from common.io import read_object, write_object
from configuration import LIMIT
from regex_authors import is_author_page
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
logging.basicConfig(level=logging.INFO)
import urllib3
urllib3.disable_warnings()
DOCUMENTS_SOURCE_FORMAT = "data/output/{what}.json"
idx = create_indexer()
stop_words = set(stopwords.words("english"))
def exclude_stop_words(text):
word_tokens = word_tokenize(text)
    filtered_sentence = [w for w in word_tokens if w.lower() not in stop_words]
return " ".join(filtered_sentence)
def lemmatize(text):
lemma = nltk.wordnet.WordNetLemmatizer()
word_tokens = word_tokenize(text)
return " ".join([lemma.lemmatize(w) for w in word_tokens])
def squeeze(text):
return re.sub("\s+", " ", text)
def shorten(texts):
residual_len = LIMIT - 1
for text in texts:
text = exclude_stop_words(squeeze(text))
logging.info(len(text))
yield text[:residual_len]
residual_len -= len(text) + 1
if residual_len < 0:
return
def pages_after_author_page(path, doc):
begin_yield = False
for page in doc.get("pages"):
if begin_yield:
yield page
if not begin_yield and is_author_page(path, page):
begin_yield = True
def prepare_texts(paths):
for path in paths:
doc = read_object(f"{path}.metadata.json")
yield " ".join(pages_after_author_page(path, doc))
def prepare_keywords(keywords):
for keyword, count in keywords.items():
yield {"keyword": keyword, "count": count}
def load_report_names():
sets = read_object(SETS_FILEPATH)
return {os.path.basename(path): path for path in sets.get("reports")}
REPORT_NAMES = load_report_names()
def get_filenames(links):
filenames = [os.path.basename(document) for document in links]
filenames = list(set(filenames))
return filenames
def get_links(filenames):
def short_id(filename):
return filename.split("-")[0]
def get_report(filename):
return REPORT_NAMES.get(
filename,
f"https://www.adb.org/sites/default/files/project-documents/{short_id(filename)}/{filename}",
)
return [get_report(filename) for filename in filenames]
def index_single_document(document):
es_document = dict(document)
filenames = get_filenames(es_document["links"])
es_document["texts"] = list(prepare_texts(es_document["links"]))
es_document["links"] = get_links(filenames)
es_document["texts_cut"] = list(shorten(es_document["texts"]))
es_document["keywords"] = list(prepare_keywords(es_document.get("keywords", {})))
idx.index_single_document(es_document)
REMEMBER_PATH = "data/intermediate/indexed_documents.json"
try:
INDEXED_DOCUMENTS = set(read_object(REMEMBER_PATH))
except Exception:
INDEXED_DOCUMENTS = set()
def should_process(document):
return document and document["id"] not in INDEXED_DOCUMENTS
def remember(document):
INDEXED_DOCUMENTS.add(document["id"])
write_object(list(INDEXED_DOCUMENTS), REMEMBER_PATH)
REMEMBER_ERRORS_PATH = "data/intermediate/failed_indexing_documents.json"
try:
FAILED_DOCUMENTS = set(read_object(REMEMBER_ERRORS_PATH))
except Exception:
FAILED_DOCUMENTS = set()
def remember_error(document):
FAILED_DOCUMENTS.add(document["id"])
write_object(list(FAILED_DOCUMENTS), REMEMBER_ERRORS_PATH)
def index_authors_documents(what):
idx.setup_index()
author_documents = read_object(DOCUMENTS_SOURCE_FORMAT.format(what=what))
logging.info(f"{len(author_documents)} total documents loaded")
for count, document in enumerate(author_documents.values()):
if should_process(document):
try:
index_single_document(document)
remember(document)
except IndexingException:
remember_error(document)
logging.info(f"{count + 1} documents indexed.")
if __name__ == "__main__":
what = sys.argv[-1] if len(sys.argv) == 2 else sys.argv[-2]
who = sys.argv[-1] if len(sys.argv) == 3 else None
print(what)
if not who and what in (
"regex-authors",
"regex-authors-merged",
"stanford_ner",
"special-guests",
):
logging.info(f"indexing all {what} documents")
index_authors_documents(what)
else:
author_documents = read_object(DOCUMENTS_SOURCE_FORMAT.format(what=what))
document = author_documents.get(who)
if document:
logging.info(f"indexing {what}/{who}")
index_single_document(document)
else:
logging.error(f"not found: {what}/{who}")
| 2.453125
| 2
|
src/tests/sources/test_data_loader.py
|
zaxmks/demo-data-compliance-service
| 0
|
12783153
|
import pytest
import pandas as pd
from src.clients.s3_client import S3Client
from src.sources.data_loader import DataLoader
def test_init():
dl = DataLoader("test_source", "test_client")
assert dl.data_source == "test_source"
assert dl.client == "test_client"
# def test_load_when_database_client():
# db_client = DatabaseClient(temp=True)
# db_client.connect()
# db_client.execute("CREATE TABLE test_table (name TEXT)")
# db_client.execute("INSERT INTO test_table(name) VALUES ('test_name')")
# dl = DataLoader("test_table", client=db_client)
# data, structured, name = dl.load()
# assert isinstance(data, pd.DataFrame)
# assert data.columns == "name"
# assert data.values == ["test_name"]
# assert structured
# assert name == "test_table"
#
#
# def test_load_when_s3_file_specified():
# with pytest.raises(NotImplementedError):
# dl = DataLoader("test", S3Client())
# dl.load()
def test_load_when_csv_file_specified():
dl = DataLoader("src/tests/test_data/sample/names.csv", client=None)
data, structured, name = dl.load()
assert isinstance(data, pd.DataFrame)
assert data.columns == ["name"]
assert len(data.values) == 250
assert structured
assert name == "src/tests/test_data/sample/names.csv"
def test_load_when_dataframe_specified():
test_df = pd.read_csv("src/tests/test_data/sample/names.csv")
dl = DataLoader(test_df, client=None)
data, structured, name = dl.load()
assert isinstance(data, pd.DataFrame)
assert data.columns == ["name"]
assert len(data.values) == 250
assert structured
assert name == "pandas DataFrame (hash 5214317343533855748)"
def test_load_when_txt_file_specified():
dl = DataLoader("src/tests/test_data/sample/email.txt", client=None)
data, structured, name = dl.load()
assert isinstance(data, str)
assert data.startswith("Dear Mr. Connell")
assert not structured
assert name == "src/tests/test_data/sample/email.txt"
def test_load_when_pdf_file_specified():
dl = DataLoader("src/tests/test_data/sample/academic_paper.pdf", client=None)
data, structured, name = dl.load()
assert isinstance(data, str)
assert data.startswith("Enriching Word Vectors")
assert not structured
assert name == "src/tests/test_data/sample/academic_paper.pdf"
def test_load_when_xml_file_specified():
dl = DataLoader("src/tests/test_data/sample/employees.xml", client=None)
data, structured, name = dl.load()
assert isinstance(data, pd.DataFrame)
assert len(data) == 6
assert len(data.columns) == 9
assert structured
assert name == "src/tests/test_data/sample/employees.xml"
def test_load_when_xls_file_specified():
dl = DataLoader("src/tests/test_data/sample/dummy.xls", client=None)
data, structured, name = dl.load()
assert isinstance(data, pd.DataFrame)
assert len(data) == 4
assert len(data.columns) == 3
assert structured
assert name == "src/tests/test_data/sample/dummy.xls"
def test_load_when_xlsx_file_specified():
dl = DataLoader("src/tests/test_data/sample/dummy.xlsx", client=None)
data, structured, name = dl.load()
assert isinstance(data, pd.DataFrame)
assert len(data) == 4
assert len(data.columns) == 3
assert structured
assert name == "src/tests/test_data/sample/dummy.xlsx"
def test_load_when_multi_sheet_xlsx():
dl = DataLoader("src/tests/test_data/sample/dummy_two_sheets.xlsx", client=None)
with pytest.raises(NotImplementedError):
dl.load()
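# Running just this module with the standard pytest CLI (paths assume the repository
# root as the working directory):
#
#     pytest src/tests/sources/test_data_loader.py -q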
| 2.453125
| 2
|
data/pred.py
|
cedar33/roberta-crf
| 6
|
12783154
|
from fairseq.models.roberta import RobertaModel
label_fn = lambda label: roberta.task.label_dictionary.string(
[label + roberta.task.label_dictionary.nspecial]
)
roberta = RobertaModel.from_pretrained(
model_name_or_path='/path/to/checkpoints',
checkpoint_file='checkpoint_best.pt',
data_name_or_path='/path/to/data'
)
roberta.eval()
tokens = roberta.encode('china is a great country')
pred = roberta.predict_label("kaggle_ner", tokens, return_logits=True)
print(pred[1][0])
print([label_fn(int(p)) for p in pred[1][0].tolist()])
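# Note: label_fn maps a numeric label index back to its tag string by skipping the
# label dictionary's special symbols (bos/pad/eos/unk) before the lookup; the exact
# shape of `pred` depends on the custom predict_label head provided by this repo.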
| 2.28125
| 2
|
tests/Eva_auth_test.py
|
Charlesworth/eva_python_sdk
| 14
|
12783155
|
from evasdk import EvaAutoRenewError, EvaError
import time
import pytest
# TODO: these tests rely on having an actual robot; they should be rewritten to be mockable
@pytest.mark.robot_required
class TestAuth:
def test_create_new_session(self, eva):
token = eva.auth_create_session()
assert(len(token) == 36)
assert(token == eva._Eva__http_client.session_token)
def test_invalidate_session(self, eva):
# start a new session, then invalidate it
token = eva.auth_create_session()
eva.auth_invalidate_session()
# this should automatically start a new, different session
eva.users_get()
assert(token != eva._Eva__http_client.session_token)
@pytest.mark.slow
def test_auto_renew_error(self, eva):
api_token = eva._Eva__http_client.api_token
eva._Eva__http_client.api_token = ''
# Ensure it will try to auto-renew
eva.auth_invalidate_session()
time.sleep(3 * 60)
got_auto_renew_error = False
try:
# Won't get a 401, as no session required for this endpoint
eva.api_call_with_auth('GET', '_/init')
except EvaAutoRenewError:
got_auto_renew_error = True
finally:
eva._Eva__http_client.api_token = api_token
assert(got_auto_renew_error)
def test_lock_with_no_existing_session(self, eva):
try:
eva.auth_invalidate_session()
except EvaError:
# could fail if session is already invalidated, so ignore!
pass
with eva.lock():
eva.gpio_set('d1', not eva.gpio_get('d1', 'output'))
@pytest.mark.slow
def test_auto_renew(self, locked_eva):
for _ in range(7):
locked_eva.gpio_set('d1', not locked_eva.gpio_get('d1', 'output'))
time.sleep(5 * 60)
| 2.21875
| 2
|
tools/debugging/matrix/load_with_generate_messages.py
|
luehrsFred/raiden
| 0
|
12783156
|
#!/usr/bin/env python
from gevent import monkey # isort:skip
monkey.patch_all() # isort:skip
import argparse
import os
import time
from dataclasses import dataclass
from typing import Iterator, List
from raiden.utils.nursery import Janitor, Nursery
CWD = os.path.dirname(os.path.abspath(__file__))
GENERATE_MESSAGES_SCRIPT = os.path.join(CWD, "generate_messages.py")
@dataclass
class Config:
logdir: str
sender_matrix_server_url: str
receiver_matrix_server_url: str
target_qty_of_chat_rooms: int
qty_of_new_rooms_per_iteration: int
concurrent_messages_per_room: int
wait_before_next_iteration: float
def batch_size(target: int, step: int) -> Iterator[int]:
iterations = target // step
for _ in range(iterations):
yield step
rest = target % step
if rest:
yield rest
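# Worked example (illustrative): batch_size(25, 10) yields 10, 10, 5 -- full batches
# first, then the remainder when the target is not a multiple of the step.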
def run(config: Config, nursery: Nursery) -> None:
for i, qty_of_rooms in enumerate(
batch_size(config.target_qty_of_chat_rooms, config.qty_of_new_rooms_per_iteration)
):
log_file = os.path.join(config.logdir, str(i))
script_args: List[str] = [
GENERATE_MESSAGES_SCRIPT,
"--concurrent-messages",
str(config.concurrent_messages_per_room),
"--chat-rooms",
str(qty_of_rooms),
log_file,
config.sender_matrix_server_url,
config.receiver_matrix_server_url,
]
nursery.exec_under_watch(script_args)
time.sleep(config.wait_before_next_iteration)
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("--wait-before-next-iteration", type=int, default=60)
parser.add_argument("target_qty_of_chat_rooms", type=int, default=500)
parser.add_argument("qty_of_new_rooms_per_iteration", type=int, default=10)
parser.add_argument("concurrent_messages_per_room", type=int, default=50)
parser.add_argument("logdir", help="Directory used to save the script logs.")
parser.add_argument("server", help="Matrix server used by the sender user.")
parser.add_argument(
"server2",
help=(
"If provided, the server used by the receiever, otherwise the same "
"server as the sender is used."
),
default=None,
nargs="?",
)
args = parser.parse_args()
logdir = args.logdir
os.makedirs(logdir, exist_ok=True)
sender_matrix_server_url = args.server
receiver_matrix_server_url = args.server2 or args.server
config = Config(
logdir=logdir,
sender_matrix_server_url=sender_matrix_server_url,
receiver_matrix_server_url=receiver_matrix_server_url,
target_qty_of_chat_rooms=args.target_qty_of_chat_rooms,
qty_of_new_rooms_per_iteration=args.qty_of_new_rooms_per_iteration,
concurrent_messages_per_room=args.concurrent_messages_per_room,
wait_before_next_iteration=args.wait_before_next_iteration,
)
with Janitor() as nursery:
nursery.spawn_under_watch(run, config, nursery)
nursery.wait(timeout=None)
if __name__ == "__main__":
main()
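# Invocation sketch (argument order follows the argparse definition above; the matrix
# URLs are placeholders):
#
#     ./load_with_generate_messages.py 500 10 50 /tmp/matrix-load-logs \
#         https://sender.matrix.example https://receiver.matrix.example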
| 2.046875
| 2
|
src/discussion/models.py
|
ResearchHub/ResearchHub-Backend-Open
| 18
|
12783157
|
from django.db.models import (
Count,
Q,
F
)
from django.contrib.contenttypes.fields import (
GenericForeignKey,
GenericRelation
)
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import JSONField
from django.core.cache import cache
from django.db import models
from paper.utils import get_cache_key
from purchase.models import Purchase
from researchhub.lib import CREATED_LOCATIONS
from .reaction_models import Flag, Vote, Endorsement
HELP_TEXT_WAS_EDITED = (
'True if the comment text was edited after first being created.'
)
HELP_TEXT_IS_PUBLIC = (
    'Unset this to hide the comment from the public.'
)
HELP_TEXT_IS_REMOVED = (
'Hides the comment because it is not allowed.'
)
class BaseComment(models.Model):
CREATED_LOCATION_PROGRESS = CREATED_LOCATIONS['PROGRESS']
CREATED_LOCATION_CHOICES = [
(CREATED_LOCATION_PROGRESS, 'Progress')
]
created_by = models.ForeignKey(
'user.User',
on_delete=models.SET_NULL,
blank=True,
null=True,
)
created_date = models.DateTimeField(auto_now_add=True, db_index=True)
updated_date = models.DateTimeField(auto_now=True)
created_location = models.CharField(
choices=CREATED_LOCATION_CHOICES,
max_length=255,
default=None,
null=True,
blank=True
)
was_edited = models.BooleanField(
default=False,
help_text=HELP_TEXT_WAS_EDITED
)
is_public = models.BooleanField(
default=True,
help_text=HELP_TEXT_IS_PUBLIC
)
is_removed = models.BooleanField(
default=False,
help_text=HELP_TEXT_IS_REMOVED
)
ip_address = models.GenericIPAddressField(
unpack_ipv4=True,
blank=True,
null=True
)
text = JSONField(blank=True, null=True)
external_metadata = JSONField(null=True)
votes = GenericRelation(
Vote,
object_id_field='object_id',
content_type_field='content_type',
related_query_name='discussion'
)
flags = GenericRelation(Flag)
endorsement = GenericRelation(Endorsement)
plain_text = models.TextField(default='', blank=True)
source = models.CharField(default='researchhub', max_length=32, null=True)
purchases = GenericRelation(
Purchase,
object_id_field='object_id',
content_type_field='content_type',
related_query_name='discussion'
)
contributions = GenericRelation(
'reputation.Contribution',
object_id_field='object_id',
content_type_field='content_type',
related_query_name='discussion'
)
class Meta:
abstract = True
# TODO make this a mixin Actionable or Notifiable
@property
def owners(self):
if self.created_by:
return [self.created_by]
else:
return []
# TODO make this a mixin Actionable or Notifiable
@property
def users_to_notify(self):
parent_owners = self.parent.owners
return parent_owners
@property
def created_by_author_profile_indexing(self):
if self.created_by:
author = self.created_by.author_profile
if author:
return author
return None
@property
def children(self):
return BaseComment.objects.none()
@property
def score_indexing(self):
return self.calculate_score()
def calculate_score(self, ignore_self_vote=False):
if hasattr(self, 'score'):
return self.score
else:
qs = self.votes.filter(
created_by__is_suspended=False,
created_by__probable_spammer=False
)
if ignore_self_vote:
qs = qs.exclude(created_by=F('discussion__created_by'))
score = qs.aggregate(
score=Count(
'id', filter=Q(vote_type=Vote.UPVOTE)
) - Count(
'id', filter=Q(vote_type=Vote.DOWNVOTE)
)
).get('score', 0)
return score
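    # Illustration (hypothetical counts): with 5 upvotes and 2 downvotes from
    # non-suspended, non-spammer users, calculate_score() evaluates to 3.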
def update_discussion_count(self):
paper = self.paper
if paper:
new_dis_count = paper.get_discussion_count()
paper.calculate_hot_score()
paper.discussion_count = new_dis_count
paper.save(update_fields=['discussion_count'])
cache_key = get_cache_key('paper', paper.id)
cache.delete(cache_key)
for h in paper.hubs.all():
h.discussion_count = h.get_discussion_count()
h.save(update_fields=['discussion_count'])
return new_dis_count
post = self.post
hypothesis = self.hypothesis
instance = post or hypothesis
if instance:
new_dis_count = instance.get_discussion_count()
instance.discussion_count = new_dis_count
instance.save()
return new_dis_count
return 0
def remove_nested(self):
if self.is_removed is False:
self.is_removed = True
self.save(update_fields=['is_removed'])
if len(self.children) > 0:
for c in self.children:
c.remove_nested()
def get_promoted_score(self):
purchases = self.purchases.filter(
paid_status=Purchase.PAID,
)
if purchases.exists():
boost_score = sum(
map(int, purchases.values_list('amount', flat=True))
)
return boost_score
return False
class Thread(BaseComment):
CITATION_COMMENT = 'citation_comment'
INLINE_ABSTRACT = 'inline_abstract'
INLINE_PAPER_BODY = 'inline_paper_body'
RESEARCHHUB = 'researchhub'
THREAD_SOURCE_CHOICES = [
(CITATION_COMMENT, 'Citation Comment'),
(INLINE_ABSTRACT, 'Inline Abstract'),
(INLINE_PAPER_BODY, 'Inline Paper Body'),
(RESEARCHHUB, 'researchhub'),
]
source = models.CharField(
default=RESEARCHHUB,
choices=THREAD_SOURCE_CHOICES,
max_length=32
)
block_key = models.CharField(max_length=255, null=True, blank=True)
context_title = models.TextField(
blank=True,
null=True,
help_text="For inline-comments, indicates what's highlighted"
)
entity_key = models.CharField(max_length=255, null=True, blank=True)
title = models.CharField(
max_length=255,
null=True,
blank=True
)
paper = models.ForeignKey(
'paper.Paper',
on_delete=models.SET_NULL,
related_name='threads',
blank=True,
null=True
)
post = models.ForeignKey(
'researchhub_document.ResearchhubPost',
on_delete=models.SET_NULL,
related_name='threads',
blank=True,
null=True
)
hypothesis = models.ForeignKey(
'hypothesis.Hypothesis',
on_delete=models.SET_NULL,
related_name='threads',
null=True,
blank=True,
)
citation = models.ForeignKey(
'hypothesis.Citation',
on_delete=models.SET_NULL,
related_name='threads',
null=True,
blank=True,
)
actions = GenericRelation(
'user.Action',
object_id_field='object_id',
content_type_field='content_type',
related_query_name='threads'
)
def __str__(self):
return '%s: %s' % (self.created_by, self.title)
@property
def parent(self):
return self.paper
@property
def unified_document(self):
paper = self.paper
if paper:
return paper.unified_document
post = self.post
if post:
return post.unified_document
hypothesis = self.hypothesis
if hypothesis:
return hypothesis.unified_document
citation = self.citation
if citation:
return citation.source
return None
@property
def children(self):
return self.comments.filter(is_removed=False)
@property
def comment_count_indexing(self):
return len(self.comments.filter(is_removed=False))
@property
def paper_indexing(self):
if self.paper is not None:
return self.paper.id
@property
def paper_title_indexing(self):
if self.paper is not None:
return self.paper.title
@property
def owners(self):
if (
self.created_by
and self.created_by.emailrecipient.thread_subscription
and not self.created_by.emailrecipient.thread_subscription.none
):
return [self.created_by]
else:
return []
@property
def users_to_notify(self):
# TODO: Add notifications to posts and hypotheses
if self.post or self.hypothesis or self.citation:
return []
users = list(self.parent.moderators.all())
paper_authors = self.parent.authors.all()
for author in paper_authors:
if (
author.user
and author.user.emailrecipient.paper_subscription.threads
and not author.user.emailrecipient.paper_subscription.none
):
users.append(author.user)
return users
class Reply(BaseComment):
content_type = models.ForeignKey(
ContentType,
on_delete=models.SET_NULL,
blank=True,
null=True
)
object_id = models.PositiveIntegerField()
parent = GenericForeignKey('content_type', 'object_id')
replies = GenericRelation('Reply')
actions = GenericRelation(
'user.Action',
object_id_field='object_id',
content_type_field='content_type',
related_query_name='replies'
)
@property
def paper(self):
comment = self.get_comment_of_reply()
        if comment:
            paper = comment.paper
            return paper
@property
def post(self):
comment = self.get_comment_of_reply()
if comment:
post = comment.post
return post
@property
def hypothesis(self):
comment = self.get_comment_of_reply()
if comment:
hypothesis = comment.hypothesis
return hypothesis
@property
def thread(self):
comment = self.get_comment_of_reply()
thread = comment.parent
return thread
@property
def unified_document(self):
thread = self.thread
paper = thread.paper
hypothesis = thread.hypothesis
if paper:
return paper.unified_document
post = thread.post
if post:
return post.unified_document
hypothesis = thread.hypothesis
if hypothesis:
return hypothesis.unified_document
return None
@property
def children(self):
return self.replies.filter(is_removed=False)
def get_comment_of_reply(self):
obj = self
while isinstance(obj, Reply):
obj = obj.parent
if isinstance(obj, Comment):
return obj
return None
@property
def owners(self):
return [self.created_by]
@property
def users_to_notify(self):
# TODO: No siblings for now. Do we need this?
# sibling_comment_users = []
# for c in self.parent.children.prefetch_related(
# 'created_by',
# 'created_by__emailrecipient',
# 'created_by__emailrecipient__thread_subscription',
# 'created_by__emailrecipient__comment_subscription'
# ):
# if (
# c != self
# and c.created_by not in sibling_comment_users
# and c.created_by.emailrecipient.thread_subscription
# and c.created_by.emailrecipient.thread_subscription.replies
# and c.created_by.emailrecipient.comment_subscription
# and c.created_by.emailrecipient.comment_subscription.replies
# ):
# sibling_comment_users.append(c.created_by)
# return parent_owners + sibling_comment_users
users = []
p = self.parent
if isinstance(p, Reply):
if (
p.created_by
and p.created_by.emailrecipient.reply_subscription.replies
and not p.created_by.emailrecipient.reply_subscription.none
and not p.created_by == self.created_by
):
users.append(p.created_by)
else:
if (
p.created_by
and p.created_by.emailrecipient.comment_subscription.replies
and not p.created_by.emailrecipient.comment_subscription.none
):
users.append(p.created_by)
return users
class Comment(BaseComment):
parent = models.ForeignKey(
Thread,
on_delete=models.SET_NULL,
related_name='comments',
blank=True,
null=True
)
replies = GenericRelation(Reply)
actions = GenericRelation(
'user.Action',
object_id_field='object_id',
content_type_field='content_type',
related_query_name='comments'
)
def __str__(self):
return '{} - {}'.format(self.created_by, self.plain_text)
@property
def paper(self):
thread = self.parent
if thread:
paper = thread.paper
return paper
@property
def post(self):
thread = self.parent
if thread:
post = thread.post
return post
@property
def hypothesis(self):
thread = self.parent
if thread:
hypothesis = thread.hypothesis
return hypothesis
@property
def unified_document(self):
thread = self.thread
paper = thread.paper
if paper:
return paper.unified_document
post = thread.post
if post:
return post.unified_document
hypothesis = thread.hypothesis
if hypothesis:
return hypothesis.unified_document
return None
@property
def thread(self):
thread = self.parent
return thread
@property
def children(self):
return self.replies.filter(is_removed=False)
@property
def owners(self):
return [self.created_by]
@property
def users_to_notify(self):
users = []
p = self.parent
if (
p.created_by
and p.created_by.emailrecipient.thread_subscription.comments
and not p.created_by.emailrecipient.thread_subscription.none
and not p.created_by == self.created_by
):
users.append(p.created_by)
return users
# TODO: No siblings for now. Do we need this?
# sibling_comment_users = []
# for c in self.parent.children.prefetch_related(
# 'created_by',
# 'created_by__emailrecipient',
# 'created_by__emailrecipient__thread_subscription'
# ):
# if (
# c != self
# and c.created_by not in sibling_comment_users
# and c.created_by.emailrecipient.thread_subscription
# and c.created_by.emailrecipient.thread_subscription.comments
# ):
# sibling_comment_users.append(c.created_by)
# return parent_owners + sibling_comment_users
| 1.820313
| 2
|
vyvyan/validate/__init__.py
|
downneck/vyvyan
| 0
|
12783158
|
<reponame>downneck/vyvyan
# Copyright 2015 WebEffects Network, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
vyvyan.validate
a library of validation methods
for various types of data
"""
# imports
import base64
import struct
import types
import re
# All of the models and sqlalchemy are brought in
# to simplify referencing
from vyvyan.vyvyan_models import *
class ValidationError(Exception):
pass
# Validates domain input data
def v_domain(domain):
"""
[description]
validates domain input data (probably not necessary)
[parameter info]
required:
domain: the domain we're trying to validate
[return value]
True/False based on success of validation
"""
# TODO: write some actual validation
return True
# Validates ssh2 pubkeys
def v_ssh2_pubkey(key):
"""
[description]
validates ssh2 public keys
[parameter info]
required:
key: the ssh2 public key we're trying to validate
[return value]
True/Exception based on success of validation
"""
try:
DSA_KEY_ID="ssh-dss"
RSA_KEY_ID="ssh-rsa"
if re.match(DSA_KEY_ID+'|'+RSA_KEY_ID, key):
k = key.split(' ')
else:
raise ValidationError("validate/v_ssh2_pubkey: invalid ssh2 key: %s" % key)
if k:
try:
data = base64.decodestring(k[1])
except IndexError:
raise ValidationError("validate/v_ssh2_pubkey: invalid ssh2 key: %s" % key)
int_len = 4
str_len = struct.unpack('>I', data[:int_len])[0] # this should return 7
if DSA_KEY_ID in key:
if data[int_len:int_len+str_len] == DSA_KEY_ID:
return True
else:
raise ValidationError("validate/v_ssh2_pubkey: invalid ssh2 key: %s" % key)
else:
if data[int_len:int_len+str_len] == RSA_KEY_ID:
return True
else:
raise ValidationError("validate/v_ssh2_pubkey: invalid ssh2 key: %s" % key)
else:
raise ValidationError("validate/v_ssh2_pubkey: invalid ssh2 key: %s" % key)
    except Exception:
raise ValidationError("validate/v_ssh2_pubkey: invalid ssh2 key: %s" % key)
# Validates UNIX uids
def v_uid(cfg, uid):
"""
[description]
validates UNIX UIDs
[parameter info]
required:
cfg: the config object. useful everywhere
uid: the UID we're trying to validate
[return value]
True/False based on success of validation
"""
if type(uid) == types.IntType:
if uid >= cfg.uid_start and uid <= cfg.uid_end:
return True
else:
cfg.log.debug("UID is outside the allowed range (%s to %s)" % (cfg.uid_start, cfg.uid_end))
raise ValidationError("UID is outside the allowed range (%s to %s)" % (cfg.uid_start, cfg.uid_end))
elif uid == None:
cfg.log.debug("UID is empty!")
raise ValidationError("UID is empty!")
else:
cfg.log.debug("UID must be an integer!")
raise ValidationError("UID must be an integer!")
# Looks for a UID in the db and returns true if present, false if absent
def v_uid_in_db(cfg, uid, domain=None):
"""
[description]
looks for a UID in the db
[parameter info]
required:
cfg: the config object. useful everywhere
uid: the UID we're trying to find
optional:
domain: the domain to look in
[return value]
True/False based on success of validation
"""
# make sure we're within operational parameters
v_uid(cfg, uid)
uidlist = []
# go get our list
if domain:
u = cfg.dbsess.query(Users).\
filter(Users.domain==domain).all()
else:
u = cfg.dbsess.query(Users).all()
# put em in a list
for userentry in u:
uidlist.append(userentry.uid)
# finally, check 'em
uid_set = set(uidlist)
if uid in uid_set:
return True
else:
return False
# Looks for a GID in the db and returns true if present, false if absent
def v_gid_in_db(cfg, gid, domain=None):
"""
[description]
looks for a GID in the db
[parameter info]
required:
cfg: the config object. useful everywhere
gid: the GID we're looking for
optional:
domain: the domain to look in
[return value]
True/False based on success of validation
"""
# make sure we're within operational parameters
v_gid(cfg, gid)
gidlist = []
# go get our list
if domain:
g = cfg.dbsess.query(Groups).\
filter(Groups.domain==domain).all()
else:
g = cfg.dbsess.query(Groups).all()
# put em in a list
for groupentry in g:
gidlist.append(groupentry.gid)
# finally, check 'em
gid_set = set(gidlist)
if gid in gid_set:
return True
else:
return False
# Validates UNIX gids
def v_gid(cfg, gid):
"""
[description]
validates UNIX GIDs
[parameter info]
required:
cfg: the config object. useful everywhere
gid: the GID we're trying to validate
[return value]
True/False based on success of validation
"""
if type(gid) == types.IntType:
if gid >= cfg.gid_start and gid <= cfg.gid_end:
return True
else:
cfg.log.debug("GID is outside the allowed range (%s to %s)" % (cfg.gid_start, cfg.gid_end))
raise ValidationError("GID is outside the allowed range (%s to %s)" % (cfg.gid_start, cfg.gid_end))
elif gid == None:
cfg.log.debug("GID is empty")
raise ValidationError("GID is empty")
else:
cfg.log.debug("GID must be an integer! GID: %s" % gid)
raise ValidationError("GID must be an integer! GID: %s" % gid)
# VERY basic validation of user- group- or host-name input
def v_name(name):
"""
[description]
VERY basic validation of user- group- or host-name input
"""
if not name:
raise ValidationError('v_name() called without a name!')
if re.search("[^A-Za-z0-9_\-.]", name):
raise ValidationError('name contains illegal characters! allowed characters are: A-Z a-z 0-9 _ - .')
if len(name) < 1:
raise ValidationError('too short! name must have more than 1 character')
return True
| 1.921875
| 2
|
beneficiaries/beneficiaries/doctype/beneficiary_request/beneficiary_request.py
|
baidalala/beneficiaries
| 0
|
12783159
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Baida and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe.model.naming import set_name_by_naming_series
from frappe import _, msgprint, throw
import frappe.defaults
from frappe.utils import flt, cint, cstr, today
from frappe.desk.reportview import build_match_conditions, get_filters_cond
from erpnext.utilities.transaction_base import TransactionBase
from erpnext.accounts.party import validate_party_accounts, get_dashboard_info, get_timeline_data # keep this
from frappe.contacts.address_and_contact import load_address_and_contact, delete_contact_and_address
from frappe.model.rename_doc import update_linked_doctypes
from frappe.model.mapper import get_mapped_doc
from frappe.model.document import Document
from datetime import datetime,date
from dateutil.relativedelta import relativedelta
from frappe.permissions import add_user_permission, remove_user_permission, \
set_user_permission_if_allowed, has_permission
from frappe.utils.password import update_password as _update_password
from frappe.utils import random_string
from frappe.utils.data import add_months
from frappe.utils import cint, cstr, formatdate, flt, getdate, nowdate, get_link_to_form
from erpnext.setup.doctype.item_group.item_group import get_item_group_defaults
from erpnext.stock import get_warehouse_account_map
from erpnext.assets.doctype.asset_category.asset_category import get_asset_category_account
from erpnext.accounts.utils import get_fiscal_year
class BeneficiaryRequest(Document):
def validate(self):
self.is_deserve()
# self.validate_values()
self.created_by = frappe.session.user
self.date_of_registration=date.today()
def validate_values(self):
        if (self.number_of_needed_members_in_family > self.number_of_family) :
            # Arabic message: "The number of dependent members is greater than the number of family members"
            frappe.throw('عدد الافراد المعالين اكبر من عدد افراد الاسرة')
        if (self.number_of_wives > self.number_of_family) :
            # Arabic message: "The number of wives is greater than the number of family members"
            frappe.throw('عدد الزوجات اكبر من عدد افراد الاسرة')
        if (self.the_number_of_household_workers > self.number_of_family) :
            # Arabic message: "The number of members working in the household is greater than the number of family members"
            frappe.throw('عدد الافراد العاملين في المنزل اكبر من عدد افراد الاسرة')
        if ( self.the_number_of_professional_workers > self.number_of_family) :
            # Arabic message: "The number of working members is greater than the number of family members"
            frappe.throw('عدد الافراد العاملين اكبر من عدد افراد الاسرة')
        if self.date_of_expired < self.date_of_issue:
            # Arabic message: "The ID expiry date is earlier than its issue date"
            frappe.throw('تاريخ انتهاء الهوية أقل من تاريخ اصدارها')
def get_max_number_of_members(self):
return frappe.db.sql("""select max(number_of_members) as members from `tabThe Base`""", as_dict=True)
def get_base(self):
max_member=self.get_max_number_of_members()[0].members
if self.number_of_needed_members_in_family:
            if self.number_of_needed_members_in_family > int(max_member):
members=max_member
else:
members=self.number_of_needed_members_in_family
"""
Returns list of active beneficiary based on selected criteria
and for which type exists
"""
return frappe.db.sql("""select live_base as live_base,rent_base as rent_base,rent_in_year as rent_in_year,rent_in_five_year as rent_in_five_year
from `tabThe Base` where number_of_members= %s""",members, as_dict=True)
else:
return
def is_deserve(self):
check_is_deserve = self.get_base()
if not check_is_deserve:
return
fee_sum=0
for m in self.get("fees"):
m.fee_in_year=flt(m.fee_in_month * 12)
fee_sum +=m.fee_in_year
self.fee_total=fee_sum
obl_sum=0
for m in self.get("obligation"):
obl_sum +=m.amount
self.obligations_total=obl_sum
result = self.fee_total - self.obligations_total
if (self.territory=="Unaizah" or self.territory=="عنيزة") and (self.nationality=="Saudi" or self.nationality=="Syrian" or self.nationality=="سوري" or
self.nationality=="سعودي")and result <= check_is_deserve[0].live_base:
self.deserve_according_to_base=True
self.live_base=check_is_deserve[0].live_base
if self.home_type== "Rent":
self.rent_base=check_is_deserve[0].rent_base
else:
self.rent_base=0
self.rent_in_year=check_is_deserve[0].rent_in_year
self.rent_in_five_year=check_is_deserve[0].rent_in_five_year
elif (self.territory=="Unaizah" or self.territory=="عنيزة") and (self.nationality=="Saudi" or self.nationality=="Syrian" or self.nationality=="سوري" or
self.nationality=="سعودي" ) and result >= check_is_deserve[0].live_base and result <= check_is_deserve[0].rent_base:
self.deserve_according_to_base=True
self.live_base=0
if self.home_type== "Rent":
self.rent_base=check_is_deserve[0].rent_base
else:
self.rent_base=0
self.rent_in_year=check_is_deserve[0].rent_in_year
self.rent_in_five_year=check_is_deserve[0].rent_in_five_year
else:
self.deserve_according_to_base=False
self.live_base=0
self.rent_base=0
self.rent_in_five_year=0
self.rent_in_year=0
def add_beneficiary(self):
if self.employee==1:
beneficiary = frappe.new_doc('Beneficiary')
beneficiary.beneficiary_name = self.beneficiary_name
beneficiary.beneficiary_request = self.name
beneficiary.beneficiary_account=frappe.db.get_single_value('Beneficiary Settings', 'beneficiary_account')
beneficiary.marital_status = self.marital_status
beneficiary.nationality = self.nationality
beneficiary.territory=self.territory
beneficiary.address=self.address
beneficiary.gender=self.gender
beneficiary.phone=self.phone
beneficiary.mobile=self.mobile
beneficiary.email=self.email
beneficiary.beneficiary_state=self.beneficiary_state
beneficiary.owner=self.email
pwd=<PASSWORD>(10)
beneficiary.default_password=<PASSWORD>
beneficiary.id_type=self.id_type
beneficiary.the_number=self.the_number
beneficiary.date_of_issue=self.date_of_issue
beneficiary.date_of_expired=self.date_of_expired
for f in self.get("fees"):
beneficiary.append('fees', dict(fee_type=f.fee_type, fee_in_year=f.fee_in_year,fee_in_month=f.fee_in_month))
beneficiary.fee_total=self.fee_total
for ob in self.get("obligation"):
beneficiary.append('beneficiary_obligation', dict(beneficiary_obligation=ob.beneficiary_obligation,
obligation_to=ob.obligation_to,amount=ob.amount,number_of_pays=ob.number_of_pays,way_of_pay=ob.way_of_pay,reason_of_obligation=ob.reason_of_obligation,attach=ob.attach))
beneficiary.obligations_total=self.obligations_total
beneficiary.home_type=self.home_type
beneficiary.number_of_rooms=self.number_of_rooms
beneficiary.home_attach=self.home_type_attachment
beneficiary.home_state=self.state_of_home
beneficiary.number_of_family=self.number_of_family
beneficiary.number_of_wives=self.number_of_wives
beneficiary.number_of_needed_members_in_family=self.number_of_needed_members_in_family
beneficiary.the_number_of_professional_workers=self.the_number_of_professional_workers
beneficiary.the_number_of_household_workers=self.the_number_of_household_workers
beneficiary.number_of_unemployed_members=self.number_of_unemployed_members
beneficiary.beneficiary_notes=self.beneficiary_notes
beneficiary.deserve_according_to_base=self.deserve_according_to_base
beneficiary.live_base=self.live_base
beneficiary.rent_base=self.rent_base
beneficiary.rent_in_year=self.rent_in_year
beneficiary.rent_in_five_year=self.rent_in_five_year
for f in self.get("family_own"):
beneficiary.append('family_own', dict(own=f.own, note=f.note))
if not frappe.db.exists("Beneficiary", beneficiary.name):
beneficiary.insert()
# frappe.msgprint('Beneficiary Inserted Done :)')
# create contact from beneficiary
contact = frappe.new_doc('Contact')
contact.first_name = self.beneficiary_name
contact.email_id = self.email
contact.phone = self.phone
contact.mobile_no = self.mobile
contact.is_primary_contact = 1
contact.append('links', dict(link_doctype='Beneficiary', link_name=beneficiary.name))
if self.email:
contact.append('email_ids', dict(email_id=self.email, is_primary=1))
if self.phone:
contact.append('phone_nos', dict(phone=self.phone, is_primary_mobile_no=1))
contact.flags.ignore_permissions = self.flags.ignore_permissions
contact.autoname()
if not frappe.db.exists("Beneficiary", contact.name):
contact.insert()
# frappe.msgprint('Beneficiary contact Inserted Done :)')
# self.has_contact=1
# if self.has_contact==0:
# frappe.throw("Beneficiary doesn't add to contacts list",raise_exception)
# if self.has_contact==1:
user = frappe.get_doc({
"doctype": "User",
"first_name": self.beneficiary_name,
"email": self.email,
"language":"ar",
"user_type": "Website User",
"send_welcome_email": 1,
"role_profile_name":"Beneficiary"
}).insert(ignore_permissions = True)
frappe.get_doc("User", self.email).add_roles("Beneficiary")
_update_password(user=self.email, pwd=<PASSWORD>, logout_all_sessions=0)
# user.new_password="<PASSWORD>"
# self.is_user=1
# if self.is_user==0:
# frappe.throw("Beneficiary doesn't add to Users list",raise_exception)
# if self.is_user==1 and self.has_contact==1:
userpermission = frappe.get_doc({
"doctype": "User Permission",
"user": user.email,
"for_value": beneficiary.name,
"allow": "Beneficiary",
"is_default":1,
"apply_to_all_doctypes":0,
"applicable_for":"Beneficiary"
}).insert()
# if frappe.db.exists("Beneficiary", beneficiary.name) and frappe.db.exists("Contact", contact.name) and frappe.db.exists("User", user.email) and frappe.db.exists("User Permission", userpermission.user):
self.inserted=True
# else:
# self.inserted=False
# self.has_user_permission=1
# if self.has_user_permission==0:
# frappe.throw("Beneficiary doesn't add to User Permission list",raise_exception)
@frappe.whitelist()
def set_multiple_request(names):
names = json.loads(names)
# frappe.msgprint(names)
for name in names:
req = frappe.get_doc("Beneficiary Request", name)
if not req.inserted:
add_beneficiary(req)
req.save()
else:
frappe.msgprint(req.beneficiary_name + "Already Beneficiary")
@frappe.whitelist()
def add_beneficiary(self):
if self.employee==1:
beneficiary = frappe.new_doc('Beneficiary')
beneficiary.beneficiary_name = self.beneficiary_name
beneficiary.beneficiary_request = self.name
beneficiary.beneficiary_account=frappe.db.get_single_value('Beneficiary Settings', 'beneficiary_account')
beneficiary.marital_status = self.marital_status
beneficiary.nationality = self.nationality
beneficiary.territory=self.territory
beneficiary.address=self.address
beneficiary.gender=self.gender
beneficiary.phone=self.phone
beneficiary.mobile=self.mobile
beneficiary.email=self.email
beneficiary.beneficiary_state=self.beneficiary_state
beneficiary.owner=self.email
pwd=random_string(10)
beneficiary.default_password=<PASSWORD>
beneficiary.id_type=self.id_type
beneficiary.the_number=self.the_number
beneficiary.date_of_issue=self.date_of_issue
beneficiary.date_of_expired=self.date_of_expired
for f in self.get("fees"):
beneficiary.append('fees', dict(fee_type=f.fee_type, fee_in_year=f.fee_in_year,fee_in_month=f.fee_in_month))
beneficiary.fee_total=self.fee_total
for ob in self.get("obligation"):
beneficiary.append('beneficiary_obligation', dict(beneficiary_obligation=ob.beneficiary_obligation,
obligation_to=ob.obligation_to,amount=ob.amount,number_of_pays=ob.number_of_pays,way_of_pay=ob.way_of_pay,reason_of_obligation=ob.reason_of_obligation,attach=ob.attach))
beneficiary.obligations_total=self.obligations_total
beneficiary.home_type=self.home_type
beneficiary.number_of_rooms=self.number_of_rooms
beneficiary.home_attach=self.home_type_attachment
beneficiary.home_state=self.state_of_home
beneficiary.number_of_family=self.number_of_family
beneficiary.number_of_wives=self.number_of_wives
beneficiary.number_of_needed_members_in_family=self.number_of_needed_members_in_family
beneficiary.the_number_of_professional_workers=self.the_number_of_professional_workers
beneficiary.the_number_of_household_workers=self.the_number_of_household_workers
beneficiary.number_of_unemployed_members=self.number_of_unemployed_members
beneficiary.beneficiary_notes=self.beneficiary_notes
beneficiary.deserve_according_to_base=self.deserve_according_to_base
beneficiary.live_base=self.live_base
beneficiary.rent_base=self.rent_base
beneficiary.rent_in_year=self.rent_in_year
beneficiary.rent_in_five_year=self.rent_in_five_year
for f in self.get("family_own"):
beneficiary.append('family_own', dict(own=f.own, note=f.note))
if not frappe.db.exists("Beneficiary", beneficiary.name):
beneficiary.insert()
# frappe.msgprint('Beneficiary Inserted Done :)')
# create contact from beneficiary
contact = frappe.new_doc('Contact')
contact.first_name = self.beneficiary_name
contact.email_id = self.email
contact.phone = self.phone
contact.mobile_no = self.mobile
contact.is_primary_contact = 1
contact.append('links', dict(link_doctype='Beneficiary', link_name=beneficiary.name))
if self.email:
contact.append('email_ids', dict(email_id=self.email, is_primary=1))
if self.phone:
contact.append('phone_nos', dict(phone=self.phone, is_primary_mobile_no=1))
contact.flags.ignore_permissions = self.flags.ignore_permissions
contact.autoname()
if not frappe.db.exists("Beneficiary", contact.name):
contact.insert()
# frappe.msgprint('Beneficiary contact Inserted Done :)')
# self.has_contact=1
# if self.has_contact==0:
# frappe.throw("Beneficiary doesn't add to contacts list",raise_exception)
# if self.has_contact==1:
user = frappe.get_doc({
"doctype": "User",
"first_name": self.beneficiary_name,
"email": self.email,
"user_type": "Website User",
"send_welcome_email": 1,
"role_profile_name":"Beneficiary"
}).insert(ignore_permissions = True)
frappe.get_doc("User", self.email).add_roles("Beneficiary")
_update_password(user=self.email, pwd=<PASSWORD>, logout_all_sessions=0)
# user.new_password="<PASSWORD>"
# self.is_user=1
# if self.is_user==0:
# frappe.throw("Beneficiary doesn't add to Users list",raise_exception)
# if self.is_user==1 and self.has_contact==1:
userpermission = frappe.get_doc({
"doctype": "User Permission",
"user": user.email,
"for_value": beneficiary.name,
"allow": "Beneficiary",
"is_default":1,
"apply_to_all_doctypes":0,
"applicable_for":"Beneficiary"
}).insert()
# if frappe.db.exists("Beneficiary", beneficiary.name) and frappe.db.exists("Contact", contact.name) and frappe.db.exists("User", user.email) and frappe.db.exists("User Permission", userpermission.user):
self.inserted=True
# else:
# self.inserted=False
# self.has_user_permission=1
# if self.has_user_permission==0:
# frappe.throw("Beneficiary doesn't add to User Permission list",raise_exception)
| 1.367188
| 1
|
tests/test_utils.py
|
OSeMOSYS/otoole
| 8
|
12783160
|
<reponame>OSeMOSYS/otoole<filename>tests/test_utils.py
import pytest
from unittest.mock import MagicMock
from otoole.utils import extract_config, read_packaged_file
class TestDataPackageSchema:
@pytest.mark.xfail
def test_read_datapackage_schema_into_config(self):
schema = read_packaged_file("datapackage.json", "otoole.preprocess")
mock = MagicMock()
mock.__getitem__.return_value = 0
actual = extract_config(schema, mock)
expected = read_packaged_file("config.yaml", "otoole.preprocess")
assert actual == expected
| 2.234375
| 2
|
x_rebirth_station_calculator/station_data/modules/warhead_forge.py
|
Phipsz/XRebirthStationCalculator
| 1
|
12783161
|
<filename>x_rebirth_station_calculator/station_data/modules/warhead_forge.py
from x_rebirth_station_calculator.station_data.station_base import Module
from x_rebirth_station_calculator.station_data.station_base import Production
from x_rebirth_station_calculator.station_data.station_base import Consumption
from x_rebirth_station_calculator.station_data import wares
names = {'L044': '<NAME>',
'L049': 'Gefechtskopfschmiede'}
productions = {'al': [Production(wares.WarheadComponents, 40)],
'ar': [Production(wares.WarheadComponents, 40)]}
consumptions = {'al': [Consumption(wares.AntimatterCells, 480),
Consumption(wares.ChemicalCompounds, 40),
Consumption(wares.EnergyCells, 480),
Consumption(wares.FoodRations, 320),
Consumption(wares.Microchips, 40),
Consumption(wares.PlasmaCells, 160),
Consumption(wares.QuantumTubes, 20),
Consumption(wares.RefinedMetals, 160),
Consumption(wares.MedicalSupplies, 100, True),
Consumption(wares.Spacefuel, 80, True)],
'ar': [Consumption(wares.AntimatterCells, 440),
Consumption(wares.BioElectricNeuronGel, 40),
Consumption(wares.BoFu, 160),
Consumption(wares.ChemicalCompounds, 40),
Consumption(wares.EMSpectrometer, 20),
Consumption(wares.EnergyCells, 760),
Consumption(wares.RefinedMetals, 220),
Consumption(wares.MedicalSupplies, 100, True),
Consumption(wares.Spacefuel, 100, True)]}
WarheadForge = Module(names, productions, consumptions)
| 1.765625
| 2
|
bot.py
|
SpencerDotGray/TronTracker
| 0
|
12783162
|
from discord.ext import commands
import discord
from datetime import datetime, time, timedelta
import asyncio
import requests
import json
import os
client = discord.Client()
jontron_url = 'https://www.youtube.com/user/JonTronShow/videos'
try:
with open('./auth_tokens.json', 'r') as filein:
token = json.load(filein)['token']
except FileNotFoundError:
token = os.environ.get('token')
bot = commands.Bot(command_prefix="&")
current_title = 'Simplifying Corporate Logos - JonTron'
WHEN = time(16, 00, 00)  # 16:00 UTC (noon US Eastern during daylight saving time)
channel_id = 290915716255711232 # Put your channel id here
date_mapper = {
'Jan': 'January',
'Feb': 'February',
'Mar': 'March',
'Apr': 'April',
'May': 'May',
'Jun': 'June',
'Jul': 'July',
'Aug': 'August',
'Sep': 'September',
'Oct': 'October',
'Nov': 'November',
'Dec': 'December'
}
def get_jontron_video_title(content):
try:
focused_content = content[ content.index('gridVideoRenderer'): ]
focused_content = focused_content[ focused_content.index('"text":') + len('"text":'): ]
left = focused_content.index('"')+1
right = focused_content[ focused_content.index('"')+1: ].index('"')+1
title_name = focused_content[ left:right ]
return title_name
except ValueError:
print('ERROR: could not get jontron video title\nSubstring did not work')
def get_jontron_video_image_url(content):
try:
focused_content = content[ content.index('gridVideoRenderer'): ]
focused_content = focused_content[ focused_content.index('"url":') + len('"url":'): ]
left = focused_content.index('"')+1
right = focused_content[ focused_content.index('"')+1: ].index('"')+1
picture_url = focused_content [ left:right ]
return picture_url
except ValueError:
print('ERROR: could not get jontron video image\nSubstring did not work')
def get_jontron_video_date(content):
try:
focused_content = content[ content.index('gridVideoRenderer'): ]
focused_content = focused_content[ focused_content.index('"text":') + len('"text":'): ]
left = focused_content.index('"')+1
right = focused_content[ focused_content.index('"')+1: ].index('"')+1
title_name = focused_content[ left:right ]
focused_content = focused_content[ focused_content.index('"url":') + len('"url":'): ]
left = focused_content.index('"')+1
right = focused_content[ focused_content.index('"')+1: ].index('"')+1
watch_url = f'https://www.youtube.com{focused_content [ left:right ]}'
results = requests.get(watch_url)
content = results.text
focused_content = content[ content.index('"dateText":{"simpleText":') + len('"dateText":{"simpleText":'): ]
left = focused_content.index('"')+1
right = focused_content[ focused_content.index('"')+1: ].index('"')+1
video_date = focused_content[ left:right ]
month = date_mapper[video_date.split(' ')[0]]
date = video_date.replace(video_date.split(' ')[0], month)
date = datetime.strptime(date,"%B %d, %Y")
return date
except ValueError:
print('ERROR: could not get jontron video date\nSubstring did not work')
def get_jontron_watch_url(content):
try:
focused_content = content[ content.index('gridVideoRenderer'): ]
focused_content = focused_content[ focused_content.index('"text":') + len('"text":'): ]
left = focused_content.index('"')+1
right = focused_content[ focused_content.index('"')+1: ].index('"')+1
title_name = focused_content[ left:right ]
focused_content = focused_content[ focused_content.index('"url":') + len('"url":'): ]
left = focused_content.index('"')+1
right = focused_content[ focused_content.index('"')+1: ].index('"')+1
watch_url = f'https://www.youtube.com{focused_content [ left:right ]}'
return watch_url
except ValueError:
print('ERROR: could not get jontron video url\nSubstring did not work')
def get_jontron():
content = requests.get(jontron_url).text
return {
"title": get_jontron_video_title(content),
"image": get_jontron_video_image_url(content),
"date": get_jontron_video_date(content),
"url": get_jontron_watch_url(content)
}
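# Design note: get_jontron() scrapes youtube.com's rendered page, so it is tied to the
# literal '"gridVideoRenderer"' / '"text"' / '"url"' layout of that markup, and the
# helpers above only log and return None when the layout changes. The official
# YouTube Data API would be a sturdier, though out-of-scope, alternative.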
async def called_once_a_day(): # Fired every day
await bot.wait_until_ready() # Make sure your guild cache is ready so the channel can be found via get_channel
channel = bot.get_channel(channel_id) # Note: It's more efficient to do bot.get_guild(guild_id).get_channel(channel_id) as there's less looping involved, but just get_channel still works fine
jontron = get_jontron()
video_em = discord.Embed()
video_em.set_image(url=jontron['image'])
await channel.send(f'Good Afternoon!\nIt\'s been {(datetime.now() - jontron["date"]).days} days since JonTron uploaded "{jontron["title"]}"', embed=video_em)
async def upload_check_background_task():
global current_title
while True:
jontron = get_jontron()
if (current_title != jontron['title']):
await bot.wait_until_ready() # Make sure your guild cache is ready so the channel can be found via get_channel
channel = bot.get_channel(channel_id)
video_em = discord.Embed()
video_em.set_image(url=jontron['image'])
await channel.send(f'JONTRON HAS UPLOADED\nTHIS IS NOT A DRILL!!!\n:rotating_light::rotating_light::rotating_light::rotating_light::rotating_light::rotating_light::rotating_light::rotating_light::rotating_light::rotating_light::rotating_light::rotating_light:\n{jontron["url"]}', embed=video_em)
current_title = jontron['title']
else:
print('No JonTron Upload :(')
await asyncio.sleep(3600)
async def morning_upload_background_task():
now = datetime.utcnow()
if now.time() > WHEN: # Make sure loop doesn't start after {WHEN} as then it will send immediately the first time as negative seconds will make the sleep yield instantly
tomorrow = datetime.combine(now.date() + timedelta(days=1), time(0))
seconds = (tomorrow - now).total_seconds() # Seconds until tomorrow (midnight)
await asyncio.sleep(seconds) # Sleep until tomorrow and then the loop will start
while True:
now = datetime.utcnow() # You can do now() or a specific timezone if that matters, but I'll leave it with utcnow
        target_time = datetime.combine(now.date(), WHEN)  # Today's target time, 16:00 UTC
seconds_until_target = (target_time - now).total_seconds()
await asyncio.sleep(seconds_until_target) # Sleep until we hit the target time
await called_once_a_day() # Call the helper function that sends the message
tomorrow = datetime.combine(now.date() + timedelta(days=1), time(0))
seconds = (tomorrow - now).total_seconds() # Seconds until tomorrow (midnight)
await asyncio.sleep(seconds) # Sleep until tomorrow and then the loop will start a new iteration
@bot.command()
async def JontronPlz(ctx):
jontron = get_jontron()
video_em = discord.Embed()
video_em.set_image(url=jontron['image'])
await ctx.channel.send(f'It\'s been {(datetime.now() - jontron["date"]).days} days since JonTron uploaded "{jontron["title"]}"', embed=video_em)
if __name__ == "__main__":
print(f'Running message at: {WHEN.hour}:{WHEN.minute}:{WHEN.second}')
bot.loop.create_task(morning_upload_background_task())
bot.loop.create_task(upload_check_background_task())
bot.run(token)
| 2.765625
| 3
|
programming-laboratory-I/ltjc/porta.py
|
MisaelAugusto/computer-science
| 0
|
12783163
|
# coding: utf-8
# Student: <NAME>
# Student ID: 117110525
# Problem: Porta Eletrônica (Electronic Door)
registrados = []
while True:
    entrada = input().split()
    if entrada[0] == "S":
        break
    else:
        categoria = entrada[1]
        if entrada[0] == "R":
            registrados.append(categoria[0])
        else:
            n = 0
            for r in registrados:
                if r == categoria[0]:
                    n += 1
            print(n)
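# Illustrative session (hypothetical input; only "R" and "S" are named in the code): a
# line "R aluno" registers the first letter 'a', any other command line such as "C abc"
# then prints how many registered categories share its first letter (here 1), and a
# line starting with "S" stops the program.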
| 3.578125
| 4
|
pymrt/visualizer/casas/_gmphd_3d.py
|
TinghuiWang/pymrt
| 1
|
12783164
|
import sys
import numpy as np
from mayavi import mlab
from mayavi.scripts import mayavi2
from traits.api import HasTraits, Button, Instance
from traitsui.api import View, Item
from ._plot3d import plot3d_embeddings
def plot3d_gmphd(dataset, embeddings, grid, gm_s=None, gm_list=None,
observation=None, title=None, contours=4,
log_plot=True):
"""3D plot of CASAS sensor embedding with GM-PHD sampled by grid.
Multi-target PHD represented either by scalar ``gm_s`` or Gaussian Mixture
``gm_list`` is plotted as 3D contour graph with mayavi.
Current observations and sensor embedding are plotted as spheres as well.
Args:
dataset (:obj:`~pymrt.casas.CASASDataset`): CASAS smart home dataset.
embeddings (:obj:`numpy.ndarray`): 3D sensor vector embedding of shape
(num_sensors, 3) where num_sensors corresponds to the length of
``dataset.sensor_list``.
grid (:obj:`numpy.ndarray`): 3D mesh generated by :func:`numpy.mgrid` or
:func:`numpy.meshgrid`.
gm_s (:obj:`numpy.ndarray`): Multi-target PHD scalar sampled at each
point defined by the 3D mesh grid ``grid``.
gm_list (:obj:`list`): List of
:obj:`~pymrt.tracking.utils.GaussianComponent` representing the
multi-target PHD at the moment. If ``gm_s`` is None, this list is
used to generate the PHD scalar for plotting.
observation (:obj:`list`): List of observations to be plotted. Each
observation is a :obj:`numpy.ndarray` of shape (n, 1). It has to
be the embedding vector of one of the sensor in the dataset.
title (:obj:`string`): Plot title.
contours (:obj:`int`): Number of contour surfaces to draw.
log_plot (:obj:`bool`): Plot ``gm_s`` in log scale.
"""
if gm_s is None:
if gm_list is None:
raise ValueError("Must provide 3D sampled GM scalar gm_s or a "
"Gaussian Mixture list")
else:
print('Sampling PHD in 3D space')
from ...tracking.utils import gm_calculate
gm_s = gm_calculate(gm_list=gm_list, grid=grid)
if title is None:
title = 'PHD'
print('Start Plotting with Mayavi')
figure = mlab.figure(dataset.get_name() + ' ' + title)
if log_plot:
        contour_s = np.log(gm_s + np.finfo(float).tiny)
else:
contour_s = gm_s
# Plot Contour Surf first
contour = mlab.contour3d(
grid[0],
grid[1],
grid[2],
contour_s,
contours=contours,
transparent=True,
opacity=0.5
)
mlab.colorbar(contour, title='PHD', orientation='vertical')
_, points = plot3d_embeddings(dataset, embeddings, figure=figure)
if observation is not None:
obs_array = np.block(observation).T
obs_points = mlab.points3d(
obs_array[:, 0], obs_array[:, 1], obs_array[:, 2],
scale_factor=0.03, color=(0, 0, 1)
)
mlab.show()
def plot3d_gmphd_track(dataset, embeddings, grid, gm_s_list=None,
gm_list_list=None, observation_list=None,
title=None, contours=4, log_plot=True):
""" 3D plot of CASAS sensor embedding with GM-PHD sampled by grid.
Multi-target PHD represented either by scalar ``gm_s`` or Gaussian Mixture
``gm_list`` is plotted as 3D contour graph with mayavi.
Current observations and sensor embedding are plotted as spheres as well.
    It wraps the whole sequence in a mayavi application, so the user can step
    back and forth in time and visually follow how the PHD evolves.
Args:
dataset (:obj:`~pymrt.casas.CASASDataset`): CASAS smart home dataset.
embeddings (:obj:`numpy.ndarray`): 3D sensor vector embedding of shape
(num_sensors, 3) where num_sensors corresponds to the length of
``dataset.sensor_list``.
grid (:obj:`numpy.ndarray`): 3D mesh generated by :func:`numpy.mgrid` or
:func:`numpy.meshgrid`.
gm_s_list (:obj:`list`): List of PHD scalars at each time step.
gm_list_list (:obj:`list`): List of Gaussian Mixtures at each time step.
If ``gm_s_list`` is None, it is used along with ``grid`` to generate
the PHD scalar at each time step.
observation_list (:obj:`list`): List of observations at each time step.
title (:obj:`string`): Plot title.
contours (:obj:`int`): Number of contour surfaces to draw.
log_plot (:obj:`bool`): Plot ``gm_s`` in log scale.
"""
if gm_s_list is None:
if gm_list_list is None:
raise ValueError("Must provide 3D sampled GM scalar gm_s or a "
"Gaussian Mixture list")
else:
print('Sampling PHD in 3D space')
from ...tracking.utils import gm_calculate
gm_s_list = []
i = 0
for gm_list in gm_list_list:
                sys.stdout.write('calculate gm_scalar for step %d\n' % i)
gm_s_list.append(gm_calculate(
gm_list=gm_list, grid=grid
))
i += 1
if title is None:
title = 'PHD'
print('Start Plotting with Mayavi')
class Controller(HasTraits):
next_frame = Button('Next Frame')
previous_frame = Button('Previous Frame')
view = View(
Item(name='next_frame'),
Item(name='previous_frame')
)
current_frame = 0
play_state = False
def _next_frame_changed(self, value):
"""Goto next frame"""
if self.current_frame + 1 < len(gm_s_list):
self.current_frame += 1
self.update_frame()
def _previous_frame_changed(self, value):
"""Goto previous frame"""
if self.current_frame - 1 >= 0:
self.current_frame -= 1
self.update_frame()
def update_frame(self):
print('Frame %d' % self.current_frame)
if log_plot:
contour_s = np.log(
                    gm_s_list[self.current_frame] + np.finfo(float).tiny
)
else:
contour_s = gm_s_list[self.current_frame]
self.phd_contour.mlab_source.set(
scalars=contour_s
)
self.color_vector[:] = 0.
if observation_list is not None:
obs_array = observation_list[self.current_frame]
obs_index = [
np.where(
np.all(embeddings == sensor_vec.flatten(), axis=1)
)[0][0]
for sensor_vec in obs_array
]
self.color_vector[obs_index] = 1.
self.sensor_points.mlab_source.dataset.point_data.scalars = \
self.color_vector
mlab.draw()
@mayavi2.standalone
def main_view():
"""Example showing how to view a 3D numpy array in mayavi2.
"""
figure = mlab.figure(dataset.get_name() + ' ' + title)
if log_plot:
            contour_s = np.log(gm_s_list[0] + np.finfo(float).tiny)
else:
contour_s = gm_s_list[0]
# Plot Contour Surf first
contour = mlab.contour3d(
grid[0],
grid[1],
grid[2],
contour_s,
contours=contours,
transparent=True,
opacity=0.5
)
mlab.colorbar(contour, title='PHD', orientation='vertical')
_, points = plot3d_embeddings(dataset, embeddings, figure=figure)
points.glyph.scale_mode = 'scale_by_vector'
points.mlab_source.dataset.point_data.vectors = np.tile(
np.ones(embeddings.shape[0]), (3, 1))
color_vector = np.zeros(embeddings.shape[0])
points.mlab_source.dataset.point_data.scalars = color_vector
if observation_list is not None:
obs_array = observation_list[0]
obs_index = [
np.where(
np.all(embeddings == sensor_vec.flatten(), axis=1)
)[0][0]
for sensor_vec in obs_array
]
color_vector[obs_index] = 1.
computation = Controller(
sensor_points=points,
phd_contour=contour,
color_vector=color_vector,
figure=figure
)
computation.edit_traits()
main_view()
| 2.421875
| 2
|
yatube/apps/posts/admin.py
|
azharkih/PetBlog
| 1
|
12783165
|
<filename>yatube/apps/posts/admin.py<gh_stars>1-10
from django.contrib import admin
from .models import Comment, Follow, Group, Post
class PostAdmin(admin.ModelAdmin):
""" Класс PostAdmin используется для конфигурации отображения модели Post
в админ-панели.
Атрибуты класса
--------
list_display : Tuple[str]
Список отображаемых полей
search_fields : Tuple[str]
Список полей по которым осуществляется поиск
list_filter : Tuple[str]
Список полей по которым может применен фильтр
empty_value_display : str
Значение отображаемое вместо пустой строки.
"""
list_display = ('pk', 'text', 'pub_date', 'author', 'group')
search_fields = ('text',)
list_filter = ('pub_date',)
empty_value_display = '-пусто-'
class GroupAdmin(admin.ModelAdmin):
""" Класс GroupAdmin используется для конфигурации отображения модели Group
в админ-панели.
Атрибуты класса
--------
list_display : Tuple[str]
Список отображаемых полей
list_filter : Tuple[str]
Список полей по которым может применен фильтр
empty_value_display : str
Значение отображаемое вместо пустой строки.
"""
list_display = ('slug', 'title', 'description')
search_fields = ('title', 'description',)
empty_value_display = '-пусто-'
class FollowAdmin(admin.ModelAdmin):
""" Класс FollowAdmin используется для конфигурации отображения модели
Follow в админ-панели.
Атрибуты класса
--------
list_display : Tuple[str]
Список отображаемых полей
search_fields : Tuple[str]
Список полей по которым осуществляется поиск
empty_value_display : str
Значение отображаемое вместо пустой строки.
"""
list_display = ('user', 'author')
list_filter = ('user', 'author',)
empty_value_display = '-пусто-'
class CommentAdmin(admin.ModelAdmin):
""" Класс CommentAdmin используется для конфигурации отображения модели
Group в админ-панели.
Атрибуты класса
--------
list_display : Tuple[str]
Список отображаемых полей
search_fields : Tuple[str]
Список полей по которым осуществляется поиск
empty_value_display : str
Значение отображаемое вместо пустой строки.
"""
list_display = ('post', 'author', 'text')
search_fields = ('author',)
empty_value_display = '-пусто-'
admin.site.register(Post, PostAdmin)
admin.site.register(Group, GroupAdmin)
admin.site.register(Follow, FollowAdmin)
admin.site.register(Comment, CommentAdmin)
| 2.40625
| 2
|
misc.py
|
soerenjalas/Laser
| 0
|
12783166
|
import numpy as np
def gauss2D(x, y, fwhmx, fwhmy, x0=0, y0=0, offset=0, order=1, int_FWHM=True):
"""
Define a (super-)Gaussian 2D beam. Identical to laser.misc.gauss2D.
Parameters
----------
x: float 2D np.array
Horizontal axis of the Gaussian
y: float 2D np.array
Vertical axis of the Gaussian
fwhmx: float
Horizontal Full Width at Half Maximum
fwhmy: float
Vertical Full Width at Half Maximum
x0: float, optional
Horizontal center position of the Gaussian
y0: float, optional
Vertical center position of the Gaussian
offset: float, optional
Amplitude offset of the Gaussian
order: int, optional
order of the super-Gaussian function.
Defined as: exp( - ( x**2 + y**2 )**order )
int_FWHM: boolean, optional
If True, the FWHM is the FWHM of the square of the Gaussian (intensity).
If False, it is the FWHM of the Gaussian directly (electric field).
"""
coeff = 1.0
if int_FWHM:
coeff = 0.5
return np.exp(-np.log(2) * coeff * ((2 * (x - x0) / fwhmx)**2 + (2 * (y - y0) / fwhmy)**2)**order) + offset
def gauss1D(x, fwhm, x0=0, offset=0, order=1, int_FWHM=True):
"""
    Define a (super-)Gaussian 1D beam. Identical to laser.misc.gauss1D.
Parameters
----------
x: float 1D np.array
Axis of the Gaussian
fwhm: float
Full Width at Half Maximum
x0: float, optional
Center position of the Gaussian
offset: float, optional
Amplitude offset of the Gaussian
order: int, optional
order of the super-Gaussian function.
Defined as: exp( - ( x**2 )**order )
int_FWHM: boolean, optional
If True, the FWHM is the FWHM of the square of the Gaussian (intensity).
If False, it is the FWHM of the Gaussian directly (electric field).
"""
coeff = 1.0
if int_FWHM:
coeff = 0.5
return np.exp(-np.log(2) * coeff * ((2 * (x - x0) / fwhm)**2)**order) + offset
def cart2pol(x, y):
    """Convert cartesian to polar coordinates"""
    z = x + 1j * y
    return np.abs(z), np.angle(z)
def pol2cart(r, theta):
    """Convert polar to cartesian coordinates"""
    z = r * np.exp(1j * theta)
    return np.real(z), np.imag(z)
def array_trim(ar):
"""Trim zeros of 2D map"""
ar_trim = ar.copy()
ar_trim = ar_trim[:, ar_trim.any(axis=0)] # trim columns
ar_trim = ar_trim[ar_trim.any(axis=1), :] # trim rows
return ar_trim
def vect(N):
"""Returns a centered array between -0.5 and 0.5"""
return np.linspace(0, N, num=N) / N - 0.5
def norm(a):
"""Normalise an array by it's maximum value"""
return a / np.max(np.abs(a))
def text_progress_bar(iteration, num_iteration):
"""Displays a progress bar with the print function"""
return print('|' * (iteration + 1) + '.' * (num_iteration - iteration - 1) + ' %.1f %%' % ((iteration + 1) / num_iteration * 100), end='\r')
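# Minimal usage sketch for gauss2D / gauss1D above; the grid extent, sample counts
# and FWHM values are arbitrary example choices, not values from any real setup.
if __name__ == "__main__":
    x = np.linspace(-5.0, 5.0, 201)
    y = np.linspace(-5.0, 5.0, 201)
    X, Y = np.meshgrid(x, y)
    beam_2d = gauss2D(X, Y, fwhmx=2.0, fwhmy=3.0)   # 2D intensity profile
    beam_1d = gauss1D(x, fwhm=2.0, order=2)         # super-Gaussian line-out
    # Both peak at ~1 in the center since offset defaults to 0.
    print(beam_2d.max(), beam_1d.max())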
| 3.265625
| 3
|
point_2_screen.py
|
kenkyusha/eyeGazeToScreen
| 8
|
12783167
|
#!/usr/bin/env python
from typing import Optional
import datetime
import logging
import pathlib
import cv2
import numpy as np
import yacs.config
from gaze_estimation.gaze_estimator.common import (Face, FacePartsName,
Visualizer)
from gaze_estimation.utils import load_config
from gaze_estimation import GazeEstimationMethod, GazeEstimator
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
import pdb
import pickle
import time
import imutils
import sys
import os
import draw_utils
from helper_fn import point_to_screen, dump_dict, calc_metrics, round_tup
from screen_conf import *
# FLAGS PARAMETERS
#------------------------------------------------
fpath = 'recs/'
rgb_fp = 'det/'
# AVERAGING OVER GAZE VALUES TOGGLE
#------------------------------------------------
GAZE_AVG_FLAG = 0
num_frames = 3 # num of frames to average over
#------------------------------------------------
# AVERAGING OVER LANDMARKS TOGGLE
AVG_LANDMARKS = 0
num_avg_frames = 3 # num of frames to average over
# GLOBAL VARIABLES
#------------------------------------------------
img = np.zeros((adj_H, W_px,3))
mid_point = (0,0)
rng_pos = (np.random.randint(0, W_px),np.random.randint(0, H_px))
focus = 0
avg_pos = []
#------------------------------------------------
DEBUG = 0 #'EYE' # 'EYE' DEBUG INDIVIDUAL VALUES
if DEBUG:
try:
print('Creating dirs')
os.mkdir(fpath)
        os.makedirs(fpath+rgb_fp)
    except OSError:
print('dirs already exist')
#------------------------------------------------
class Demo:
QUIT_KEYS = {27, ord('q')}
def __init__(self, config: yacs.config.CfgNode):
self.config = config
self.gaze_estimator = GazeEstimator(config, AVG_LANDMARKS=AVG_LANDMARKS, num_frames=num_avg_frames)
self.visualizer = Visualizer(self.gaze_estimator.camera)
self.cap = self._create_capture()
self.output_dir = self._create_output_dir()
# Turn writer on and off.
if SAVE_VIDEO:
self.writer = self._create_video_writer()
else:
self.writer = 0
self.stop = False
self.show_bbox = self.config.demo.show_bbox
self.show_head_pose = self.config.demo.show_head_pose
self.show_landmarks = self.config.demo.show_landmarks
self.show_normalized_image = NORM_EYEZ # self.config.demo.show_normalized_image
self.show_template_model = self.config.demo.show_template_model
# FRAME COUNTER
self.i = 0
self.pts = []
self.cur_pos = []
self.true_pos = []
self.dist = []
self.left_eye_cent = []
self.right_eye_cent = []
self.right_eye_gaze = []
self.left_eye_gaze = []
self.face_gaze = []
self.face_cent = []
def run(self) -> None:
while True:
if DEMO:
pts = draw_utils.display_canv(CANV_MODE=CANV_MODE, cur_pos=mid_point) #cur_pos=cur_pos
self.pts.append(pts)
self.true_pos.append(pts[0])
self.cur_pos.append(pts[1])
if self.config.demo.display_on_screen:
self._wait_key()
if self.stop:
break
ok, frame = self.cap.read()
if not ok:
break
if CUST_VIDEO:
frame = imutils.resize(frame, width=self.gaze_estimator.camera.width, height=self.gaze_estimator.camera.height)
calib_time = time.time()
# FIRST WE UNDISTORT THE IMAGE!
undistorted = cv2.undistort(
frame, self.gaze_estimator.camera.camera_matrix,
self.gaze_estimator.camera.dist_coefficients)
if RUNTIME:
print('Image calibration: ', time.time()-calib_time, ' seconds.')
self.visualizer.set_image(frame.copy())
dlib_time = time.time()
faces = self.gaze_estimator.detect_faces(undistorted)
if RUNTIME:
print('DLIB faces: ', time.time() - dlib_time, ' seconds.')
for face in faces:
self.gaze_estimator.estimate_gaze(undistorted, face)
self._draw_face_bbox(face)
self._draw_head_pose(face)
self._draw_landmarks(face)
self._draw_face_template_model(face)
self._draw_gaze_vector(face)
self._display_normalized_image(face)
if self.config.demo.use_camera:
self.visualizer.image = self.visualizer.image[:, ::-1]
if self.writer:
self.writer.write(self.visualizer.image)
#self.write_eyes.write(self.visualizer.image)
if self.config.demo.display_on_screen:
self.visualizer.image = cv2.resize(self.visualizer.image, (0, 0), fy=IMG_SCALE, fx=IMG_SCALE)
cv2.imshow('frame', self.visualizer.image)
# MOVE TO TOP LEFT CORNER
cv2.moveWindow("frame", 0,0)
if DEBUG:
cv2.imwrite(fpath+rgb_fp+'rgb_'+str(self.i).zfill(5)+'.png', self.visualizer.image)
# INCREMENT COUNTER
self.i += 1
self.cap.release()
if self.writer:
self.writer.release()
def _create_capture(self) -> cv2.VideoCapture:
if self.config.demo.use_camera:
# use recording or the custom video
if CUST_VIDEO:
cap = cv2.VideoCapture(vid_file)
else:
cap = cv2.VideoCapture(0)
elif self.config.demo.video_path:
cap = cv2.VideoCapture(self.config.demo.video_path)
else:
raise ValueError
# pdb.set_trace()
cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.gaze_estimator.camera.width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.gaze_estimator.camera.height)
return cap
def _create_output_dir(self) -> Optional[pathlib.Path]:
if not self.config.demo.output_dir:
return
output_dir = pathlib.Path(self.config.demo.output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
return output_dir
@staticmethod
def _create_timestamp() -> str:
dt = datetime.datetime.now()
return dt.strftime('%Y%m%d_%H%M%S')
def _create_video_writer(self) -> Optional[cv2.VideoWriter]:
if not self.output_dir:
return None
ext = self.config.demo.output_file_extension
if ext == 'mp4':
fourcc = cv2.VideoWriter_fourcc(*'H264')
elif ext == 'avi':
fourcc = cv2.VideoWriter_fourcc(*'PIM1')
else:
raise ValueError
output_path = self.output_dir / f'{self._create_timestamp()}.{ext}'
writer = cv2.VideoWriter(output_path.as_posix(), fourcc, FPS,
(VID_W,
VID_H))
if writer is None:
raise RuntimeError
return writer
def _wait_key(self) -> None:
key = cv2.waitKey(self.config.demo.wait_time) & 0xff
if key in self.QUIT_KEYS:
self.stop = True
elif key == ord('b'):
self.show_bbox = not self.show_bbox
elif key == ord('l'):
self.show_landmarks = not self.show_landmarks
elif key == ord('h'):
self.show_head_pose = not self.show_head_pose
elif key == ord('n'):
self.show_normalized_image = not self.show_normalized_image
elif key == ord('t'):
self.show_template_model = not self.show_template_model
def _draw_face_bbox(self, face: Face) -> None:
if not self.show_bbox:
return
self.visualizer.draw_bbox(face.bbox)
def _draw_head_pose(self, face: Face) -> None:
if not self.show_head_pose:
return
# Draw the axes of the model coordinate system
length = self.config.demo.head_pose_axis_length
self.visualizer.draw_model_axes(face, length, lw=2)
euler_angles = face.head_pose_rot.as_euler('XYZ', degrees=True)
pitch, yaw, roll = face.change_coordinate_system(euler_angles)
logger.info(f'[head] pitch: {pitch:.2f}, yaw: {yaw:.2f}, '
f'roll: {roll:.2f}, distance: {face.distance:.2f}')
self.dist.append(face.distance)
def _draw_landmarks(self, face: Face) -> None:
if not self.show_landmarks:
return
self.visualizer.draw_points(face.landmarks,
color=(0, 255, 255),
size=1)
def _draw_face_template_model(self, face: Face) -> None:
if not self.show_template_model:
return
self.visualizer.draw_3d_points(face.model3d,
color=(255, 0, 525),
size=1)
def _display_normalized_image(self, face: Face) -> None:
if not self.config.demo.display_on_screen:
return
if not self.show_normalized_image:
return
if self.config.mode == GazeEstimationMethod.MPIIGaze.name:
reye = face.reye.normalized_image
leye = face.leye.normalized_image
normalized = np.hstack([reye, leye])
elif self.config.mode == GazeEstimationMethod.MPIIFaceGaze.name:
normalized = face.normalized_image
else:
raise ValueError
if self.config.demo.use_camera:
normalized = normalized[:, ::-1]
normalized = cv2.resize(normalized, (0, 0), fy=5, fx=5)
if PRINT_VALS:
H, W = normalized.shape
left_edge = W - 50
left_edge_H = 20
cv2.putText(normalized,
str(self.i), #'cur frame = '
(left_edge, left_edge_H),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, RED, 1)
save_str = 'norm_eyes_fix/img_'+str(self.i).zfill(5)+'.png'
if NORM_EYEZ:
cv2.imwrite(save_str, normalized[:,300:])
cv2.imshow('normalized', normalized)
def avg_frames(self):
if 0:
r_avg_cent = [np.array([x[0] for x in self.right_eye_cent[-num_frames:]]).mean(),
np.array([x[1] for x in self.right_eye_cent[-num_frames:]]).mean(),
np.array([x[2] for x in self.right_eye_cent[-num_frames:]]).mean()]
l_avg_cent = [np.array([x[0] for x in self.left_eye_cent[-num_frames:]]).mean(),
np.array([x[1] for x in self.left_eye_cent[-num_frames:]]).mean(),
np.array([x[2] for x in self.left_eye_cent[-num_frames:]]).mean()]
else:
r_avg_cent = self.right_eye_cent[-1]
l_avg_cent = self.left_eye_cent[-1]
r_avg_gaze = [np.array([x[0] for x in self.right_eye_gaze[-num_frames:]]).mean(),
np.array([x[1] for x in self.right_eye_gaze[-num_frames:]]).mean(),
np.array([x[2] for x in self.right_eye_gaze[-num_frames:]]).mean()]
l_avg_gaze = [np.array([x[0] for x in self.left_eye_gaze[-num_frames:]]).mean(),
np.array([x[1] for x in self.left_eye_gaze[-num_frames:]]).mean(),
np.array([x[2] for x in self.left_eye_gaze[-num_frames:]]).mean()]
right_eye_XY = point_to_screen(r_avg_cent, r_avg_gaze)
left_eye_XY = point_to_screen(l_avg_cent, l_avg_gaze)
mid_x = np.mean([right_eye_XY[0], left_eye_XY[0]])
mid_y = np.mean([right_eye_XY[1], left_eye_XY[1]])
if PRINT_VALS:
self.draw_vals(r_avg_gaze, r_avg_cent, l_avg_gaze,l_avg_cent)
return mid_x, mid_y
def draw_vals(self, r_gaze, r_cent, l_gaze, l_cent):
H, W, _ = self.visualizer.image.shape
left_edge = W - 350
left_edge_H = 40
flip_img = cv2.flip(self.visualizer.image, 1)
r_gaze = round_tup(r_gaze)
r_cent = round_tup(r_cent)
l_gaze = round_tup(l_gaze)
l_cent = round_tup(l_cent)
print('frame no ', self.i)
print('right_gaze, ', r_gaze)
print('left_gaze , ', l_gaze)
print('right_cent, ', r_cent)
print('left_cent , ', l_cent)
cv2.putText(flip_img,
'cur frame = '+ str(self.i),
(left_edge, left_edge_H-20),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, RED, 1)
cv2.putText(flip_img,
'R_Gaze = '+str(r_gaze),
(left_edge, left_edge_H),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, BLACK, 1)
cv2.putText(flip_img,
'R_Cent = '+str(r_cent),
(left_edge, left_edge_H+20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, BLACK, 1)
cv2.putText(flip_img,
'L_Gaze = '+str(l_gaze),
(left_edge, left_edge_H+40),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, BLACK, 1)
cv2.putText(flip_img,
'L_Cent = '+str(l_cent),
(left_edge, left_edge_H+60),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, BLACK, 1)
if GAZE_AVG_FLAG:
avg_str = 'ON' + ' frames = ' + str(num_frames)
else:
avg_str = 'OFF'
cv2.putText(flip_img,
'AVG = ' + str(avg_str),
(left_edge, left_edge_H+85),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, RED, 1)
self.visualizer.image = cv2.flip(flip_img, 1)
def _draw_gaze_vector(self, face: Face) -> None:
length = self.config.demo.gaze_visualization_length
print('*'*50)
right_eye_XY = (0,0)
left_eye_XY = (0,0)
r_gaze_ = (0,0,0)
r_cent_ = (0,0,0)
l_gaze_ = (0,0,0)
l_cent_ = (0,0,0)
if self.config.mode == GazeEstimationMethod.MPIIGaze.name:
for key in [FacePartsName.REYE, FacePartsName.LEYE]:
eye = getattr(face, key.name.lower())
self.visualizer.draw_3d_line(
eye.center, eye.center + length * eye.gaze_vector)
if key.name.lower() == 'reye':
self.right_eye_cent.append(eye.center)
self.right_eye_gaze.append(eye.gaze_vector)
r_gaze_ = tuple(eye.gaze_vector)
r_cent_ = tuple(eye.center)
right_eye_XY = point_to_screen(eye.center, eye.gaze_vector)
else:
self.left_eye_cent.append(eye.center)
self.left_eye_gaze.append(eye.gaze_vector)
left_eye_XY = point_to_screen(eye.center, eye.gaze_vector)
l_gaze_ = tuple(eye.gaze_vector)
l_cent_ = tuple(eye.center)
print('{} gaze = '.format(key.name.lower()), eye.gaze_vector)
print('{} center = '.format(key.name.lower()), eye.center)
pitch, yaw = np.rad2deg(eye.vector_to_angle(eye.gaze_vector))
logger.info(
f'[{key.name.lower()}] pitch: {pitch:.2f}, yaw: {yaw:.2f}')
elif self.config.mode == GazeEstimationMethod.MPIIFaceGaze.name:
self.visualizer.draw_3d_line(
face.center, face.center + length * face.gaze_vector)
self.face_cent.append(face.center)
self.face_gaze.append(face.gaze_vector)
pitch, yaw = np.rad2deg(face.vector_to_angle(face.gaze_vector))
logger.info(f'[face] pitch: {pitch:.2f}, yaw: {yaw:.2f}')
else:
raise ValueError
global mid_point
if self.config.mode == GazeEstimationMethod.MPIIGaze.name:
# -----------------------------------------------
if GAZE_AVG_FLAG:
if len(self.right_eye_cent) >= num_frames:
mid_x, mid_y = self.avg_frames()
else:
if PRINT_VALS:
self.draw_vals(r_gaze_, r_cent_, l_gaze_,l_cent_)
else:
mid_x = np.mean([right_eye_XY[0], left_eye_XY[0]])
mid_y = np.mean([right_eye_XY[1], left_eye_XY[1]])
if PRINT_VALS:
self.draw_vals(r_gaze_, r_cent_, l_gaze_,l_cent_)
elif self.config.mode == GazeEstimationMethod.MPIIFaceGaze.name:
XY = point_to_screen(face.center, face.gaze_vector)
mid_x = XY[0]
mid_y = XY[1]
else:
raise ValueError
mid_point = (int(mid_x), int(mid_y))
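# Hedged sketch of the GAZE_AVG_FLAG smoothing implemented in Demo.avg_frames above:
# the last `num_frames` 3D gaze vectors are averaged component-wise before being
# projected onto the screen. The standalone helper below mirrors that idea with
# plain numpy; it is illustrative only and is not called anywhere in this script.
def _average_recent_vectors(history, n):
    """Component-wise mean of the last n vectors in `history` (a list of 3-vectors)."""
    recent = np.asarray(history[-n:], dtype=float)
    return recent.mean(axis=0)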
def main():
'''
# EYE MODEL
python demo.py --config configs/demo_mpiigaze_resnet.yaml
# FACE MODEL
python demo.py --config configs/demo_mpiifacegaze_resnet_simple_14.yaml
'''
global DEMO, CANV_MODE, IMG_SCALE, NORM_EYEZ, SAVE_VIDEO
global RUNTIME, CUST_VIDEO, vid_file, PRINT_VALS
start_time = time.time()
config, custom = load_config()
# pdb.set_trace()
DEMO = custom['demo']
# Save normalized eyes
NORM_EYEZ = custom['eyes']
# FLAG TO SAVE MOVE, DEFAULT = FALSE
SAVE_VIDEO = custom['save_vid']
# PRINT RUNTIME
RUNTIME = custom['runtime'] #0
# PRINTS VALS ON THE WEBCAM IMG
PRINT_VALS = custom['printvals'] #0
# CUSTOM VIDEO:
CUST_VIDEO = custom['cust_vid']
    if CUST_VIDEO is not None:
vid_file = CUST_VIDEO
CANV_MODE = custom['mode']
    if CANV_MODE in ('STABILITY', 'UPDOWN', 'LEFTRIGHT', 'SEQ'):
print('Current mode is {}'.format(CANV_MODE))
else:
print('Breaking since current mode is {}'.format(CANV_MODE))
print('Set correct CANV_MODE --mode: ')
print('*STABILITY* *UPDOWN* *LEFTRIGHT* *SEQ*')
sys.exit(1)
if DEMO:
IMG_SCALE = custom['imgscale']
CANV_MODE = custom['mode'] #'RNG'
demo = Demo(config)
demo.run()
n_frames = len(demo.pts)
tot_time = time.time()-start_time
print('nr of frames: ', n_frames)
print('All finished: ',tot_time , ' seconds.')
print('FPS: ', round(n_frames/tot_time,2))
# This part only gets executed in case there is input to the model
if CUST_VIDEO:
# COMPUTE ACCURACY METRICS HERE
save_path = 'testResults/'
try:
os.mkdir(save_path)
        except OSError:
            print('folder already exists: {}'.format(save_path))
str_name = vid_file.split('/')[1].split('.')[0] + '_LM_' +str(AVG_LANDMARKS) + '_GAZE_' + str(GAZE_AVG_FLAG)
str_name = str(demo.gaze_estimator.camera.width) + 'x' + str(demo.gaze_estimator.camera.height) + '_' + str_name
str_name = config.mode + str_name
        indices = [i for i, item in enumerate(demo.cur_pos) if sum(item) == 0]
        for i in reversed(indices):
            demo.true_pos.pop(i)
            demo.cur_pos.pop(i)
# DUMP THE GAZE AND CENTER VALUES
if config.mode == 'MPIIGaze':
dump_dict(str_name,items=[demo.left_eye_cent,demo.left_eye_gaze, demo.right_eye_cent, demo.right_eye_gaze, demo.true_pos, demo.dist],
item_name = ['lcent', 'lgaze', 'rcent', 'rgaze', 'tpos', 'fdist'])
elif config.mode == 'MPIIFaceGaze':
dump_dict(str_name,items=[demo.face_cent,demo.face_gaze, demo.true_pos, demo.dist],
item_name = ['fcent', 'fgaze', 'tpos', 'fdist'])
        print('EXIT BEFORE METRICS & PLOTS')
_, MAE, CEP, CE95 = calc_metrics((demo.true_pos,demo.cur_pos))
print('MAE = ', MAE)
print('CEP = ', CEP)
print('CEP95 = ', CE95)
# draw results
draw_utils.plot_pts((demo.true_pos,demo.cur_pos), str_name, MAE, save_path)
if __name__ == '__main__':
main()
| 2.03125
| 2
|
Vpp3_FlowVisualization/Package/__init__.py
|
kaijunhuang1994/VirtualPowerPrinciple-v1.0
| 0
|
12783168
|
import sys
path = sys.path[0] + "/Package/parser"
sys.path.append(path)
path = sys.path[0] + "/Package/vector"
sys.path.append(path)
path = sys.path[0] + "/Package/solvercontrol"
sys.path.append(path)
| 1.742188
| 2
|
swagger_client/apis/attachment_api.py
|
rcbops/qtest-swagger-client
| 1
|
12783169
|
<reponame>rcbops/qtest-swagger-client<filename>swagger_client/apis/attachment_api.py
# coding: utf-8
"""
qTest Manager API Version 8.6 - 9.1
qTest Manager API Version 8.6 - 9.1
OpenAPI spec version: 8.6 - 9.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class AttachmentApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def delete(self, project_id, blob_handle_id, object_type, object_id, **kwargs):
"""
Deletes an Attachment from an Object
To delete an Attachment from a Release, Build, Requirement, Test Case, Test Log, Test Step or Defect <strong>qTest Manager version:</strong> 7.5+
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete(project_id, blob_handle_id, object_type, object_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int project_id: ID of the project (required)
:param int blob_handle_id: ID of the Attachment (required)
:param str object_type: Valid values include releases, builds, requirements, test-cases, test-logs, test-steps or defects <strong>qTest Manager version:</strong> 4+ (required)
:param int object_id: ID of the object (Release, Build, Requirement, Test Case, Test Log, Test Step or Defect) (required)
:return: Message
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_with_http_info(project_id, blob_handle_id, object_type, object_id, **kwargs)
else:
(data) = self.delete_with_http_info(project_id, blob_handle_id, object_type, object_id, **kwargs)
return data
def delete_with_http_info(self, project_id, blob_handle_id, object_type, object_id, **kwargs):
"""
Deletes an Attachment from an Object
To delete an Attachment from a Release, Build, Requirement, Test Case, Test Log, Test Step or Defect <strong>qTest Manager version:</strong> 7.5+
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_with_http_info(project_id, blob_handle_id, object_type, object_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int project_id: ID of the project (required)
:param int blob_handle_id: ID of the Attachment (required)
:param str object_type: Valid values include releases, builds, requirements, test-cases, test-logs, test-steps or defects <strong>qTest Manager version:</strong> 4+ (required)
:param int object_id: ID of the object (Release, Build, Requirement, Test Case, Test Log, Test Step or Defect) (required)
:return: Message
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['project_id', 'blob_handle_id', 'object_type', 'object_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'project_id' is set
if ('project_id' not in params) or (params['project_id'] is None):
raise ValueError("Missing the required parameter `project_id` when calling `delete`")
# verify the required parameter 'blob_handle_id' is set
if ('blob_handle_id' not in params) or (params['blob_handle_id'] is None):
raise ValueError("Missing the required parameter `blob_handle_id` when calling `delete`")
# verify the required parameter 'object_type' is set
if ('object_type' not in params) or (params['object_type'] is None):
raise ValueError("Missing the required parameter `object_type` when calling `delete`")
# verify the required parameter 'object_id' is set
if ('object_id' not in params) or (params['object_id'] is None):
raise ValueError("Missing the required parameter `object_id` when calling `delete`")
collection_formats = {}
resource_path = '/api/v3/projects/{projectId}/{objectType}/{objectId}/blob-handles/{blobHandleId}'.replace('{format}', 'json')
path_params = {}
if 'project_id' in params:
path_params['projectId'] = params['project_id']
if 'blob_handle_id' in params:
path_params['blobHandleId'] = params['blob_handle_id']
if 'object_type' in params:
path_params['objectType'] = params['object_type']
if 'object_id' in params:
path_params['objectId'] = params['object_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['Authorization']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Message',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_attachment(self, project_id, attachment_id, object_type, object_id, **kwargs):
"""
Gets an Attachment of an Object
To retrieve an Attachment from a Release, Build, Requirement, Test Case, Test Log, Test Step or Defect
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_attachment(project_id, attachment_id, object_type, object_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int project_id: ID of the project (required)
:param int attachment_id: ID of attachment (required)
:param str object_type: Valid values include <em>release</em>, <em>build</em>, <em>requirements</em>, <em>test-cases</em>, <em>test-logs</em>, <em>test-steps</em>, or <em>defects</em> (required)
:param int object_id: ID of the object (Release, Build, Requirement, Test Case, Test Log, Test Step or Defect) (required)
:return: OutputStream
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_attachment_with_http_info(project_id, attachment_id, object_type, object_id, **kwargs)
else:
(data) = self.get_attachment_with_http_info(project_id, attachment_id, object_type, object_id, **kwargs)
return data
def get_attachment_with_http_info(self, project_id, attachment_id, object_type, object_id, **kwargs):
"""
Gets an Attachment of an Object
To retrieve an Attachment from a Release, Build, Requirement, Test Case, Test Log, Test Step or Defect
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_attachment_with_http_info(project_id, attachment_id, object_type, object_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int project_id: ID of the project (required)
:param int attachment_id: ID of attachment (required)
:param str object_type: Valid values include <em>release</em>, <em>build</em>, <em>requirements</em>, <em>test-cases</em>, <em>test-logs</em>, <em>test-steps</em>, or <em>defects</em> (required)
:param int object_id: ID of the object (Release, Build, Requirement, Test Case, Test Log, Test Step or Defect) (required)
:return: OutputStream
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['project_id', 'attachment_id', 'object_type', 'object_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_attachment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'project_id' is set
if ('project_id' not in params) or (params['project_id'] is None):
raise ValueError("Missing the required parameter `project_id` when calling `get_attachment`")
# verify the required parameter 'attachment_id' is set
if ('attachment_id' not in params) or (params['attachment_id'] is None):
raise ValueError("Missing the required parameter `attachment_id` when calling `get_attachment`")
# verify the required parameter 'object_type' is set
if ('object_type' not in params) or (params['object_type'] is None):
raise ValueError("Missing the required parameter `object_type` when calling `get_attachment`")
# verify the required parameter 'object_id' is set
if ('object_id' not in params) or (params['object_id'] is None):
raise ValueError("Missing the required parameter `object_id` when calling `get_attachment`")
collection_formats = {}
resource_path = '/api/v3/projects/{projectId}/{objectType}/{objectId}/attachments/{attachmentId}'.replace('{format}', 'json')
path_params = {}
if 'project_id' in params:
path_params['projectId'] = params['project_id']
if 'attachment_id' in params:
path_params['attachmentId'] = params['attachment_id']
if 'object_type' in params:
path_params['objectType'] = params['object_type']
if 'object_id' in params:
path_params['objectId'] = params['object_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['Authorization']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OutputStream',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_attachments_of(self, project_id, object_type, object_id, **kwargs):
"""
Gets all Attachments of an Object
To retrieve all Attachments of a Release, Build, Requirement, Test Case, Test Log, Test Step or Defect
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_attachments_of(project_id, object_type, object_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int project_id: ID of the project (required)
:param str object_type: Valid values include <em>release</em>, <em>build</em>, <em>requirements</em>, <em>test-cases</em>, <em>test-logs</em>, <em>test-steps</em>, or <em>defects</em> (required)
:param int object_id: ID of the object (Release, Build, Requirement, Test Case, Test Log, Test Step or Defect) (required)
:return: list[AttachmentResource]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_attachments_of_with_http_info(project_id, object_type, object_id, **kwargs)
else:
(data) = self.get_attachments_of_with_http_info(project_id, object_type, object_id, **kwargs)
return data
def get_attachments_of_with_http_info(self, project_id, object_type, object_id, **kwargs):
"""
Gets all Attachments of an Object
To retrieve all Attachments of a Release, Build, Requirement, Test Case, Test Log, Test Step or Defect
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_attachments_of_with_http_info(project_id, object_type, object_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int project_id: ID of the project (required)
:param str object_type: Valid values include <em>release</em>, <em>build</em>, <em>requirements</em>, <em>test-cases</em>, <em>test-logs</em>, <em>test-steps</em>, or <em>defects</em> (required)
:param int object_id: ID of the object (Release, Build, Requirement, Test Case, Test Log, Test Step or Defect) (required)
:return: list[AttachmentResource]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['project_id', 'object_type', 'object_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_attachments_of" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'project_id' is set
if ('project_id' not in params) or (params['project_id'] is None):
raise ValueError("Missing the required parameter `project_id` when calling `get_attachments_of`")
# verify the required parameter 'object_type' is set
if ('object_type' not in params) or (params['object_type'] is None):
raise ValueError("Missing the required parameter `object_type` when calling `get_attachments_of`")
# verify the required parameter 'object_id' is set
if ('object_id' not in params) or (params['object_id'] is None):
raise ValueError("Missing the required parameter `object_id` when calling `get_attachments_of`")
collection_formats = {}
resource_path = '/api/v3/projects/{projectId}/{objectType}/{objectId}/attachments'.replace('{format}', 'json')
path_params = {}
if 'project_id' in params:
path_params['projectId'] = params['project_id']
if 'object_type' in params:
path_params['objectType'] = params['object_type']
if 'object_id' in params:
path_params['objectId'] = params['object_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['Authorization']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[AttachmentResource]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search(self, project_id, type, **kwargs):
"""
Searches for Attachments
To query for attachments of <em>Releases</em>, <em>Builds</em>, <em>Requirements</em>, <em>Test Cases</em>, <em>Test Logs</em>, <em>Test Steps</em> or <em>Defects</em>
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.search(project_id, type, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int project_id: ID of the project (required)
:param str type: Its valid values include <em>releases</em>, <em>builds</em>, <em>requirements</em>, <em>test-cases</em>, <em>test-steps</em>, <em>test-logs</em> or <em>defects</em> (required)
:param list[int] ids: List of object IDs (of the same type as specified in the parameter above), separated by commas
:param int author: ID of the user who created the attachment
        :param str created_date: Its format is: <strong>{operator} {createdDate in timestamp or UTC}</strong> The <em>operator</em> can be one of the following values: <b>lt</b>: less than the given date <b>gt</b>: greater than the given date <b>eq</b>: equal to the given date <b>le</b>: less than or equal to the given date <b>ge</b>: greater than or equal to the given date
        :param int page_size: The result is paginated. By default, the number of objects in each page is 100 if this is omitted. You can specify a custom number (up to 999) in this parameter
:param int page: By default, the first page is returned but you can specify any page number to retrieve attachments
:return: PagedResourceAttachmentResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.search_with_http_info(project_id, type, **kwargs)
else:
(data) = self.search_with_http_info(project_id, type, **kwargs)
return data
def search_with_http_info(self, project_id, type, **kwargs):
"""
Searches for Attachments
To query for attachments of <em>Releases</em>, <em>Builds</em>, <em>Requirements</em>, <em>Test Cases</em>, <em>Test Logs</em>, <em>Test Steps</em> or <em>Defects</em>
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.search_with_http_info(project_id, type, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int project_id: ID of the project (required)
:param str type: Its valid values include <em>releases</em>, <em>builds</em>, <em>requirements</em>, <em>test-cases</em>, <em>test-steps</em>, <em>test-logs</em> or <em>defects</em> (required)
:param list[int] ids: List of object IDs (of the same type as specified in the parameter above), separated by commas
:param int author: ID of the user who created the attachment
        :param str created_date: Its format is: <strong>{operator} {createdDate in timestamp or UTC}</strong> The <em>operator</em> can be one of the following values: <b>lt</b>: less than the given date <b>gt</b>: greater than the given date <b>eq</b>: equal to the given date <b>le</b>: less than or equal to the given date <b>ge</b>: greater than or equal to the given date
        :param int page_size: The result is paginated. By default, the number of objects in each page is 100 if this is omitted. You can specify a custom number (up to 999) in this parameter
:param int page: By default, the first page is returned but you can specify any page number to retrieve attachments
:return: PagedResourceAttachmentResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['project_id', 'type', 'ids', 'author', 'created_date', 'page_size', 'page']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'project_id' is set
if ('project_id' not in params) or (params['project_id'] is None):
raise ValueError("Missing the required parameter `project_id` when calling `search`")
# verify the required parameter 'type' is set
if ('type' not in params) or (params['type'] is None):
raise ValueError("Missing the required parameter `type` when calling `search`")
collection_formats = {}
resource_path = '/api/v3/projects/{projectId}/attachments'.replace('{format}', 'json')
path_params = {}
if 'project_id' in params:
path_params['projectId'] = params['project_id']
query_params = {}
if 'type' in params:
query_params['type'] = params['type']
if 'ids' in params:
query_params['ids'] = params['ids']
collection_formats['ids'] = 'multi'
if 'author' in params:
query_params['author'] = params['author']
if 'created_date' in params:
query_params['createdDate'] = params['created_date']
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page' in params:
query_params['page'] = params['page']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['Authorization']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PagedResourceAttachmentResource',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def upload(self, project_id, object_type, object_id, file_name, content_type, body, **kwargs):
"""
Uploads an Attachment to an Object
To upload an Attachment to a Release, Build, Requirement, Test Case, Test Log, Test Step, or Defect
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.upload(project_id, object_type, object_id, file_name, content_type, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int project_id: ID of the project (required)
:param str object_type: Valid values include releases, builds, requirements, test-cases, test-logs, test-steps or defects <strong>qTest Manager version:</strong> 4+ (required)
:param int object_id: ID of the object (Release, Build, Requirement, Test Case, Test Log, Test Step or Defect) (required)
:param str file_name: (required)
:param str content_type: (required)
:param str body: (required)
:return: AttachmentResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.upload_with_http_info(project_id, object_type, object_id, file_name, content_type, body, **kwargs)
else:
(data) = self.upload_with_http_info(project_id, object_type, object_id, file_name, content_type, body, **kwargs)
return data
def upload_with_http_info(self, project_id, object_type, object_id, file_name, content_type, body, **kwargs):
"""
Uploads an Attachment to an Object
To upload an Attachment to a Release, Build, Requirement, Test Case, Test Log, Test Step, or Defect
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.upload_with_http_info(project_id, object_type, object_id, file_name, content_type, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int project_id: ID of the project (required)
:param str object_type: Valid values include releases, builds, requirements, test-cases, test-logs, test-steps or defects <strong>qTest Manager version:</strong> 4+ (required)
:param int object_id: ID of the object (Release, Build, Requirement, Test Case, Test Log, Test Step or Defect) (required)
:param str file_name: (required)
:param str content_type: (required)
:param str body: (required)
:return: AttachmentResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['project_id', 'object_type', 'object_id', 'file_name', 'content_type', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method upload" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'project_id' is set
if ('project_id' not in params) or (params['project_id'] is None):
raise ValueError("Missing the required parameter `project_id` when calling `upload`")
# verify the required parameter 'object_type' is set
if ('object_type' not in params) or (params['object_type'] is None):
raise ValueError("Missing the required parameter `object_type` when calling `upload`")
# verify the required parameter 'object_id' is set
if ('object_id' not in params) or (params['object_id'] is None):
raise ValueError("Missing the required parameter `object_id` when calling `upload`")
# verify the required parameter 'file_name' is set
if ('file_name' not in params) or (params['file_name'] is None):
raise ValueError("Missing the required parameter `file_name` when calling `upload`")
# verify the required parameter 'content_type' is set
if ('content_type' not in params) or (params['content_type'] is None):
raise ValueError("Missing the required parameter `content_type` when calling `upload`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `upload`")
collection_formats = {}
resource_path = '/api/v3/projects/{projectId}/{objectType}/{objectId}/blob-handles'.replace('{format}', 'json')
path_params = {}
if 'project_id' in params:
path_params['projectId'] = params['project_id']
if 'object_type' in params:
path_params['objectType'] = params['object_type']
if 'object_id' in params:
path_params['objectId'] = params['object_id']
query_params = {}
header_params = {}
if 'file_name' in params:
header_params['File-Name'] = params['file_name']
if 'content_type' in params:
header_params['Content-Type'] = params['content_type']
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# Authentication setting
auth_settings = ['Authorization']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AttachmentResource',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 1.515625
| 2
|
day13-1.py
|
kenleung5e28/advent-of-code-2021
| 0
|
12783170
|
<gh_stars>0
m, n = 0, 0
points = []
folds = []
with open('input-day13.txt') as file:
while True:
line = file.readline().rstrip()
if len(line) == 0:
break
x, y = [int(s) for s in line.split(',')]
m, n = max(m, x), max(n, y)
points.append((x, y))
while True:
line = file.readline()
if len(line) == 0:
break
line = line.rstrip()
dir, coor = line[len('fold along '):].split('=')
folds.append((dir, int(coor)))
grid = [[False] * (n + 1) for _ in range(m + 1)]
for x, y in points:
grid[x][y] = True
for dir, coor in folds[:1]:
if dir == 'x':
for x in range(coor + 1, m + 1):
for y in range(n + 1):
if grid[x][y]:
grid[2 * coor - x][y] = True
m = coor - 1
elif dir == 'y':
for x in range(m + 1):
for y in range(coor + 1, n + 1):
if grid[x][y]:
grid[x][2 * coor - y] = True
n = coor - 1
counts = 0
for x in range(m + 1):
for y in range(n + 1):
if grid[x][y]:
counts += 1
print(counts)
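# Hedged aside on the reflection used above: a dot on the far side of a fold line
# at x = coor lands at 2 * coor - x (and symmetrically for y-folds). The numbers
# below are made up purely to illustrate the arithmetic.
fold_x, dot_x = 7, 10
assert 2 * fold_x - dot_x == 4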
| 2.78125
| 3
|
metrics.py
|
wemyss/five-video-classification-methods
| 0
|
12783171
|
import functools
from keras import backend as K
import tensorflow as tf
def as_keras_metric(method):
@functools.wraps(method)
def wrapper(self, args, **kwargs):
""" Wrapper for turning tensorflow metrics into keras metrics """
        # Keras calls each metric as fn(y_true, y_pred); those map onto (self, args) here.
        value, update_op = method(self, args, **kwargs)
K.get_session().run(tf.local_variables_initializer())
with tf.control_dependencies([update_op]):
value = tf.identity(value)
return value
return wrapper
precision = as_keras_metric(tf.metrics.precision)
recall = as_keras_metric(tf.metrics.recall)
f1_score = as_keras_metric(tf.contrib.metrics.f1_score)
def rmse(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
def recall2(y_true, y_pred):
"""Recall metric.
Only computes a batch-wise average of recall.
Computes the recall, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision2(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1(y_true, y_pred):
precision = precision2(y_true, y_pred)
recall = recall2(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
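# Minimal usage sketch: these callables plug straight into Keras' `metrics` list.
# The tiny model below is an arbitrary stand-in (layer sizes, loss and optimizer
# are assumptions), shown only to illustrate how the metrics are attached.
if __name__ == "__main__":
    from keras.models import Sequential
    from keras.layers import Dense
    model = Sequential([Dense(8, activation="relu", input_shape=(4,)),
                        Dense(1, activation="sigmoid")])
    model.compile(optimizer="adam",
                  loss="binary_crossentropy",
                  metrics=[precision2, recall2, f1])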
| 2.78125
| 3
|
servers/solusapi.py
|
SteelBall/StrutSkinn
| 0
|
12783172
|
<filename>servers/solusapi.py
from xmltodict import parse
import urllib
import urllib2
class SolusAPI(object):
api_key = ''
api_hash = ''
api_url = ''
document = None
success = False
error = None
GIGABYTES = 1073741824.0
MEGABYTES = 1048576.0
def __init__(self, url, api_key, api_hash):
self.api_key = api_key
self.api_hash = api_hash
if not url.endswith('/'):
url += '/'
url += 'api/client/command.php'
self.api_url = url
def perform_request(self, **kwargs):
params = dict(
{
"key": self.api_key,
"hash": self.api_hash,
"action": "info",
}.items() + kwargs.items()
)
request_data = urllib.urlencode(params)
request = urllib2.Request(self.api_url, request_data)
request.add_header('User-agent', 'Mozilla/5.0')
try:
response = urllib2.urlopen(request)
except urllib2.URLError:
self.error = "Incorrect URL"
self.success = False
return False
response_data = response.read()
document = parse("<doc>" + response_data + "</doc>")
document = document["doc"]
self.document = None
self.success = False
if "status" in document:
if document["status"] == "success":
self.success = True
self.document = document
return True
else:
self.error = document["statusmsg"]
else:
self.error = 'Incorrect data format'
return False
def get_ips(self):
if self.perform_request(ipaddr='true'):
return self.document["ipaddr"].split(',')
else:
return False
def get_status(self):
if self.perform_request(action='status'):
if self.document["vmstat"] == "online":
return "online"
return "offline"
else:
return False
def get_hostname(self):
if self.perform_request():
return self.document["hostname"]
return False
def get_main_ip(self):
if self.perform_request():
return self.document["ipaddress"]
return False
def get_hdd(self, output_format=GIGABYTES):
if self.perform_request(hdd='true'):
hdd = self.document["hdd"]
total, used, free, percent_used = hdd.split(',')
total = float(total) / output_format
used = float(used) / output_format
free = float(free) / output_format
return {
"total": total,
"used": used,
"free": free,
"percent": float(percent_used)
}
return False
def get_memory(self, output_format=MEGABYTES):
if self.perform_request(mem='true'):
mem = self.document["mem"]
total, used, free, percent_used = mem.split(',')
total = float(total) / output_format
used = float(used) / output_format
free = float(free) / output_format
return {
"total": total,
"used": used,
"free": free,
"percent": float(percent_used)
}
return False
def get_bandwidth(self, output_format=GIGABYTES):
if self.perform_request(bw='true'):
bw = self.document["bw"]
total, used, free, percent_used = bw.split(',')
total = float(total) / output_format
used = float(used) / output_format
free = float(free) / output_format
return {
"total": total,
"used": used,
"free": free,
"percent": float(percent_used)
}
return False
def get_all(self, output_hdd=GIGABYTES, output_mem=MEGABYTES, output_bw=GIGABYTES):
if self.perform_request(bw='true', mem='true', hdd='true'):
# HDD
hdd = self.document["hdd"]
total, used, free, percent_used = hdd.split(',')
total = float(total) / output_hdd
used = float(used) / output_hdd
free = float(free) / output_hdd
self.hdd = {
"total": total,
"used": used,
"free": free,
"percent": float(percent_used)
}
# Memory
mem = self.document["mem"]
total, used, free, percent_used = mem.split(',')
total = float(total) / output_mem
used = float(used) / output_mem
free = float(free) / output_mem
self.ram = {
"total": total,
"used": used,
"free": free,
"percent": float(percent_used)
}
# Bandwidth
bw = self.document["bw"]
total, used, free, percent_used = bw.split(',')
total = float(total) / output_bw
used = float(used) / output_bw
free = float(free) / output_bw
self.bw = {
"total": total,
"used": used,
"free": free,
"percent": float(percent_used)
}
return True
return False
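# Minimal usage sketch (not in the original module; the URL, key, and hash
# below are placeholders for real SolusVM client credentials):
# api = SolusAPI('https://panel.example.com', 'API_KEY', 'API_HASH')
# if api.get_status() == 'online':
#     print api.get_main_ip()  # Python 2 module (urllib2), hence print statement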
| 3
| 3
|
lib/opengl/postproc/Bypass.py
|
defgsus/thegame
| 1
|
12783173
|
<filename>lib/opengl/postproc/Bypass.py
from .base import PostProcNode
class Bypass(PostProcNode):
def __init__(self, name, alpha=None):
super().__init__(name)
self.alpha = alpha
def get_code(self):
return """
void mainImage(out vec4 fragColor, in vec2 fragCoord, in vec2 texCoord) {
fragColor = texture(u_tex1, texCoord);
#if %(override)s
fragColor.w = %(alpha)s;
#endif
}
""" % {
"override": 0 if self.alpha is None else 1,
"alpha": self.alpha,
}
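# Note (added for clarity, not in the original): with alpha=0.5 the generated
# GLSL contains "#if 1" and sets fragColor.w = 0.5; with alpha=None the body is
# guarded by "#if 0", so the unused "fragColor.w = None;" line never compiles.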
| 2.453125
| 2
|
py12306/config.py
|
welking1/py12306
| 1
|
12783174
|
<gh_stars>1-10
from os import path
# 12306 accounts
USER_ACCOUNTS = []
# Query jobs
QUERY_JOBS = []
# Query interval
QUERY_INTERVAL = 1
# User heartbeat check interval
USER_HEARTBEAT_INTERVAL = 120
# Multi-threaded query
QUERY_JOB_THREAD_ENABLED = 0
# Captcha-solving service account
AUTO_CODE_ACCOUNT = {
'user': '',
'pwd': ''
}
# Write logs to file
OUT_PUT_LOG_TO_FILE_ENABLED = 0
OUT_PUT_LOG_TO_FILE_PATH = 'runtime/12306.log'
SEAT_TYPES = {
'特等座': 25,
'商务座': 32,
'一等座': 31,
'二等座': 30,
'软卧': 23,
'硬卧': 28,
'硬座': 29,
'无座': 26,
}
ORDER_SEAT_TYPES = {
'特等座': 'P',
'商务座': 9,
'一等座': 'M',
'二等座': 'O',
'软卧': 4,
'硬卧': 3,
'硬座': 1,
'无座': 1,
}
PROJECT_DIR = path.dirname(path.dirname(path.abspath(__file__))) + '/'
# Query
RUNTIME_DIR = PROJECT_DIR + 'runtime/'
QUERY_DATA_DIR = RUNTIME_DIR + 'query/'
USER_DATA_DIR = RUNTIME_DIR + 'user/'
STATION_FILE = PROJECT_DIR + 'data/stations.txt'
CONFIG_FILE = PROJECT_DIR + 'env.py'
# Voice verification code notification
NOTIFICATION_BY_VOICE_CODE = 0
NOTIFICATION_VOICE_CODE_PHONE = ''
NOTIFICATION_API_APP_CODE = ''
if path.exists(CONFIG_FILE):
exec(open(CONFIG_FILE, encoding='utf8').read())
class UserType:
ADULT = 1
CHILD = 2
STUDENT = 3
SOLDIER = 4
dicts = {
'成人': ADULT,
'儿童': CHILD,
'学生': STUDENT,
'残疾军人、伤残人民警察': SOLDIER,
}
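# Hypothetical env.py sketch (env.py is exec'd above, so any top-level
# assignment there overrides the defaults in this module; the exact fields
# expected in each entry are not shown in this file, so only placeholders
# are given):
# USER_ACCOUNTS = [...]   # fill in 12306 account entries
# QUERY_JOBS = [...]      # fill in query job entries
# QUERY_INTERVAL = 2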
| 1.960938
| 2
|
problems/test_0374.py
|
chrisxue815/leetcode_python
| 1
|
12783175
|
<filename>problems/test_0374.py
import unittest
# The guess API is already defined for you.
# @param num, your guess
# @return -1 if my number is lower, 1 if my number is higher, otherwise return 0
_my_num = 0
def guess(num):
if _my_num < num:
return -1
elif _my_num > num:
return 1
return 0
class Solution:
def guessNumber(self, n):
"""
:type n: int
:rtype: int
"""
lo = 1
hi = n
while True:
mid = lo + (hi - lo) // 2
result = guess(mid)
if result == -1:
hi = mid - 1
elif result == 1:
lo = mid + 1
else:
return mid
class Test(unittest.TestCase):
def test(self):
self._test(10, 6)
def _test(self, n, expected):
global _my_num
_my_num = expected
actual = Solution().guessNumber(n)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
| 3.78125
| 4
|
deep_learning_with_tensorFlow/Chapter08/p204.py
|
pearpai/TensorFlow-action
| 3
|
12783176
|
<filename>deep_learning_with_tensorFlow/Chapter08/p204.py
# coding=utf-8
import numpy as np
X = [1, 2]
state = [0.0, 0.0]
# Define the weights for the different input parts separately for easier handling
w_cell_state = np.asarray([[0.1, 0.2], [0.3, 0.4]])
w_cell_input = np.asarray([0.5, 0.6])
b_cell = np.asarray([0.1, -0.1])
# Define the fully connected layer parameters used for the output
w_output = np.asarray([[1.0], [2.0]])
b_output = 0.1
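# The loop below restates, in code, the simple-RNN recurrence (comment added
# for clarity, not in the original):
#   state_t  = tanh(state_{t-1} . w_cell_state + x_t * w_cell_input + b_cell)
#   output_t = state_t . w_output + b_output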
# Run the recurrent neural network over the time steps
for i in range(len(X)):
    # Compute the fully connected layer inside the recurrent cell
before_activation = np.dot(state, w_cell_state) + X[i] * w_cell_input + b_cell
state = np.tanh(before_activation)
    # Compute the final output from the current state
final_output = np.dot(state, w_output) + b_output
    # Print the values at each time step
print "before activation: ", before_activation
print "state: ", state
print "output: ", final_output
| 3.03125
| 3
|
bin/list_routines_misc.py
|
shardulc/ec
| 0
|
12783177
|
<reponame>shardulc/ec<gh_stars>0
import matplotlib
matplotlib.use('Agg')
try:
import binutil # required to import from dreamcoder modules
except ModuleNotFoundError:
import bin.binutil # alt import if called as module
import glob
import json
import random
import itertools
import math
import numpy as np
import pandas as pd
import re
import subprocess
from ast import literal_eval
from dreamcoder.program import Program, Primitive, prettyProgram
from dreamcoder.type import *
from dreamcoder.grammar import Grammar
from functools import reduce
from joblib import Parallel, delayed, parallel_backend
# set the seed first thing
random.seed(1)
# Define ourselves a failure mode for recursion
class RecursionDepthExceeded(Exception):
pass
# notice that these are curried
def _reverse(x): return list(reversed(x))
def _cons(x): return lambda xs: [x] + xs
def _append(xs): return lambda x: xs + [x]
def _single(x): return [x]
def _concat(x): return lambda y: x + y
def _unique(x): return list(dict.fromkeys(x))
def _product(x): return reduce(lambda x,y: x*y, x, 1)
def _first(x): return x[0]
def _second(x): return x[1]
def _third(x): return x[2]
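# Worked examples of the curried helpers above (illustrative only, not in the
# original source):
#   _cons(1)([2, 3])      -> [1, 2, 3]
#   _concat([1])([2, 3])  -> [1, 2, 3]
#   _second([4, 5, 6])    -> 5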
def _nth(i):
if i > 0:
return lambda x: x[i-1]
else:
raise IndexError
def _repeat(x): return lambda n: [x]*n
def _range(start): return lambda step: lambda stop: list(range(start, stop+1 if step > 0 else stop-1, step))
def _last(x): return x[-1]
def _drop(i): return lambda xs: xs[i:]
def _droplast(i): return lambda xs: xs[:-i] if i > 0 else xs[:]
def _take(i): return lambda xs: xs[:i]
def _takelast(i): return lambda xs: xs[-i:] if i > 0 else []
def _eq(x): return lambda y: x == y
def _mod(x): return lambda y: x % y
def _slice(x): return lambda y: lambda l: l[(x-1):y]
def _cut_idx(i): return lambda xs: xs[:(i-1)] + xs[i:]
def _cut_slice(i):
def helper(j):
if i > j:
raise IndexError
return lambda xs: xs[:(i-1)] + xs[j:]
return helper
def _cut_val(v):
def helper(xs):
result = []
found = False
for x in xs:
if x != v or found:
result.append(x)
elif x == v:
found = True
return result
return helper
def _cut_vals(v): return lambda xs: [x for x in xs if x != v]
def _replace(idx): return lambda y: lambda xs: [y if i == idx else x for i, x in enumerate(xs, 1)]
def _flatten(l): return [x for xs in l for x in xs]
def _map(f): return lambda l: list(map(f, l))
def _if(c): return lambda t: lambda f: t if c else f
def _addition(x): return lambda y: x + y
def _subtraction(x): return lambda y: x - y
def _multiplication(x): return lambda y: x * y
def _division(x):
def helper(y):
if y == 0:
raise ValueError
return x // y
return helper
def _gt(x): return lambda y: x > y
def _lt(x): return lambda y: x < y
# not the most general form (i.e. zip-with) but it matches standard usage
def _zip(xs): return lambda ys: [list(x) for x in zip(xs, ys)]
def _mapi(f): return lambda l: list(map(lambda i_x: f(i_x[0])(i_x[1]), enumerate(l, 1)))
def _and(x): return lambda y: x and y
def _or(x): return lambda y: x or y
def _not(x): return not x
def _group(key):
def helper(xs):
keys = []
groups = {}
for x in xs:
k = key(x)
if k not in groups:
keys.append(k)
groups[k] = [x]
else:
groups[k].append(x)
return [groups[k] for k in keys]
return helper
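# Illustrative check (not in the original): _group preserves first-seen key order,
#   _group(lambda x: x % 2)([1, 2, 3, 4]) -> [[1, 3], [2, 4]]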
def _is_even(x): return x % 2 == 0
def _is_odd(x): return x % 2 == 1
def _count(p): return lambda xs: sum(p(x) for x in xs)
def _filter(f): return lambda xs: list(filter(f, xs))
def _filteri(f): return lambda xs: [x for i, x in enumerate(xs, 1) if f(i)(x)]
def _fold(f): return lambda x0: lambda xs: reduce(lambda a, x: f(a)(x), xs, x0)
def _foldi(f): return lambda x0: lambda xs: reduce(lambda a, t: f(t[0])(a)(t[1]), enumerate(xs, 1), x0)
def _is_in(xs): return lambda x: x in xs
def _find(p): return lambda xs: [i for i, x in enumerate(xs, 1) if p(x)]
def _insert(x): return lambda i: lambda xs: xs[:(i-1)] + [x] + xs[(i-1):]
def _splice(x): return lambda i: lambda xs: xs[:(i-1)] + x + xs[(i-1):]
def _tail(xs): return xs[1:]
def _swap(i):
def swap_helper_j(j):
def swap_helper_xs(xs):
fst = min(i,j)
snd = max(i,j)
return xs[:(fst-1)] + [xs[(snd-1)]] + xs[fst:(snd-1)] + [xs[(fst-1)]] + xs[snd:]
return swap_helper_xs
return swap_helper_j
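# Illustrative check (not in the original): indices are 1-based,
#   _swap(1)(3)([7, 8, 9, 10]) -> [9, 8, 7, 10]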
def _sort(k): return lambda xs: sorted(xs, key=k)
def _fix(argument):
def inner(body):
recursion_limit = [50]
def fix(x):
def r(z):
recursion_limit[0] -= 1
if recursion_limit[0] <= 0:
raise RecursionDepthExceeded()
else:
return fix(z)
return body(r)(x)
return fix(argument)
return inner
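# _fix is a bounded fixed-point combinator: the recursive callback raises
# RecursionDepthExceeded after 50 self-calls. Illustrative check (not in the
# original):
#   _fix(5)(lambda rec: lambda n: 1 if n <= 0 else n * rec(n - 1)) -> 120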
def model_comparison_primitives_9():
return _model_comparison_primitives(9)
def model_comparison_primitives_99():
return _model_comparison_primitives(99)
def _model_comparison_primitives(max_num):
return [Primitive(str(j), tint, j) for j in range(0,max_num+1)] + [
Primitive("nan", tint, math.nan),
Primitive("true", tbool, True),
Primitive("false", tbool, False),
Primitive("empty", tlist(t0), []),
Primitive("cons", arrow(t0, tlist(t0), tlist(t0)), _cons),
Primitive("+", arrow(tint, tint, tint), _addition),
Primitive("-", arrow(tint, tint, tint), _subtraction),
Primitive(">", arrow(tint, tint, tbool), _gt),
Primitive("fix", arrow(t0, arrow(arrow(t0, t1), t0, t1), t1), _fix),
Primitive("head", arrow(tlist(t0), t0), _first),
Primitive("if", arrow(tbool, t0, t0, t0), _if),
Primitive("is_empty", arrow(t0, t0, tbool), _eq),
Primitive("is_equal", arrow(t0, t0, tbool), _eq),
# `lambda` is built into the representation.
Primitive("tail", arrow(tlist(t0), tlist(t0)), _tail),
]
# define some primitives
def primitives():
return [Primitive(str(j), tint, j) for j in range(-2,100)] + [
Primitive("%", arrow(tint, tint, tint), _mod),
Primitive("*", arrow(tint, tint, tint), _multiplication),
Primitive("+", arrow(tint, tint, tint), _addition),
Primitive("-", arrow(tint, tint, tint), _subtraction),
Primitive("/", arrow(tint, tint, tint), _division),
Primitive("<", arrow(tint, tint, tbool), _lt),
Primitive("==", arrow(t0, t0, tbool), _eq),
Primitive(">", arrow(tint, tint, tbool), _gt),
Primitive("abs", arrow(tint, tint), abs),
Primitive("and", arrow(tbool, tbool, tbool), _and),
Primitive("append", arrow(tlist(t0), t0, tlist(t0)), _append),
Primitive("concat", arrow(tlist(t0), tlist(t0), tlist(t0)), _concat),
Primitive("cons", arrow(t0, tlist(t0), tlist(t0)), _cons),
Primitive("count", arrow(arrow(t0, tbool), tlist(t0), tint), _count),
Primitive("cut_idx", arrow(tint, tlist(t0), tlist(t0)), _cut_idx),
Primitive("cut_slice", arrow(tint, tint, tlist(t0), tlist(t0)), _cut_slice),
Primitive("cut_val", arrow(t0, tlist(t0), tlist(t0)), _cut_val),
Primitive("cut_vals", arrow(t0, tlist(t0), tlist(t0)), _cut_vals),
Primitive("drop", arrow(tint, tlist(t0), tlist(t0)), _drop),
Primitive("droplast", arrow(tint, tlist(t0), tlist(t0)), _droplast),
Primitive("empty", tlist(t0), []),
Primitive("false", tbool, False),
Primitive("filter", arrow(arrow(t0, tbool), tlist(t0), tlist(t0)), _filter),
Primitive("filteri", arrow(arrow(tint, t0, tbool), tlist(t0), tlist(t0)), _filteri),
Primitive("flatten", arrow(tlist(tlist(t0)), tlist(t0)), _flatten),
Primitive("fold", arrow(arrow(t1, t0, t1), t1, tlist(t0), t1), _fold),
Primitive("foldi", arrow(arrow(tint, t1, t0, t1), t1, tlist(t0), t1), _foldi),
Primitive("group", arrow(arrow(t0, t1), tlist(t1), tlist(tlist(t1))), _group),
Primitive("first", arrow(tlist(t0), t0), _first),
Primitive("second", arrow(tlist(t0), t0), _second),
Primitive("third", arrow(tlist(t0), t0), _third),
Primitive("if", arrow(tbool, t0, t0, t0), _if),
Primitive("is_even", arrow(tint, tbool), _is_even),
Primitive("is_odd", arrow(tint, tbool), _is_odd),
Primitive("last", arrow(tlist(t0), t0), _last),
Primitive("length", arrow(tlist(t0), tint), len),
Primitive("map", arrow(arrow(t0, t1), tlist(t0), tlist(t1)), _map),
Primitive("mapi", arrow(arrow(tint, t0, t1), tlist(t0), tlist(t1)), _mapi),
Primitive("max", arrow(tlist(t0), tint), max),
Primitive("min", arrow(tlist(t0), tint), min),
Primitive("not", arrow(tbool, tbool), _not),
Primitive("nth", arrow(tint, tlist(t0), t0), _nth),
Primitive("or", arrow(tbool, tbool, tbool), _or),
Primitive("product", arrow(tlist(tint), tint), _product),
Primitive("range", arrow(tint, tint, tint, tlist(tint)), _range),
Primitive("repeat", arrow(t0, tint, tlist(t0)), _repeat),
Primitive("replace", arrow(tint, t0, tlist(t0), tlist(t0)), _replace),
Primitive("reverse", arrow(tlist(t0), tlist(t0)), _reverse),
Primitive("singleton", arrow(t0, tlist(t0)), _single),
Primitive("slice", arrow(tint, tint, tlist(t0), tlist(t0)), _slice),
Primitive("sort", arrow(arrow(t0, tint), tlist(t0), tlist(t0)), _sort),
Primitive("sum", arrow(tlist(tint), tint), sum),
Primitive("take", arrow(tint, tlist(t0), tlist(t0)), _take),
Primitive("takelast", arrow(tint, tlist(t0), tlist(t0)), _takelast),
Primitive("true", tbool, True),
Primitive("unique", arrow(tlist(t0), tlist(t0)), _unique),
Primitive("zip", arrow(tlist(t0), tlist(t0), tlist(tlist(t0))), _zip),
Primitive("is_in", arrow(tlist(t0), t0, tbool), _is_in),
Primitive("find", arrow(arrow(t0, tbool), tlist(t0), tlist(tint)), _find),
Primitive("insert", arrow(t0, tint, tlist(t0), tlist(t0)), _insert),
Primitive("splice", arrow(tlist(t0), tint, tlist(t0), tlist(t0)), _splice),
Primitive("swap", arrow(tint, tint, tlist(t0), tlist(t0)), _swap),
]
def proportion(xs, f):
return sum(f(i, o) for i,o in xs)/len(xs)
def proportion_set(xs, f):
return len({f(i, o) for i,o in xs})/len(xs)
def limit(xs, accept, f):
return max(0, sum(f(i, o) for i,o in xs) - accept)
def forbid(xs, f):
return limit(xs, 0, f)
def center(xs, f, factor = 1/2):
return 1 + abs(factor * len(xs) - sum(f(i,o) for i, o in xs))
def proportion_unique_elements(xs):
return sum(len(set(i)) for i,o in xs) / sum(len(i) for i,o in xs)
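# These scoring helpers all take a list xs of (input, output) example pairs.
# Illustrative checks (not in the original):
#   proportion([([1, 2], [2]), ([3], [])], lambda i, o: len(o) < len(i)) -> 1.0
#   limit([([1], []), ([2, 3], []), ([4, 5], [])], 1, lambda i, o: len(i) == 1) -> 0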
def wave_pilot():
return [
{"concept": '(lambda (unique $0))',
"adjust": lambda xs: min(1.0, 1.0/(len(xs)-2)*sum((len(o)/len(i) < 0.75 if len(i) > 0 else 1) for i, o in xs)),
"inputs": [
[7, 31, 7, 7, 31],
[3, 8, 3],
[7, 9, 2, 2, 3, 7, 6, 7],
[19, 19],
[66, 3, 89, 4, 66, 66, 4, 37, 0, 3],
[56, 93, 1, 1, 0, 93],
[],
[19, 38, 14, 76, 7, 4, 88],
[16, 25, 8, 8],
[79],
[5, 19, 49, 7, 62]
]},
{"concept": '(lambda (singleton (length $0)))',
"adjust": lambda xs: 1.0,
"inputs": [
[],
[31],
[23, 6],
[38, 4, 18],
[88, 67, 0, 44],
[3, 3, 7, 49, 6],
[80, 70, 51, 5, 98, 2],
[45, 76, 37, 3, 8, 1, 76],
[66, 12, 43, 12, 25, 6, 6, 15],
[22, 24, 58, 84, 3, 46, 0, 22, 3],
[10, 10, 10, 10, 10, 10, 10, 10, 10, 10],
]},
{"concept": '(lambda (repeat (max $0) (min $0)))',
"adjust": lambda xs: 1.0 if any(len(o) == 0 for i, o in xs) else 0.0,
"inputs": [
[99, 7, 55], # 7/3
[36, 22, 2, 15, 7], # 2/5
[62, 5], # 5/2
[23, 9, 14, 7, 2, 31, 4, 4, 0, 18], # 0/10
[3, 3, 3, 3], # 3/4
[4, 4, 4], # 4/3
[32, 14, 67, 32, 9, 70, 77], # 9/7
[7], # 7/1
[12, 42, 92, 58, 62, 38], # 12/6
[48, 56, 39, 58, 13], # 13/5
[43, 84, 8, 17, 8, 78, 64, 10], # 8/9
]},
{"concept": '(lambda (concat (reverse (drop 1 $0)) $0))',
"adjust": lambda xs: 1.0,
"inputs": [
[],
[1],
[7, 7],
[49, 0, 34],
[54, 6, 3, 8],
[70, 70, 3, 70, 3],
[64, 15, 92, 54, 15, 85],
[61, 6, 6, 2, 2, 6, 6],
[0, 1, 1, 21, 4, 50, 50, 78],
[93, 93, 93, 93, 93, 93, 93, 93, 93],
[1, 79, 0, 21, 4, 32, 42, 81, 23, 9],
]},
{"concept": '(lambda (concat (drop (last $0) $0) (take (last $0) $0)))',
"adjust": lambda xs: 0 if sum(i[-1] >= len(i) for i, o in xs) > 2 else 1,
"inputs": [
[1, 17, 4, 2],
[20, 14, 66, 2, 68, 46, 93, 5],
[50, 71, 6, 32, 1],
[72, 8, 54, 98, 72, 43, 49, 42, 7, 8],
[12, 5, 83, 5, 0, 1],
[46, 69, 70, 4, 20, 5, 42, 41, 22, 6],
[9, 33, 0],
[0, 23, 17, 81, 87, 3],
[53, 22, 57, 37, 59, 66, 26, 21, 4],
[96, 32, 99, 98, 98, 60, 80, 90, 26, 7],
[88, 10, 1, 78, 56, 32],
]},
{"concept": '(lambda (flatten (map (lambda (cons (first $0) (singleton (length $0)))) (group (lambda $0) $0))))',
"adjust": lambda xs: len({e for i, o in xs for e in o[1::2]})/10,
"inputs": [
[2, 2, 2, 19, 2, 2, 25, 2],
[4, 4, 8, 4, 3],
[4, 4, 4, 4, 4, 4, 4],
[79, 79, 8, 79, 7, 7, 7, 79, 8],
[86, 86, 1, 1, 86, 1],
[8, 9, 98, 4, 7, 86],
[1, 41, 6, 90],
[33, 24, 0, 0, 1, 7, 33, 10],
[97, 18, 67, 67],
[8, 8, 9, 8, 1, 9, 8],
[0, 45, 7, 37, 94, 94, 7, 7, 45, 45],
]},
{"concept": '(lambda (fold (lambda (lambda (if (> $0 (last $1)) (append $1 $0) $1))) (take 1 $0) (drop 1 $0)))',
"adjust": lambda xs: 2*len({len(o) for i, o in xs})/11,
"inputs": [
[1, 3, 2, 5, 3, 4, 7, 6, 9], #9
[22, 6, 7, 38, 62, 44, 78, 91], #8
[0, 4, 9], # 3
[5, 2, 19, 18, 37], #5
[4, 0, 9], # 3
[11, 23, 34, 55, 87], # 5
[97, 13, 82, 4, 55, 97, 3], #7
[], # 0
[34, 35, 62, 24, 75, 6], #6
[2, 6, 2, 10, 17, 3, 53, 9, 72, 3], # 10
[48, 61, 37, 86], #4
]},
{"concept": '(lambda (fold (lambda (lambda (if (is_even (second $0)) (append $1 (first $0)) $1))) empty (zip (droplast 1 $0) (drop 1 $0))))',
"adjust": lambda xs: len({len(o) for i, o in xs})/10,
"inputs": [
[6, 0, 7, 32],
[62, 8, 59, 88, 98, 6],
[1, 96, 1, 13, 86, 77, 6, 10, 7, 0],
[6],
[1, 7],
[43, 4, 64, 5, 0],
[0, 2, 3],
[7, 14, 7, 6, 8, 57, 10],
[27, 6, 21, 6, 86, 8, 0],
[4, 10, 6, 8],
[6, 0, 85, 7, 10, 69, 22, 5],
]},
]
def human_experiments_wave_1():
return [
{
"concept": "(lambda (cons 11 (cons 19 (cons 24 (cons 33 (cons 42 (cons 5 (cons 82 (cons 0 (cons 64 (cons 9 empty)))))))))))",
"adjust": lambda xs: 0.0,
},
{
"concept": "(lambda $0)",
"adjust": lambda xs: - limit(xs, 1, lambda i,o: len(i) >= 7),
},
{
"concept": "(lambda (singleton (length $0)))",
"adjust": lambda xs: proportion_set(xs, lambda i,o: o[0]),
},
{
"concept": "(lambda (singleton (max $0)))",
"adjust": lambda xs: 2 * proportion_set(xs, lambda i,o: o[0]) + proportion_unique_elements(xs)
},
{
"concept": "(lambda (splice (drop 1 (droplast 1 $0)) 2 $0))",
"adjust": lambda xs: proportion(xs, lambda i,o: 6 >= len(i) >= 3),
},
{
"concept": "(lambda (sort (lambda $0) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) - limit(xs, 3, lambda i,o: len(i) <= 3 or len(i) >= 7),
},
{
"concept": "(lambda (unique $0))",
"adjust": lambda xs: proportion(xs, lambda i,o: (len(i) - len(o)) > 2) + proportion_set(xs, lambda i,o: len(i) - len(o)) + proportion_set(xs, lambda i,o: len(o)) - limit(xs, 2, lambda i,o: len(o) >= 7),
},
{
"concept": "(lambda (singleton (sum $0)))",
"adjust": lambda xs: 4 * proportion_set(xs, lambda i,o: o[0]) + 2 * proportion_set(xs, lambda i,o: len(i)) + proportion_set(xs, lambda i,o: o[0]/10),
},
{
"concept": "(lambda (singleton (product $0)))",
"adjust": lambda xs: 4 * proportion_set(xs, lambda i,o: o[0]) + 2 * proportion_set(xs, lambda i,o: len(i)) + proportion_set(xs, lambda i,o: o[0]/5),
},
{
"concept": "(lambda (takelast 3 (sort (lambda $0) $0)))",
"adjust": lambda xs: 2 * len({oe for i,o in xs for oe in o})/sum(len(o) for i,o in xs) + proportion_unique_elements(xs)
},
{
"concept": "(lambda (repeat (max $0) (min $0)))",
"adjust": lambda xs: proportion(xs, lambda i,o: min(i) <= 10) + proportion_set(xs, lambda i,o: max(i)) + proportion_set(xs, lambda i,o: min(i))
},
{
"concept": "(lambda (range 1 1 (last $0)))",
"adjust": lambda xs: proportion(xs, lambda i,o: len(i) > 0 and i[-1] <= 10) + proportion_set(xs, lambda i,o: i[-1] if len(i) > 0 else 0) + proportion_unique_elements(xs)
},
{
"concept": "(lambda (filter (lambda (> (first $1) (% $0 10))) $0))",
"adjust": lambda xs: - limit(xs, 2, lambda i,o: len(o) <= 2 or len(o) >= 7) + proportion(xs, lambda i,o: len(i) > 1 and i[0] < 10) + proportion_set(xs, lambda i,o: i[0] if len(i) > 0 else 0)
},
{
"concept": "(lambda (cons (last $0) $0))",
"adjust": lambda xs: - limit(xs, 2, lambda i,o: len(o) <= 2 or len(o) >= 7) + proportion_unique_elements(xs) + proportion_set(xs, lambda i,o: i[-1])
},
{
"concept": "(lambda (cons (sum (unique $0)) (append (unique $0) (sum (unique $0)))))",
"adjust": lambda xs: proportion_set(xs, lambda i,o: len(o)) + proportion_set(xs, lambda i,o: o[0]) - limit(xs, 2, lambda i,o: len(o) <= 3 or len(o) >= 8) + 2 * proportion(xs, lambda i,o: (len(i) - len(o)) > 2) + 2 * proportion_set(xs, lambda i,o: len(i) - len(o))
},
{
"concept": "(lambda (concat (reverse (drop 1 $0)) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) - limit(xs, 2, lambda i,o: len(o) >= 10)
},
{
"concept": "(lambda (concat (drop 3 $0) (take 3 $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) >= 3) - limit(xs, 2, lambda i,o: len(o) > 7)
},
{
"concept": "(lambda (concat (drop (last $0) $0) (take (last $0) $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) - limit(xs, 2, lambda i,o: len(o) > 7) + 4 * proportion(xs, lambda i,o: len(i) > 0 and len(i) > i[-1]) + proportion_set(xs, lambda i,o: i[-1] if len(i) else 0) + proportion_set(xs, lambda i,o: len(i) - i[-1])
},
{
"concept": "(lambda ((lambda (concat ($0 first) (concat $1 ($0 last)))) (lambda (if (== ($0 $1) 8) empty (singleton 8)))))",
"adjust": lambda xs: 2 / center(xs, lambda i,o: len(i) > 0 and i[0] == 8) + 2 / center(xs, lambda i,o: len(i) > 0 and i[-1] == 8) - limit(xs, 2, lambda i,o: len(o) >= 7) + proportion_unique_elements(xs)
},
{
"concept": "(lambda (singleton (first $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion_set(xs, lambda i,o: o[0])
},
{
"concept": "(lambda (singleton (last $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion_set(xs, lambda i,o: o[0])
},
{
"concept": "(lambda (singleton (second (reverse $0))))",
"adjust": lambda xs: 2 * proportion(xs, lambda i,o: len(i) >= 2) + proportion_unique_elements(xs) + proportion_set(xs, lambda i,o: o[0])
},
{
"concept": "(lambda (singleton (nth (last $0) $0)))",
"adjust": lambda xs: 2 * proportion(xs, lambda i,o: len(i) > 0) + proportion_unique_elements(xs) + proportion_set(xs, lambda i,o: o[0]) - 2 * limit(xs, 1, lambda i,o: len(i) > 0 and len(i) == i[-1])
},
{
"concept": "(lambda (singleton (nth (nth (first $0) $0) $0)))",
"adjust": lambda xs: 2 * proportion(xs, lambda i,o: len(i) > 0 and i[0] < len(i) and i[i[0]] < len(i)) + proportion_unique_elements(xs) + proportion_set(xs, lambda i,o: o[0])
},
{
"concept": "(lambda (filter (lambda (== (/ (first $1) 10) (/ $0 10))) $0))",
"adjust": lambda xs: proportion_set(xs, lambda i,o: i[0]/10 if len(i) else 0) + proportion_set(xs, lambda i,o: len(i) - len(o)) - limit(xs, 1, lambda i,o: len(o) <= 1 or len(o) == len(i)) + proportion_set(xs, lambda i,o: len(set(o)))
},
{
"concept": "(lambda (drop 1 $0))",
"adjust": lambda xs: proportion_unique_elements(xs)
},
{
"concept": "(lambda (droplast 1 $0))",
"adjust": lambda xs: proportion_unique_elements(xs)
},
{
"concept": "(lambda (sort (lambda $0) (cut_idx 3 (drop 2 $0))))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5)
},
{
"concept": "(lambda (slice (first $0) (second $0) (drop 2 $0)))",
"adjust": lambda xs: 4 * proportion(xs, lambda i,o: len(i) > 2 and 1 <= i[0] <= i[1] <= len(i)-2) + proportion_unique_elements(xs) - limit(xs, 1, lambda i,o: len(i) > 0 and i[0] == 1) - limit(xs, 0, lambda i,o: len(i) > 0 and i[0] == 0) - limit(xs, 1, lambda i,o: len(i) > 1 and i[1] == i[0]) - limit(xs, 1, lambda i,o: len(i) > 1 and i[1] == len(i)-2) - limit(xs, 0, lambda i,o: len(i) > 1 and i[1] < i[0]) - limit(xs, 0, lambda i,o: len(i) > 1 and i[1] > len(i)-2)
},
{
"concept": "(lambda (take (first $0) (drop 1 $0)))",
"adjust": lambda xs: 4 * proportion(xs, lambda i,o: len(i) > 1 and i[0] <= len(i)-1) + proportion_unique_elements(xs) + proportion_set(xs, lambda i,o: len(o)) + proportion_set(xs, lambda i,o: len(i)-len(o)) - limit(xs, 1, lambda i,o: len(i) > 0 and i[0] == len(i) - 1) - limit(xs, 1, lambda i,o: len(i) > 0 and i[0] == 0)
},
{
"concept": "(lambda (filter (lambda (is_even (/ $0 10))) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len({ie//10 for ie in i})/max(1,len(i)) > 4) + proportion_set(xs, lambda i,o: len(i)-len(o))
},
{
"concept": "(lambda (cut_idx 3 $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + 2 * proportion(xs, lambda i,o: len(i) > 3) - limit(xs, 2, lambda i,o: len(o) >= 7)
},
{
"concept": "(lambda (cut_slice 2 5 $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + 2 * proportion(xs, lambda i,o: len(i) > 5) - limit(xs, 2, lambda i,o: len(o) >= 7)
},
{
"concept": "(lambda (cut_slice (first $0) (second $0) $0))",
"adjust": lambda xs: 4 * proportion(xs, lambda i,o: len(i) > 2 and 1 <= i[0] <= i[1] <= len(i)) + proportion_unique_elements(xs) - limit(xs, 3, lambda i,o: len(i) > 0 and i[0] == 1) - limit(xs, 0, lambda i,o: len(i) > 0 and i[0] == 0) - limit(xs, 1, lambda i,o: len(i) > 1 and i[1] == i[0]) - limit(xs, 2, lambda i,o: len(i) > 1 and i[1] == len(i)) - limit(xs, 0, lambda i,o: len(i) > 1 and i[1] < i[0]) - limit(xs, 0, lambda i,o: len(i) > 1 and i[1] > len(i))
},
{
"concept": "(lambda (cut_val 7 $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + 2 * proportion(xs, lambda i,o: 7 in i) + 1 / center(xs, lambda i,o: i.count(7) > 1, factor = 8/11) + 1 / center(xs, lambda i,o: i.count(7) > 2, factor = 4/11)
},
{
"concept": "(lambda (cut_val (max $0) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + 2 * proportion(xs, lambda i,o: len(i) > 1) + 1 / center(xs, lambda i,o: len(i) > 0 and i.count(max(i)) > 1, factor = 8/11) + 1 / center(xs, lambda i,o: len(i) > 0 and i.count(max(i)) > 2, factor = 4/11)
},
{
"concept": "(lambda (cut_vals 3 $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + 2 * proportion(xs, lambda i,o: 3 in i) + 1 / center(xs, lambda i,o: i.count(3) > 1, factor = 8/11) + 1 / center(xs, lambda i,o: i.count(3) > 2, factor = 4/11)
},
{
"concept": "(lambda (cut_vals (first $0) $0))",
"adjust": lambda xs: proportion(xs, lambda i,o: len(i) > 0) + 1 / center(xs, lambda i,o: len(i) > 1 and i.count(i[0]) == 1, factor = 2/11) + 1 / center(xs, lambda i,o: len(i) > 1 and i.count(i[0]) == 2, factor = 2/11) + 1 / center(xs, lambda i,o: len(i) > 1 and i.count(i[0]) == 3, factor = 3/11) + 1 / center(xs, lambda i,o: len(i) > 1 and i.count(i[0]) == 4, factor = 6/11),
},
{
"concept": "(lambda (cut_vals (max $0) (cut_vals (min $0) $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 2) + 1 / center(xs, lambda i,o: len(i) > 1 and i.count(min(i)) > 1) + 1 / center(xs, lambda i,o: len(i) > 1 and i.count(max(i)) > 1)
},
{
"concept": "(lambda (replace 2 9 $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + 2 * proportion(xs, lambda i,o: len(i) >= 2)
},
{
"concept": "(lambda (replace (first $0) (second $0) (drop 2 $0)))",
"adjust": lambda xs: 4 * proportion(xs, lambda i,o: len(i) > 2 and 1 <= i[0] <= len(i)-2) + proportion_unique_elements(xs) + proportion_set(xs, lambda i,o: i[1] if len(i) > 1 else 0) + proportion_set(xs, lambda i,o: i[0] if len(i) > 0 else 0) + proportion_set(xs, lambda i,o: len(i)-2-i[0] if len(i) > 0 else 0) - limit(xs, 1, lambda i,o: len(i) > 0 and i[0] == 1) - limit(xs, 0, lambda i,o: len(i) > 0 and i[0] == 0) - limit(xs, 1, lambda i,o: len(i) > 1 and i[1] == len(i)-2) - limit(xs, 0, lambda i,o: len(i) > 1 and i[1] > len(i)-2)
},
{
"concept": "(lambda (flatten (map (lambda (cons (/ $0 10) (singleton (% $0 10)))) $0)))",
"adjust": lambda xs: proportion_unique_elements(xs)
},
{
"concept": "(lambda (map (lambda (if (== $0 (max $1)) (min $1) $0)) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 1) + 1 / center(xs, lambda i,o: len(i) > 1 and i.count(max(i)) > 1, factor = 8/11) + proportion_set(xs, lambda i,o: max(0,i.count(max(i))-5) if len(i) > 0 else 0)
},
{
"concept": "(lambda (map (lambda (if (or (== $0 (max $1)) (== $0 (min $1))) (- (max $1) (min $1)) $0)) $0))",
"adjust": lambda xs: proportion_unique_elements(xs),
},
{
"concept": "(lambda (map (lambda (first $1)) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion_set(xs, lambda i,o: i[0] if len(i) > 0 else 0)
},
{
"concept": "(lambda (map (lambda (- (max $0) (min $0))) (zip (droplast 1 $0) (drop 1 $0))))",
"adjust": lambda xs: proportion_unique_elements(xs)
},
{
"concept": "(lambda (flatten (mapi (lambda (lambda (cons $0 (singleton $1)))) $0)))",
"adjust": lambda xs: proportion_unique_elements(xs)
},
{
"concept": "(lambda (flatten (map (range 1 1) $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: all(0 < ie <= 5 for ie in i)) + proportion_set(xs, lambda i,o: len(i)) + proportion(xs, lambda i,o: len(set(i)) > 3) - limit(xs, 3, lambda i,o: len(i) < 3 or len(i) > 5)
},
{
"concept": "(lambda (map (lambda (* $0 (first $1))) (drop 1 $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion_set(xs, lambda i,o: i[0] if len(i) > 0 else 0) + proportion(xs, lambda i,o: i[0] <= 10 if len(i) > 0 else 0) - limit(xs, 1, lambda i,o: len(i) > 0 and i[0] == 0)
},
{
"concept": "(lambda (flatten (map (lambda (if (> $0 (first $1)) (range (first $1) 1 $0) (singleton $0))) $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion_set(xs, lambda i,o: i[0] if len(i) > 0 else 0) + proportion(xs, lambda i,o: len(i) > 2)
},
{
"concept": "(lambda (flatten (map (lambda (repeat $0 $0)) $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: all(ie <= 5 for ie in i)) + proportion(xs, lambda i,o: len(set(i)) > 3) + proportion(xs, lambda i,o: i.count(0) < 2) + proportion_set(xs, lambda i,o: len(i)) - limit(xs, 3, lambda i,o: len(i) < 3 or len(i) > 5)
},
{
"concept": "(lambda (map (lambda (* (/ $0 10) (% $0 10))) $0))",
"adjust": lambda xs: proportion_unique_elements(xs)
},
{
"concept": "(lambda (flatten (map (lambda (append (take 1 $0) (length $0))) (group (lambda $0) $0))))",
"adjust": lambda xs: proportion_set(xs, lambda i,o: tuple(o[1::2])) + sum(oe in [2,3,4] for i,o in xs for oe in o[1::2])/sum(len(o[1::2]) for i,o in xs)
},
{
"concept": "(lambda (map (lambda (if (is_even $0) (* 3 $0) $0)) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5)
},
{
"concept": "(lambda (mapi (lambda (lambda (* $0 $1))) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5) + proportion(xs, lambda i,o: all(ie <= 10 for ie in i))
},
{
"concept": "(lambda (mapi (lambda (lambda (+ $0 $1))) (reverse $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5)
},
{
"concept": "(lambda (flatten (map (lambda (cons $0 (singleton (% $0 2)))) $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5)
},
{
"concept": "(lambda (mapi (lambda (lambda (if (== $0 $1) 1 0))) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5) + proportion(xs, lambda i,o: all(1 <= ie <= 10 for ie in i)) + proportion(xs, lambda i,o: sum(o) > 2) - limit(xs, 1, lambda i,o: sum(o) == 0)
},
{
"concept": "(lambda (map (lambda (count (lambda (== $1 $0)) $1)) (range 1 1 (max $0))))",
"adjust": lambda xs: proportion(xs, lambda i,o: len(i) > 8) + proportion(xs, lambda i,o: all(1 <= ie <= 10 for ie in i)) - limit(xs, 1, lambda i,o: sum(oe > 0 for oe in o) < 2) + proportion(xs, lambda i,o: sum(oe > 1 for oe in o) in [3,4])
},
{
"concept": "(lambda (map (lambda (- 99 $0)) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5)
},
{
"concept": "(lambda (mapi (lambda (lambda (+ $0 (- (length $2) $1)))) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5)
},
{
"concept": "(lambda (map (lambda (+ 7 (* 3 $0))) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5) + proportion(xs, lambda i,o: all(0 <= ie <= 20 for ie in i))
},
{
"concept": "(lambda (map (lambda (- (* $0 2) 10)) $0))",
"adjust": lambda xs: 4 * proportion_unique_elements(xs) + 2 * proportion(xs, lambda i,o: len(i) in [4,5]) - 2 * limit(xs, 2, lambda i,o: len(i) <= 3)
},
{
"concept": "(lambda (map (lambda (+ (/ $0 4) 5)) $0))",
"adjust": lambda xs: 2 * proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5) + proportion(xs, lambda i,o: all(10 <= ie <= 40 for ie in i))
},
{
"concept": "(lambda (filter is_even (reverse $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5)
},
{
"concept": "(lambda (sort (lambda (+ (% $0 10) (/ $0 10))) (unique $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5)
},
{
"concept": "(lambda (filter (lambda (== (% $0 3) 0)) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5)
},
{
"concept": "(lambda (cut_val (length $0) (range 1 1 10)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion_set(xs, lambda i,o: len(i)) - limit(xs, 0, lambda i,o: len(i) == 0)
},
{
"concept": "(lambda (singleton (max (cut_vals (max $0) $0))))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5)
},
{
"concept": "(lambda (cons (first $0) (singleton (last $0))))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5)
},
{
"concept": "(lambda (drop 1 (fold (lambda (lambda (append $1 (+ (last $1) $0)))) (singleton 0) $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5) + proportion(xs, lambda i,o: all(0 <= ie <= 20 for ie in i))
},
{
"concept": "(lambda (drop 1 (fold (lambda (lambda (append $1 (* (last $1) $0)))) (singleton 1) $0)))",
"adjust": lambda xs: 2 * proportion_unique_elements(xs) + 2 * proportion(xs, lambda i,o: len(i) > 5) + proportion(xs, lambda i,o: all(2 <= ie <= 9 for ie in i)) - forbid(xs, lambda i,o: 0 in i or 0 in o) - limit(xs, 1, lambda i,o: len(i) < 3)
},
{
"concept": "(lambda (mapi (lambda (lambda (max (take $1 $2)))) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5) + proportion(xs, lambda i,o: len(set(o))/max(1,len(o)))
},
{
"concept": "(lambda (take (length (unique $0)) $0))",
"adjust": lambda xs: proportion(xs, lambda i,o: len(i) > 5)
},
{
"concept": "(lambda (fold (lambda (lambda (if (> $0 (last $1)) (append $1 $0) $1))) (take 1 $0) (drop 1 $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5) + proportion(xs, lambda i,o: len(o) > 2)
},
{
"concept": "(lambda (map (lambda (sum $0)) (zip (droplast 1 $0) (drop 1 $0))))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5)
},
{
"concept": "(lambda (flatten (zip $0 (reverse $0))))",
"adjust": lambda xs: proportion_unique_elements(xs) - limit(xs, 2, lambda i,o: len(i) < 3 or len(i) > 6)
},
{
"concept": "(lambda (map first (filter (lambda (is_even (second $0))) (zip (droplast 1 $0) (drop 1 $0)))))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5)
},
{
"concept": "(lambda (fold (lambda (lambda (append (reverse $1) $0))) empty (reverse (sort (lambda $0) $0))))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5)
},
{
"concept": "(lambda (fold (lambda (lambda (append (reverse $1) $0))) empty (sort (lambda $0) $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5)
},
{
"concept": "(lambda (flatten (zip (filteri (lambda (lambda (is_odd $1))) $0) (reverse (filteri (lambda (lambda (is_even $1))) $0)))))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5) + proportion(xs, lambda i,o: len(i) % 2 == 0)
},
{
"concept": "(lambda (filteri (lambda (lambda (== (% $1 3) 0))) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) >= 9)
},
{
"concept": "(lambda (find (== (first $0)) (drop 1 $0)))",
"adjust": lambda xs: proportion(xs, lambda i,o: len(i) > 8) + proportion(xs, lambda i,o: all(1 <= ie <= 10 for ie in i)) + proportion_set(xs, lambda i,o: len(set(i))) - 2 * limit(xs, 1, lambda i,o: len(o) <= 1) - limit(xs, 1, lambda i,o: len(o) > 5)
},
{
"concept": "(lambda (filteri (lambda (lambda (and (is_even $1) (is_odd $0)))) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5)
},
{
"concept": "(lambda (cons (first $0) (cons (sum (drop 1 (droplast 1 $0))) (takelast 1 $0))))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5)
},
{
"concept": "(lambda (filter (lambda (> $0 (first $1))) $0))",
"adjust": lambda xs: proportion(xs, lambda i,o: len(i) > 8) - 2 * limit(xs, 1, lambda i,o: len(o) < 2) - limit(xs, 1, lambda i,o: len(o) > 5)
},
{
"concept": "(lambda (concat $0 (cons 0 $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) <= 5)
},
{
"concept": "(lambda (map (lambda (if (== (% $0 3) 0) 1 0)) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: 2 <= sum(o) <= 6)
},
{
"concept": "(lambda (range (min $0) 1 (max $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(o) < 8) + proportion(xs, lambda i,o: len(i) > 3) + proportion_set(xs, lambda i,o: min(i) if len(i) > 0 else 0) + proportion_set(xs, lambda i,o: max(i) if len(i) > 0 else 0) - limit(xs, 1, lambda i,o: len(o) <= 1)
},
{
"concept": "(lambda (range (first $0) 2 (last $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: 2 < len(o) < 8) + proportion(xs, lambda i,o: len(i) > 0 and i[0] % 2 == i[-1] % 2)
},
{
"concept": "(lambda (flatten (map (lambda (repeat $0 (/ $0 10))) $0)))",
"adjust": lambda xs: proportion_unique_elements(xs)
},
{
"concept": "(lambda (map (lambda (/ $0 10)) $0))",
"adjust": lambda xs: proportion_unique_elements(xs)
},
{
"concept": "(lambda (drop 1 (droplast 1 (sort (lambda $0) $0))))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(o) > 6)
},
{
"concept": "(lambda (cons (length $0) (append (reverse $0) (length $0))))",
"adjust": lambda xs: proportion_unique_elements(xs)
},
{
"concept": "(lambda (cons (first $0) (cons 23 (cons 68 (cons 42 (cons 99 (cons 71 (singleton (last $0)))))))))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 2)
},
{
"concept": "(lambda (concat (cons 17 (cons 38 (singleton 82))) (concat $0 (cons 1 (cons 55 (singleton 27))))))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: 5 >= len(i))
},
{
"concept": "(lambda (map (lambda (count (== $0) $1)) $0))",
"adjust": lambda xs: proportion_set(xs, lambda i,o: tuple(sorted(o)))
},
{
"concept": "(lambda (reverse (sort (lambda $0) (unique $0))))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5) + proportion_set(xs, lambda i,o: len(o)) + proportion_set(xs, lambda i,o: len(i) - len(o))
},
{
"concept": "(lambda (flatten (zip (range 1 1 (length $0)) (sort (lambda $0) $0))))",
"adjust": lambda xs: proportion_unique_elements(xs)
},
{
"concept": "(lambda (sort (lambda $0) (map (lambda (/ $0 10)) $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(set(o))/max(1,len(o))) + proportion(xs, lambda i,o: len(i) >= 3)
},
{
"concept": "(lambda (concat (filter (lambda (< (first $1) $0)) $0) (filter (lambda (> (first $1) $0)) $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: -2 < sum(ie > i[0] for ie in i)-sum(ie < i[0] for ie in i) < 2)
},
{
"concept": "(lambda (find is_even $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: 5 >= len(o) >= 2)
},
{
"concept": "(lambda (mapi (lambda (lambda (* (min $2) $1))) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion_set(xs, lambda i,o: min(i) if len(i) > 0 else 0) - forbid(xs, lambda i,o: len(i) > 0 and min(i) == 0)
},
{
"concept": "(lambda (map first (filter (lambda (== (second $0) 0)) (zip (droplast 1 $0) (drop 1 $0)))))",
"adjust": lambda xs: proportion_unique_elements(xs) + 2 * proportion(xs, lambda i,o: 5 >= len(o) >= 2)
},
{
"concept": "(lambda (singleton (product (filter (lambda (== (% $0 4) 0)) $0))))",
"adjust": lambda xs: proportion_unique_elements(xs) + 2 * proportion(xs, lambda i,o: 5 >= sum(ie % 4 == 0 for ie in i) >= 2) - limit(xs, 1, lambda i,o: o[0] == 0) + proportion_set(xs, lambda i,o: o[0]) + proportion(xs, lambda i,o: len(i) >= 7)
},
{
"concept": "(lambda (filter (lambda (and (> (max (take 2 $1)) $0) (> $0 (min (take 2 $1))))) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + 2 * proportion(xs, lambda i,o: 6 >= len(o) >= 3) + proportion_set(xs, lambda i,o: min(i[:2]) if len(i) > 1 else 0) + proportion_set(xs, lambda i,o: max(i[:2]) if len(i) > 1 else 0)
},
{
"concept": "(lambda (map sum (zip $0 (reverse $0))))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: 6 >= len(o) >= 3)
},
{
"concept": "(lambda (takelast (last $0) $0))",
"adjust": lambda xs: 4 * proportion(xs, lambda i,o: len(i) > 1 and i[-1] <= len(i)-1) + proportion_unique_elements(xs) + proportion_set(xs, lambda i,o: len(o)) + proportion_set(xs, lambda i,o: len(i)-len(o)) - limit(xs, 1, lambda i,o: len(i) > 0 and i[-1] == len(i) - 1) - limit(xs, 1, lambda i,o: len(i) > 0 and i[-1] == 0)
},
{
"concept": "(lambda (insert (+ (max $0) (min $0)) 3 (sort (lambda $0) $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: 7 >= len(o) >= 4)
},
{
"concept": "(lambda (insert (last $0) (first $0) (unique $0)))",
"adjust": lambda xs: 4 * proportion(xs, lambda i,o: len(i) > 1 and i[0] <= len(set(i))-1) + proportion_set(xs, lambda i,o: len(o)) + proportion_set(xs, lambda i,o: len(i)-len(o)) - limit(xs, 1, lambda i,o: len(i) > 0 and i[0] == len(set(i))) - limit(xs, 1, lambda i,o: len(i) > 0 and i[0] == 1) - forbid(xs, lambda i,o: len(i) > 0 and i[0] == 0),
},
{
"concept": "(lambda (splice (slice 4 5 $0) (- (length $0) 2) (reverse $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: i != o) + proportion(xs, lambda i,o: 8 >= len(i) > 5)
},
{
"concept": "(lambda (splice (cons 3 (cons 3 (singleton 3))) 3 $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: 7 >= len(i) >= 3)
},
{
"concept": "(lambda (take 3 (sort (lambda $0) $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) >= 3)
},
{
"concept": "(lambda (cut_idx (first $0) (drop 1 $0)))",
"adjust": lambda xs: 4 * proportion(xs, lambda i,o: len(i) > 1 and i[0] <= len(i)-1) + proportion_set(xs, lambda i,o: len(i)-i[0] if len(i) > 0 else 0) + proportion_set(xs, lambda i,o: i[0] if len(i) > 0 else 0) - limit(xs, 1, lambda i,o: len(i) > 0 and i[0] == len(i)-1) - limit(xs, 1, lambda i,o: len(i) > 0 and i[0] == 1) - forbid(xs, lambda i,o: len(i) > 0 and i[0] == 0),
},
{
"concept": "(lambda (replace (first $0) (length $0) (drop 1 $0)))",
"adjust": lambda xs: 4 * proportion(xs, lambda i,o: len(i) > 1 and i[0] <= len(i)-1) + proportion_set(xs, lambda i,o: len(i)) + proportion_set(xs, lambda i,o: i[0] if len(i) > 0 else 0) - limit(xs, 1, lambda i,o: len(i) > 0 and i[0] == len(i)-1) - limit(xs, 1, lambda i,o: len(i) > 0 and i[0] == 1) - forbid(xs, lambda i,o: len(i) > 0 and i[0] == 0),
},
{
"concept": "(lambda (sort (lambda (/ $0 10)) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: 8 > len(i) > 3)
},
{
"concept": "(lambda (sort (lambda (% $0 10)) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: 8 > len(i) > 3)
},
{
"concept": "(lambda (filter (lambda (== $0 (first $1))) (drop 1 $0)))",
"adjust": lambda xs: proportion(xs, lambda i,o: len(i) >= 8) + proportion_set(xs, lambda i,o: len(o)) - limit(xs, 2, lambda i,o: len(o) < 2),
},
{
"concept": "(lambda (reverse (filteri (lambda (lambda (is_odd $1))) (reverse $0))))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) >= 7),
},
{
"concept": "(lambda (map (lambda (* $0 (if (is_even (length $1)) 2 3))) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) - forbid(xs, lambda i,o: len(i) == 0) - limit(xs, 1, lambda i,o: len(i) == 1) + 2 / center(xs, lambda i,o: len(i) % 2 == 0)
},
{
"concept": "(lambda (singleton (sum (filter is_even $0))))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) >= 7) + proportion(xs, lambda i,o: 5 >= len(o) >= 2)
},
{
"concept": "(lambda (map (lambda (length $1)) $0))",
"adjust": lambda xs: proportion_unique_elements(xs)
},
{
"concept": "(lambda (map (lambda (+ (* (% $0 10) 10) (/ $0 10))) $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) >= 3)
},
{
"concept": "(lambda (fold (lambda (lambda (cons $0 (reverse $1)))) empty $0))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) >= 4)
},
{
"concept": "(lambda (drop 2 (droplast 2 $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 4)
},
{
"concept": "(lambda (drop (first $0) (droplast (last $0) $0)))",
"adjust": lambda xs: 4 * proportion(xs, lambda i,o: len(i) > 1 and (i[0] + i[-1]) <= len(i)) + proportion_set(xs, lambda i,o: i[0] if len(i) > 0 else 0) + proportion_set(xs, lambda i,o: i[-1] if len(i) > 0 else 0) + proportion_set(xs, lambda i,o: len(o)) - limit(xs, 2, lambda i,o: len(i) > 0 and i[0] == 0 or i[-1] == 0)
},
{
"concept": "(lambda (unique (flatten (zip $0 (reverse $0)))))",
"adjust": lambda xs: proportion_unique_elements(xs) + 2 * proportion_set(xs, lambda i,o: len(i)-len(o)) + 2 * proportion_set(xs, lambda i,o: len(o))
},
{
"concept": "(lambda (mapi (lambda (lambda (count (== $0) (take $1 $2)))) $0))",
"adjust": lambda xs: proportion(xs, lambda i,o: sum(oe in [2,3,4] for oe in o)/max(1,len(o)))
},
{
"concept": "(lambda (take (first $0) (reverse $0)))",
"adjust": lambda xs: 4 * proportion(xs, lambda i,o: len(i) > 1 and i[0] <= len(i)) + proportion_unique_elements(xs) + proportion_set(xs, lambda i,o: len(o)) + proportion_set(xs, lambda i,o: len(i)-len(o)) - limit(xs, 1, lambda i,o: len(i) > 0 and i[0] == len(i)) - limit(xs, 1, lambda i,o: len(i) > 0 and i[0] == 0)
},
{
"concept": "(lambda (range (min $0) 2 (max $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: 2 < len(o) < 8) + proportion_set(xs, lambda i,o: min(i) if len(i) > 0 else 0) + proportion_set(xs, lambda i,o: max(i) if len(i) > 0 else 0)
},
{
"concept": "(lambda (sort (lambda $0) (map length (group (lambda $0) $0))))",
"adjust": lambda xs: proportion_set(xs, lambda i,o: tuple(sorted(o))) + proportion(xs, lambda i,o: len(i) >= 3) + proportion_set(xs, lambda i,o: len(i)-len(o)) + proportion_set(xs, lambda i,o: len(o)) - limit(xs, 1, lambda i,o: sum(o) == len(o))
},
{
"concept": "(lambda (singleton (/ (sum $0) (length $0))))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: 2 < len(i) < 6) + proportion_set(xs, lambda i,o: o[0])
},
{
"concept": "(lambda (map length (group (lambda $0) $0)))",
"adjust": lambda xs: proportion_set(xs, lambda i,o: tuple(sorted(o))) + proportion(xs, lambda i,o: len(i) >= 3) + proportion_set(xs, lambda i,o: len(i)-len(o)) + proportion_set(xs, lambda i,o: len(o)) - limit(xs, 1, lambda i,o: sum(o) == len(o))
},
{
"concept": "(lambda (flatten (map (lambda (drop 1 $0)) (group (lambda $0) $0))))",
"adjust": lambda xs: proportion_set(xs, lambda i,o: tuple(sorted(o))) + proportion(xs, lambda i,o: len(i) >= 3) + proportion_set(xs, lambda i,o: len(i)-len(o)) + proportion_set(xs, lambda i,o: len(o))
},
{
"concept": "(lambda (fold (lambda (lambda (concat $1 (drop 1 (range (last $1) (if (> $0 (last $1)) 1 -1) $0))))) (take 1 $0) (drop 1 $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) - forbid(xs, lambda i,o: len(o) <= 1) - limit(xs, 1, lambda i,o: len(i) <= 2)
},
{
"concept": "(lambda (map (lambda (/ $0 2)) (filter is_even $0)))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) >= 7)
},
{
"concept": "(lambda (fold (lambda (lambda (append $1 (+ (last $1) $0)))) (take 1 (unique $0)) (drop 1 (unique $0))))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5) + proportion(xs, lambda i,o: all(0 <= ie <= 20 for ie in i)) + proportion(xs, lambda i,o: 5 >= len(i)-len(set(i)) >= 2) - limit(xs, 1, lambda i,o: len(o) <= 1) + proportion_set(xs, lambda i,o: len(o))
},
{
"concept": "(lambda (filter (lambda (== 1 (count (== $0) $1))) $0))",
"adjust": lambda xs: proportion(xs, lambda i,o: len(i) > 5) + proportion(xs, lambda i,o: all(0 <= ie <= 20 for ie in i)) + proportion_set(xs, lambda i,o: len(i)-len(set(i))) + proportion(xs, lambda i,o: 2 <= len(o) <= 5)
},
{
"concept": "(lambda (singleton (- (length $0) (length (unique $0)))))",
"adjust": lambda xs: 2 * proportion_set(xs, lambda i,o: o[0]) + proportion_unique_elements(xs)
},
{
"concept": "(lambda (singleton (count (lambda ((== (length $1)) $0)) $0)))",
"adjust": lambda xs: 3 * proportion_set(xs, lambda i,o: min(10,o[0]+5)) + proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) >= 7) - limit(xs, 2, lambda i,o: o[0] <= 1)
},
{
"concept": "(lambda (singleton (count is_even $0)))",
"adjust": lambda xs: 2 * proportion_set(xs, lambda i,o: o[0]) + proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) >= 7)
},
{
"concept": "(lambda (fold (lambda (lambda (append (reverse $1) $0))) empty (reverse (unique (sort (lambda $0) $0)))))",
"adjust": lambda xs: proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) > 5) + proportion_set(xs, lambda i,o: len(i)-len(o)) + proportion_set(xs, lambda i,o: len(o))
},
{
"concept": "(lambda (singleton (count is_odd $0)))",
"adjust": lambda xs: 2 * proportion_set(xs, lambda i,o: o[0]) + proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) >= 7)
},
{
"concept": "(lambda (singleton (count (lambda (== 3 $0)) $0)))",
"adjust": lambda xs: 2 * proportion_set(xs, lambda i,o: o[0]) + proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) >= 7)
},
{
"concept": "(lambda (singleton (count (lambda (== (first $1) $0)) (drop 1 $0))))",
"adjust": lambda xs: 2 * proportion_set(xs, lambda i,o: o[0]) + proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) >= 7)
},
{
"concept": "(lambda (singleton (length (unique $0))))",
"adjust": lambda xs: 2 * proportion_set(xs, lambda i,o: o[0]) + proportion_unique_elements(xs) + proportion(xs, lambda i,o: len(i) >= 7)
},
{
"concept": "(lambda (first (reverse (fold (lambda (lambda (if (== $0 0) (cons empty $1) (cons (append (first $1) $0) (drop 1 $1))))) (singleton empty) $0))))",
"adjust": lambda xs: proportion(xs, lambda i,o: len(i) >= 9) + 4 * proportion(xs, lambda i,o: 4 > len(words(i)) > 2) - limit(xs, 2, lambda i,o: [] in words(i)) + proportion_set(xs, lambda i,o: tuple(o)) + 2 * proportion(xs, lambda i,o: 1 <= len(o) <= 4)
},
{
"concept": "(lambda (first (fold (lambda (lambda (if (== $0 0) (cons empty $1) (cons (append (first $1) $0) (drop 1 $1))))) (singleton empty) $0)))",
"adjust": lambda xs: proportion(xs, lambda i,o: len(i) >= 9) + 4 * proportion(xs, lambda i,o: 4 > len(words(i)) > 2) - limit(xs, 2, lambda i,o: [] in words(i)) + proportion_set(xs, lambda i,o: tuple(o)) + 2 * proportion(xs, lambda i,o: 1 <= len(o) <= 4)
},
{
"concept": "(lambda (map first (reverse (fold (lambda (lambda (if (== $0 0) (cons empty $1) (cons (append (first $1) $0) (drop 1 $1))))) (singleton empty) $0))))",
"adjust": lambda xs: proportion(xs, lambda i,o: len(i) >= 9) + 4 * proportion(xs, lambda i,o: 4 > len(words(i)) > 2) - limit(xs, 2, lambda i,o: [] in words(i)) + proportion_set(xs, lambda i,o: tuple(o)) + 2 * proportion(xs, lambda i,o: 3 <= len(o) <= 4)
},
{
"concept": "(lambda (flatten (map reverse (reverse (fold (lambda (lambda (if (== $0 0) (cons empty $1) (cons (append (first $1) $0) (drop 1 $1))))) (singleton empty) $0)))))",
"adjust": lambda xs: proportion(xs, lambda i,o: len(i) >= 9) + 4 * proportion(xs, lambda i,o: 4 > len(words(i)) > 2) - limit(xs, 2, lambda i,o: [] in words(i)) + proportion_set(xs, lambda i,o: tuple(o)) + proportion(xs, lambda i,o: [ie for ie in i if ie != 0] != o)
},
]
def words(xs, sep=0):
words = []
word = []
looped = False
for x in xs:
looped = True
if x == sep:
words.append(word)
word = []
looped = False
else:
word.append(x)
if looped:
words.append(word)
return words
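# Illustrative check (not in the original): 0 is the default separator,
#   words([1, 2, 0, 3, 0]) -> [[1, 2], [3]]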
def model_comparison_wave_3():
return [
{'concept': '(lambda (singleton (third $0)))',
'adjust': lambda xs: 3.0 if all(len(i) >= 3 for i, o in xs) else 0
},
{'concept': '(lambda (if (> 3 (length $0)) empty (singleton (third $0))))',
'adjust': lambda xs: 6/center(xs, lambda i,o: len(i) >= 3) + 2 * proportion_unique_elements(xs)
},
{'concept': '(lambda (singleton (nth 7 $0)))',
'adjust': lambda xs: 3.0 if all(len(i) >= 7 for i, o in xs) else 0
},
{'concept': '(lambda (if (> 7 (length $0)) empty (singleton (nth 7 $0))))',
'adjust': lambda xs: 3.0 if 0.6 >= sum(len(i) >= 7 for i, o in xs)/len(xs) >= 0.4 else 0,
},
{'concept': '(lambda (singleton (nth (first $0) (drop 1 $0))))',
'adjust': lambda xs: 2.0 * proportion(xs, lambda i,o: i[0] <= len(i)-1) + 2.0 * proportion_set(xs, lambda i,o: i[0]) + 2 * proportion_unique_elements(xs) + 2 * proportion_set(xs, lambda i,o: len(i)-i[0]) - 0.5 * limit(xs, 1, lambda i,o: i[0] == 1)
},
{'concept': '(lambda (take 2 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 2 for i, o in xs) else 0
},
{'concept': '(lambda (take 2 $0))',
'adjust': lambda xs: 3.0 if 0.6 >= sum(len(i) >= 2 for i, o in xs)/len(xs) >= 0.4 else 0,
},
{'concept': '(lambda (take 6 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 6 for i, o in xs) else 0
},
{'concept': '(lambda (take 6 $0))',
'adjust': lambda xs: 3.0 if 0.6 >= sum(len(i) >= 6 for i, o in xs)/len(xs) >= 0.4 else 0,
},
{'concept': '(lambda (take (first $0) (drop 1 $0)))',
'adjust': lambda xs: 3.0 if all(i[0] <= len(i)-1 for i, o in xs) else 0,
},
{'concept': '(lambda (slice 2 4 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 4 for i, o in xs) else 0,
},
{'concept': '(lambda (slice 2 4 $0))',
'adjust': lambda xs: (sum(2 > len(i) for i,o in xs) >= 2) + (sum(4 > len(i) >= 2 for i,o in xs) >= 2) + (sum(len(i) >= 4 for i,o in xs) >= 4),
},
{'concept': '(lambda (slice 3 7 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 7 for i, o in xs) else 0,
},
{'concept': '(lambda (slice 3 7 $0))',
'adjust': lambda xs: (sum(3 > len(i) for i,o in xs) >= 2) + (sum(7 > len(i) >= 3 for i,o in xs) >= 2) + (sum(len(i) >= 7 for i,o in xs) >= 4)
},
{'concept': '(lambda (slice (first $0) (second $0) (drop 2 $0)))',
'adjust': lambda xs: 4.0 * proportion(xs, lambda i,o: len(i)-2 >= i[1] >= i[0] > 0) + proportion_set(xs, lambda i,o: i[0]) + proportion_set(xs, lambda i,o: i[1]) + proportion_set(xs, lambda i,o: len(i)-i[1]) - 0.5 * limit(xs, 1, lambda i,o: len(i)-2 == i[1]) - 0.5 * limit(xs, 1, lambda i,o: i[1] == i[0])
},
{'concept': '(lambda (replace 2 8 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 2 for i, o in xs) else 0,
},
{'concept': '(lambda (replace 2 8 $0))',
'adjust': lambda xs: 3.0 if 0.6 >= sum(2 > len(i) for i, o in xs)/len(xs) >= 0.4 else 0,
},
{'concept': '(lambda (replace 6 3 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 6 for i, o in xs) else 0,
},
{'concept': '(lambda (replace 6 3 $0))',
'adjust': lambda xs: 3.0 if 0.6 >= sum(6 > len(i) for i, o in xs)/len(xs) >= 0.4 else 0,
},
{'concept': '(lambda (replace 1 (last $0) $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 1 for i, o in xs) else 0,
},
{'concept': '(lambda (insert 8 2 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 2 for i, o in xs) else 0,
},
{'concept': '(lambda (insert 5 2 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 2 for i, o in xs) else 0,
},
{'concept': '(lambda (insert (if (> 5 (length $0)) 8 5) 2 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 2 for i, o in xs) else 0,
},
{'concept': '(lambda (insert (if (> 5 (first $0)) 8 5) 2 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 2 for i, o in xs) else 0,
},
{'concept': '(lambda (cut_idx 2 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 2 for i, o in xs) else 0,
},
{'concept': '(lambda (cut_idx 3 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 3 for i, o in xs) else 0,
},
{'concept': '(lambda (cut_idx (if (== (first $0) (second $0)) 2 3) $0))',
'adjust': lambda xs: 4 * proportion(xs, lambda i,o: len(i) >= 3 and i[1] != i[2]) + 2 / center(xs, lambda i,o: len(i) >= 3 and i[0] == i[1]) + 2 * proportion_set(xs, lambda i,o: (i[0], i[1]) if len(i) > 1 else (0, 0)),
},
{'concept': '(lambda (cut_idx (if (> (first $0) (second $0)) 2 3) $0))',
'adjust': lambda xs: 4 * proportion(xs, lambda i,o: len(i) >= 3 and ((i[0] > i[1] and i[0] < i[2]) or (i[0] > i[2] and i[0] < i[1]))) + 2 / center(xs, lambda i,o: len(i) >= 3 and i[0] > i[1]) + 2 * proportion_set(xs, lambda i,o: (i[0], i[1]) if len(i) > 1 else (0, 0)),
},
{'concept': '(lambda (drop 2 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 2 for i, o in xs) else 0,
},
{'concept': '(lambda (droplast 2 $0))',
'adjust': lambda xs: 4 * proportion(xs, lambda i,o: len(i) >= 2) + proportion_set(xs, lambda i,o: len(o)),
},
{'concept': '(lambda ((if (== (first $0) (second $0)) drop droplast) 2 $0))',
'adjust': lambda xs: 4 * proportion(xs, lambda i,o: len(i) >= 3 and ((i[0]==i[1] and i[-1] != i[-2]) or (i[0]!=i[1] and i[-1] == i[-2])))
},
{'concept': '(lambda ((if (> (first $0) (last $0)) drop droplast) 2 $0))',
'adjust': lambda xs: 4 * proportion(xs, lambda i,o: len(i) >= 3 and (i[0]!=i[-1])) + 2 / center(xs, lambda i,o: len(i) >= 3 and i[0] > i[-1]),
},
{'concept': '(lambda (swap 1 4 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 4 for i, o in xs) else 0,
},
{'concept': '(lambda (swap 2 3 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 3 for i, o in xs) else 0,
},
{'concept': '(lambda (if (== (second $0) (third $0)) (swap 1 4 $0) (swap 2 3 $0)))',
'adjust': lambda xs: 4 * proportion(xs, lambda i,o: len(i) >= 4 and ((i[1] == i[2] and i[0] != i[3]) or (i[1] != i[2] and i[0] == i[3]))) + 2 / center(xs, lambda i,o: i[1] == i[2])
},
{'concept': '(lambda (if (> (second $0) (third $0)) (swap 2 3 $0) (swap 1 4 $0)))',
'adjust': lambda xs: 4 * proportion(xs, lambda i,o: len(i) >= 4 and ((i[1] > i[2] and i[0] <= i[3]) or (i[1] <= i[2] and i[0] > i[3]))) + 2 / center(xs, lambda i,o: i[1] > i[2]),
},
{'concept': '(lambda (append $0 3))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (append $0 9))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (if (== (length $0) 3) (append $0 3) (if (== (length $0) 9) (append $0 9) $0)))',
'adjust': lambda xs: 4 / center(xs, lambda i,o: len(i) in [3, 9], factor = 8/11) + 1 / max(1, abs(sum(len(i) == 3 for i,o in xs) - sum(len(i) == 9 for i,o in xs))),
},
{'concept': '(lambda (if (is_in $0 3) (append $0 3) (if (is_in $0 9) (append $0 9) $0)))',
'adjust': lambda xs: 4 / center(xs, lambda i,o: (3 in i and 9 not in i), factor = 4/11) + 4 / center(xs, lambda i,o: (9 in i and 3 not in i), factor = 4/11) + 4 / center(xs, lambda i,o: (3 not in i and 9 not in i), factor = 3/11)
},
{'concept': '(lambda (singleton 9))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (cons 5 (singleton 2)))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (cons 8 (cons 2 (cons 7 (cons 0 (singleton 3))))))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (cons 1 (cons 9 (cons 4 (cons 3 (cons 2 (cons 5 (cons 8 (cons 0 (cons 4 (singleton 9)))))))))))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda $0)',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (cons 7 $0))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (cons 9 (cons 6 (cons 3 (cons 8 (cons 5 $0))))))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (take 1 $0))',
'adjust': lambda xs: 3.0 if all(len(i) > 0 for i,o in xs) else 0.0,
},
{'concept': '(lambda (drop 1 $0))',
'adjust': lambda xs: 3.0 if all(len(i) > 0 for i,o in xs) else 0.0,
},
{'concept': '(lambda (cons (first $0) $0))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (concat (repeat (first $0) 5) $0))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (repeat (first $0) 10))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (concat (repeat (first $0) 2) (drop 2 $0)))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (concat (repeat (third $0) 3) (drop 3 $0)))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (concat (slice 3 4 $0) (concat (take 2 $0) (drop 4 $0))))',
'adjust': lambda xs: 3.0 if all(len(i) >= 4 for i,o in xs) else 0.0,
},
{'concept': '(lambda (cut_idx 5 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 5 for i,o in xs) else 0.0,
},
{'concept': '(lambda (insert 4 7 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 7 for i,o in xs) else 0.0,
},
{'concept': '(lambda (drop 7 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 7 for i,o in xs) else 0.0,
},
{'concept': '(lambda (swap 4 8 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 8 for i,o in xs) else 0.0,
},
{'concept': '(lambda (swap 3 1 (replace 4 4 (cut_idx 6 (take 7 $0)))))',
'adjust': lambda xs: 4 * proportion(xs, lambda i,o: len(i) >= 7) + 2 * proportion_unique_elements(xs),
},
{'concept': '(lambda (singleton (last $0)))',
'adjust': lambda xs: 3.0 if all(len(i) >= 1 for i,o in xs) else 0.0,
},
{'concept': '(lambda (droplast 1 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 1 for i,o in xs) else 0.0,
},
{'concept': '(lambda (drop (first $0) (drop 1 $0)))',
'adjust': lambda xs: 3.0 if all(i[0] <= len(i)-1 for i,o in xs) else 0.0,
},
{'concept': '(lambda (drop 1 (droplast 1 $0)))',
'adjust': lambda xs: 3.0 if all(len(i) >= 2 for i,o in xs) else 0.0,
},
{'concept': '(lambda (cons 9 (append $0 7)))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (append (drop 1 $0) (first $0)))',
'adjust': lambda xs: 3.0 if all(len(i) >= 1 for i,o in xs) else 0.0,
},
{'concept': '(lambda (cons (last $0) (append (drop 1 (droplast 1 $0)) (first $0))))',
'adjust': lambda xs: 3.0 if all(len(i) >= 2 for i,o in xs) else 0.0,
},
{'concept': '(lambda (concat $0 (cons 7 (cons 3 (cons 8 (cons 4 (singleton 3)))))))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (concat (cons 9 (cons 3 (cons 4 (singleton 0)))) (concat $0 (cons 7 (cons 2 (cons 9 (singleton 1)))))))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (concat $0 $0))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (map (lambda (+ 2 $0)) $0))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (flatten (map (lambda (cons $0 (singleton $0))) $0)))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (mapi + $0))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (filter (lambda (> $0 7)) $0))',
"adjust": lambda xs: min(1.0, 1.0/(len(xs)-2)*sum((len(o)/len(i) <= 0.75 if len(i) > 0 else 1) for i, o in xs)),
},
{'concept': '(lambda (filteri (lambda (lambda (is_odd $1))) $0))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (cons (max $0) (cons (last $0) (cons (length $0) (cons (first $0) (singleton (min $0)))))))',
"adjust": lambda xs: 2 * proportion_set(xs, lambda i,o: len(i)) + 2 * proportion_unique_elements(xs),
},
{'concept': '(lambda (singleton (length $0)))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (singleton (max $0)))',
"adjust": lambda xs: 4 * proportion_set(xs, lambda i,o: o[0]) + 2 * proportion_unique_elements(xs),
},
{'concept': '(lambda (singleton (sum $0)))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (reverse $0))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (singleton (third $0)))',
'adjust': lambda xs: 3.0 if all(len(i) >= 3 for i, o in xs) else 0
},
{'concept': '(lambda (if (> 3 (length $0)) empty (singleton (third $0))))',
'adjust': lambda xs: 3.0 if 0.6 >= sum(len(i) >= 3 for i, o in xs)/len(xs) >= 0.4 else 0,
},
{'concept': '(lambda (singleton (nth 7 $0)))',
'adjust': lambda xs: 3.0 if all(len(i) >= 7 for i, o in xs) else 0
},
{'concept': '(lambda (if (> 7 (length $0)) empty (singleton (nth 7 $0))))',
'adjust': lambda xs: 3.0 if 0.6 >= sum(len(i) >= 7 for i, o in xs)/len(xs) >= 0.4 else 0,
},
{'concept': '(lambda (singleton (nth (first $0) (drop 1 $0))))',
'adjust': lambda xs: 3.0 if all(i[0] <= len(i)-1 for i, o in xs) else 0,
},
{'concept': '(lambda (swap 1 4 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 4 for i, o in xs) else 0,
},
{'concept': '(lambda (swap 2 3 $0))',
'adjust': lambda xs: 3.0 if all(len(i) >= 3 for i, o in xs) else 0,
},
{'concept': '(lambda (if (== (second $0) (third $0)) (swap 1 4 $0) (swap 2 3 $0)))',
'adjust': lambda xs: 4 * proportion(xs, lambda i,o: len(i) >= 4 and ((i[1] == i[2] and i[0] != i[3]) or (i[1] != i[2] and i[0] == i[3]))) + 2 / center(xs, lambda i,o: i[1] == i[2])
},
{'concept': '(lambda (if (> (second $0) (third $0)) (swap 2 3 $0) (swap 1 4 $0)))',
'adjust': lambda xs: 4 * proportion(xs, lambda i,o: len(i) >= 4 and ((i[1] > i[2] and i[0] <= i[3]) or (i[1] <= i[2] and i[0] > i[3]))) + 2 / center(xs, lambda i,o: i[1] > i[2]),
},
{'concept': '(lambda (cons 18 (cons 42 (cons 77 (cons 20 (singleton 36))))))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (cons 81 (cons 99 (cons 41 (cons 23 (cons 22 (cons 75 (cons 68 (cons 30 (cons 24 (singleton 69)))))))))))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (cons 92 (cons 63 (cons 34 (cons 18 (cons 55 $0))))))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (repeat (first $0) 10))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (concat (slice 3 4 $0) (concat (take 2 $0) (drop 4 $0))))',
'adjust': lambda xs: 3.0 if all(len(i) >= 4 for i,o in xs) else 0
},
{'concept': '(lambda (drop 1 (droplast 1 $0)))',
'adjust': lambda xs: 3.0 if all(len(i) >= 2 for i,o in xs) else 0
},
{'concept': '(lambda (cons 98 (append $0 37)))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (concat (cons 11 (cons 21 (cons 43 (singleton 19)))) (concat $0 (cons 7 (cons 89 (cons 0 (singleton 57)))))))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (mapi + $0))',
'adjust': lambda xs: 1.0,
},
{'concept': '(lambda (filter (lambda (> $0 49)) $0))',
"adjust": lambda xs: min(1.0, 1.0/(len(xs)-2)*sum((len(o)/len(i) <= 0.75 if len(i) > 0 else 1) for i, o in xs)),
},
{'concept': '(lambda (reverse $0))',
'adjust': lambda xs: 1.0,
},
]
def sample_examples_greedy(p,adjust,n=10,n_restarts=10000,n_tries=100,small=False):
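    # Random-restart greedy search: each restart builds a fresh example set and
    # hill-climbs on score_set, keeping the best-scoring set across restarts.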
best_score = 0.0
best_s = None
for i_restart in range(n_restarts):
s, score = greedy_set(p,adjust,n,n_tries,small)
if score > best_score:
best_s = s
best_score = score
print(f"{i_restart}. {best_score}")
for i,o in s:
print(f" {i} = {o}")
return best_s
def greedy_set(p,adjust,n,n_tries,small):
s = initialize_set(p,n,small)
score = score_set(s, adjust)
for i_try in range(n_tries):
i = sample_input(small)
if i not in list(zip(*s))[0]:
try:
o = p.runWithArguments([i])
except:
continue
if valid_output(o, small):
options = []
for idx in range(n):
new_s = s[:]
new_s[idx] = (i,o)
new_score = score_set(new_s, adjust)
options.append((new_score, new_s))
new_score, new_s = max(options, key = lambda x: x[0])
if new_score > score:
s = new_s
score = new_score
return s, score
def initialize_set(p,n,small):
s = []
while len(s) < n:
i = sample_input(small)
try:
o = p.runWithArguments([i])
except:
continue
if valid_output(o, small) and (len(s) == 0 or i not in list(zip(*s))[0]):
s.append((i,o))
return s
def sample_examples_parallel(p,adjust,n=10,n_pools=1000,n_tries=20,n_sets=1000,small=False):
def helper2(pool):
s = make_example_set(pool, n)
score = score_set(s, adjust)
return score, s
def helper1():
best_score = 0.0
best_s = None
pool = build_pool(p, n_tries, False, small)
return max((helper2(pool) for _ in range(n_sets)), key=lambda x: x[0])
bests = Parallel(n_jobs=-1)(delayed(helper1)() for _ in range(n_pools))
return max(bests, key=lambda x: x[0])[1]
def sample_examples(p,adjust,n=10,n_pools=1000,n_tries=20,n_sets=1000,verbose=True,small=False):
best_score = 0.0
best_s = None
for i_pool in range(n_pools):
if verbose:
print(f"{i_pool}. ", end="")
pool = build_pool(p, n_tries, verbose, small)
for scanned in range(n_sets):
s = make_example_set(pool, n)
score = score_set(s, adjust)
if score > best_score:
if verbose:
print(f" {scanned}: {score}")
best_score = score
best_s = s
return best_s
def build_pool(p, n_tries, verbose, small):
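    # Build a pool indexed by input length (0-10); for each length there is one
    # sub-pool per repetition count, each holding up to n_tries (input, output)
    # pairs whose outputs are not yet over-represented.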
if verbose:
print("building pool", end="", flush=True)
try:
pool = [[[([], p.runWithArguments([[]]))]]]
except(IndexError, ValueError):
pool = [[]]
for length in range(1,11):
if verbose:
print(".", end="", flush=True)
subpool = []
for repetitions in range(length):
subsubpool = []
os = []
tries = 0
while len(subsubpool) < n_tries and tries < 100:
tries += 1
i = sample_input(small, length, repetitions)
try:
o = p.runWithArguments([i])
if valid_output(o, small) and (i, o) not in subsubpool and os.count(o) < n_tries/10:
tries = 0
os.append(o)
subsubpool.append((i,o))
except(IndexError, ValueError):
continue
subpool.append(subsubpool)
pool.append(subpool)
if verbose:
print("done")
return pool
def make_example_set(pool, n):
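    # Assemble n distinct (input, output) pairs from the pool, preferring examples
    # whose output has not been used yet and falling back to the least common
    # outputs when every candidate output already appears in the set.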
def helper():
examples = []
ls = set()
outputs = []
while len(examples) < n:
if len(ls) == len(pool):
return
length = random.randint(0, len(pool)-1)
ls.add(length)
if len(pool[length]) == 0:
continue
subpool = random.choice(pool[length])
for i, o in subpool:
if (i,o) not in examples and o not in outputs:
examples.append((i,o))
outputs.append(o)
ls = set()
break
if len(ls) == 0:
continue
grouped_outputs = dict((lambda xs: (tuple(xs[0]), len(list(xs[1]))))(xs)
for xs in itertools.groupby(sorted(outputs)))
least_common = [list(k) for k,v in grouped_outputs.items()
if v == min(grouped_outputs.values())]
for i, o in subpool:
if o in least_common and (i, o) not in examples:
examples.append((i,o))
outputs.append(o)
ls = set()
break
return examples
examples = None
while not examples:
examples = helper()
return examples
def valid_output(xs, small):
return len(xs) == 0 or (len(xs) <= 15 and max(xs) < (10 if small else 100) and min(xs) >= 0)
def score_set(s, adjust):
(inputs, outputs) = zip(*s)
n = len(s)
# Measure the distribution of output lengths
out_ws = [sum(len(o) == l for o in outputs) for l in range(11)]
foil = [len(s)//11 + (1 if x < len(s) % 11 else 0) for x in range(11)]
out_len = simple_entropy(out_ws)/simple_entropy(foil)
# Inputs are unique by construction.
# Measure the proportion of unique outputs
unique = len(list(itertools.groupby(outputs)))/n
# Measure the proportion of non-trivial i/o pairs
nontrivial = sum(i != o for i,o in s)/n
# Measure the distribution of list elements.
all_items = _flatten(_flatten(s))
ws = [sum(i == j for i in all_items) for j in range(100)]
foil = [len(all_items)//100 + (1 if x < len(all_items) % 100 else 0) for x in range(100)]
span = simple_entropy(ws)/simple_entropy(foil)
# Measure the distribution over input lengths & repetitions
# lrs = [(len(i), len(i)-len(set(i))) for i in inputs]
# lr_ws = [len(list(x)) for x in itertools.groupby(sorted(lrs))]
# foil = [len(lrs)//46 + (1 if x < len(lrs) % 46 else 0) for x in range(46)]
# combos = simple_entropy(lr_ws)/simple_entropy(foil)
# Measure the distribution over input lengths
in_ws = [sum(len(i) == l for i in inputs) for l in range(11)]
foil = [len(s)//11 + (1 if x < len(s) % 11 else 0) for x in range(11)]
in_len = simple_entropy(in_ws)/simple_entropy(foil)
# Adjust the score if necessary.
adjustment = 0 if adjust is None else adjust(s)
# print(f"{out_len/5} {unique/5} {nontrivial/5} {span/5} {in_len/5} {adjustment}")
return (out_len + unique + nontrivial + span + in_len)/5 + adjustment
def order_examples(xs, n_orders, n_tries):
orders = []
for _ in range(max(n_orders, n_tries)):
candidate = random.sample(xs, len(xs))
orders.append((score_order(candidate), candidate))
ranked = sorted(orders, key= lambda x: x[0])
best = []
while len(best) < n_orders:
try:
s, candidate = ranked.pop()
except IndexError:
break
firsts = [order[0] for order in best]
start = [{tuple(i) for i,o in order[:5]} for order in best]
cand_set = {tuple(i) for i,o in candidate[:5]}
if (candidate not in best and
candidate[0] not in firsts and
(len(start) == 0 or
max(len(cand_set.intersection(s)) for s in start) <= 2)):
best.append(candidate)
return best
def score_order(xs):
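    # Reward orderings whose first input is mid-length (about 5 elements) and
    # informative (input != output), and whose first five trials also score well
    # as a stand-alone example set.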
first_short = 1 - (abs(5 - len(xs[0][0])) / 6)
first_informative = 1 if xs[0][0] != xs[0][1] else 0
good_start = score_set(xs[:5], adjust=lambda xs: 0.0 )/5
good_finish = score_set(xs[5:], adjust=lambda xs: 0.0 )/5
return 2 * first_short + first_informative + 2 * good_start + good_finish
def flip(p=0.5):
return random.random() < p
def sample_element(small):
if small or flip(0.5):
return random.randint(0, 9)
return random.randint(0, 99)
def sample_input(small, l=None, r=None):
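    # Sample a random input list: pick a length (0-10 unless given) and a number of
    # repeated elements, draw length-repetitions distinct values, pad with repeats
    # of them, and shuffle.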
length = random.randint(0, 10) if l is None else l
repetitions = (random.randint(0, length-1) if r is None else r) if length > 1 else 0
xs = set()
while len(xs) < length-repetitions:
xs.add(sample_element(small))
xs = list(xs)
xs.extend([random.choice(xs) for _ in range(repetitions)])
random.shuffle(xs)
return xs
def simple_entropy(ws):
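    # Shannon entropy (base 2) of an unnormalized weight vector; zero weights are skipped.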
z = sum(ws)
return -sum(w/z*math.log2(w/z) for w in ws if w > 0)
def list_primitives():
print("Primitives:")
for primitive in Primitive.GLOBALS:
print(f"- {primitive}")
def sample_programs(g, type_of_sample, n=10):
    return [g.sample(type_of_sample, maximumDepth=10) for _ in range(n)]
def test_p_with_i(e, i):
#print(f"e = {e}")
p = Program.parse(e)
#print(f"p = {p}")
#print(f"i = {i}")
o = p.runWithArguments([i])
#print(f"o = {o}")
return o
def process(dirname, i, c, n_trials=10, n_orders=2, verbose=True, small=False, human=False, kind="greedy"):
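    # Sample example sets for one concept, pick a few orderings of them, and write
    # each ordering to <dirname>/c<i>_<order>.json.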
Primitive.GLOBALS.clear()
grammar = Grammar.uniform(primitives())
tp = arrow(tlist(tint), tlist(tint))
p = Program.parse(c['concept'])
if verbose:
print(f"{i}. [`{p}`](./json/c{i:03}.json)", flush=True)
if not p.canHaveType(tp):
if verbose:
print(f" incorrect type {p.infer()}", flush=True)
return
if human:
examples = [(inp, p.runWithArguments([inp])) for inp in c['inputs']]
elif kind == "parallel":
examples = sample_examples_parallel(p, c["adjust"], n=n_trials, n_pools=1000, n_tries=20, n_sets=1000, small=small)
elif kind == "greedy":
examples = sample_examples_greedy(p, c["adjust"], n=n_trials, n_restarts=1000, n_tries=1000, small=small)
else:
examples = sample_examples(p, c["adjust"], n=n_trials, n_pools=1000, n_tries=20, n_sets=1000, verbose=verbose, small=small)
for i_order, order in enumerate(order_examples(examples, n_orders, 5000), 1):
data = {
'concept': c['concept'],
'examples': [{"i": i, "o": o} for i,o in order]
}
with open(f"{dirname}/c{i:03}_{i_order}.json", "w") as fd:
fd.write(json.dumps(data))
def process_2(programs, n_trials=1000, small=False):
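    # Group programs by their behavior on a shared batch of random inputs; programs
    # producing identical (input, output) traces land in the same bucket.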
Primitive.GLOBALS.clear()
grammar = Grammar.uniform(primitives())
inputs = []
while len(inputs) < n_trials:
i = sample_input(small)
if i not in inputs:
inputs.append(i)
pairss = {}
for program in programs:
p = Program.parse(program)
s = ""
for i in inputs:
try:
s += f" {str((i, p.runWithArguments([i])))} "
except:
s += f" ({i}, ERR) "
if s not in pairss:
pairss[s] = [p]
else:
pairss[s].append(p)
return pairss
def count_applications(program):
return sum(subprogram[1].isApplication for subprogram in program.walk())
def ilogit(x):
return 1/(1+math.exp(-x))
def predict(program, visible, semi):
Primitive.GLOBALS.clear()
grammar = Grammar.uniform(primitives())
request = arrow(tlist(tint), tlist(tint))
p = Program.parse(program)
apps = count_applications(p)
length = p.size()
depth = p.depth()
# ps = [-0.36083580, -1.40545069, -0.83059482, -0.09067722, -2.72309754, -1.85036866, 0.48188347, 0.02257724, -0.13186151, 0.06397171, -0.29154169, -0.16128822, -0.68534256, -0.68331928, 0.37239565, -0.33644133, -2.19778909, -0.54080431, 0.00494636, 0.27146955, -0.22453515, -0.10963924, -3.03394161, -0.01081037, -0.75062149]
# mean_accuracy = 0.0
# for p in ps:
# mean_accuracy += sum(ilogit(-0.60883 + p + -0.22964 * length + 0.11709 * depth + 0.39263 * visible + 0.25289 * semi + 0.25874 * block_trial) for block_trial in range(1,12))
# mean_accuracy /= (11*len(ps))
print(f"{program},{length},{depth},1,{apps},{visible},{semi}")
def list_priors(filename, programs, small=False):
Primitive.GLOBALS.clear()
grammar = Grammar.uniform(primitives())
request = arrow(tlist(tint), tlist(tint))
with open(filename, 'w') as fd:
fd.write("id,program,prior,length,depth,lambda,apps,visible,semi,hidden\n")
for i, program in enumerate(programs):
p = Program.parse(program["concept"])
try:
prior = grammar.logLikelihood(request, p)
except AssertionError:
prior = "NA"
apps = count_applications(p)
fd.write(f"c{(i+1):03},{program['concept']},{prior},{p.size()},{p.depth()},1,{apps},,\n")
def make_grammar():
Primitive.GLOBALS.clear()
return Grammar.uniform(primitives())
def robustfill_fill(data):
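    # Fill in missing RobustFill predictions with the identity program and zeroed
    # cpu/count values so downstream merges keep every row.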
programs = []
cpus = []
counts = []
for x in data.iterrows():
        if pd.isna(x[1]['program']):  # '== np.nan' is always False, so use isna to detect missing programs
programs.append("(lambda $0)")
cpus.append(0)
counts.append(0)
else:
programs.append(x[1]['program'])
cpus.append(x[1]['cpu'])
counts.append(x[1]['count'])
data['program'] = programs
data['cpu'] = cpus
data['count'] = counts
return data
def compute_output_and_accuracy(data):
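    # Re-run each recovered program on the trial input and compare the result with
    # the recorded output; any evaluation failure counts as an incorrect response.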
Primitive.GLOBALS.clear()
grammar = Grammar.uniform(model_comparison_primitives_99())
accuracys = []
responses = []
for x in data.iterrows():
try:
response = test_p_with_i(x[1]['program'],literal_eval(x[1]['input']))
except IndexError:
response = None
except RecursionDepthExceeded:
response = None
except AttributeError:
            response = None
responses.append(response)
accuracys.append(response == literal_eval(x[1]['output']))
data['response'] = responses
data['accuracy'] = accuracys
return data
def wave_3_1_ids():
return ["c002", "c005", "c015", "c027", "c028", "c030", "c031", "c032", "c035", "c036", "c039", "c040", "c060", "c076", "c078", "c088", "c089"]
def adjust_id(id, purpose):
if purpose == "model":
return id
else:
i = int(id[1:])+100
return f"c{i:03}"
def pretty_print(e):
Primitive.GLOBALS.clear()
grammar = Grammar.uniform(primitives())
return(prettyProgram(Program.parse(e), Lisp=True))
def print_table():
filename = "~/sync/josh/library/phd/thesis/analyses/table.csv"
stimuli = "~/sync/josh/library/phd/thesis/analyses/stimuli.csv"
df1 = pd.read_csv(filename)
df2 = pd.read_csv(stimuli)
r = re.compile('𝜆')
with open("/Users/rule/table.txt", "w") as fd:
for x in df1.iterrows():
fd.write(f"{x[1]['mu']} & {adjust_id(x[1]['id'], x[1]['purpose'])} & {x[1]['length']} & \\emph{{{x[1]['gloss']}}}\\\\\n")
pretty = r.sub('@$\lambda$@', pretty_print(x[1]['program']))
fd.write(f" & & & \\mnln{{{pretty}}}\\\\[0.5em]\n")
def process_enumeration_data():
stimuli_filename = "~/sync/josh/library/phd/thesis/analyses/stimuli.csv"
mc_dir = "~/sync/josh/library/research/list-routines/project/list-routine-model-comparison/waves"
h_dir = "~/sync/josh/library/research/list-routines/project/list-routine-human-experiments/waves/1/data/enumeration"
enumeration3_filename = mc_dir + "/3/data/enumeration/bests.csv"
enumeration31_filename = mc_dir + "/3_1/data/enumeration/w3.1"
h_filename = h_dir + "/data.csv"
stimuli = pd.read_csv(stimuli_filename).rename(columns={'program': 'concept'})
enumeration_3 = (pd.read_csv(enumeration3_filename, header=None, names=['id','run','order','trial','program','cpu','count'])
.assign(trial=lambda x: x.trial+1,
purpose="model"))
enumeration_3 = enumeration_3[enumeration_3.id.isin(wave_3_1_ids()) == False]
enumeration_31 = (pd.read_csv(enumeration31_filename, header=None, names=['id','run','order','trial','program','cpu','count'])
.assign(trial=lambda x: x.trial+1,
purpose="model"))
enumeration_h = (pd.read_csv(h_filename, header=None, names=['id','run','order','trial','program','cpu','count'])
.assign(trial=lambda x: x.trial+1,
purpose="dataset"))
enumeration = pd.concat([enumeration_3, enumeration_31, enumeration_h])
(pd.merge(stimuli, enumeration, how='left', on=['purpose','id','order','trial'])
.reindex(columns = ["purpose", "id", "order", "run", "trial", "concept", "input", "output", "program", "cpu", "count"])
.pipe(compute_output_and_accuracy)
.to_csv("enumeration_data.csv", header=True, index=False))
def process_fleet_data():
Primitive.GLOBALS.clear()
grammar = Grammar.uniform(model_comparison_primitives_99())
stimuli_filename = "~/sync/josh/library/phd/thesis/analyses/stimuli.csv"
fleet_dirname1 = "/Users/rule/sync/josh/library/research/list-routines/project/list-routine-model-comparison/waves/3_1/data/fleet/out"
fleet_dirname2 = "/Users/rule/sync/josh/library/research/list-routines/project/list-routine-human-experiments/waves/1/data/fleet/out"
stimuli = pd.read_csv(stimuli_filename).rename(columns={'program': 'concept'})
fleets = []
for filename in glob.iglob(fleet_dirname1 + "/*predictions.txt"):
tmp = (pd.read_csv(filename, delimiter="\t", header=None, names=['file','run','trial','cpu','count','program'])
.assign(trial=lambda x: x.trial + 1,
run=lambda x: x.run + 1,
program=lambda x: "(lambda (fix $0 (lambda " + x.program.str.replace("fix", "$1") + ")))",
id=lambda x: x.file.str.extract(r'\/(c\d{3})_'),
purpose="model",
order=lambda x: pd.to_numeric(x.file.str.extract(r'\/c\d{3}_(\d)', expand = False)))
.drop(['file'], axis=1))
fleets.append(tmp)
for filename in glob.iglob(fleet_dirname2 + "/*predictions.txt"):
tmp = (pd.read_csv(filename, delimiter="\t", header=None, names=['file','run','trial','cpu','count','program'])
.assign(trial=lambda x: x.trial + 1,
run=lambda x: x.run + 1,
program=lambda x: "(lambda (fix $0 (lambda " + x.program.str.replace("fix", "$1") + ")))",
id=lambda x: x.file.str.extract(r'\/(c\d{3})_'),
purpose="dataset",
order=lambda x: pd.to_numeric(x.file.str.extract(r'\/c\d{3}_(\d)', expand = False)))
.drop(['file'], axis=1))
fleets.append(tmp)
fleet = pd.concat(fleets)
(pd.merge(stimuli, fleet, how='left', on=['purpose','id','order','trial'])
.pipe(compute_output_and_accuracy)
.to_csv("fleet_data.csv", header=True, index=False))
def process_metagol_data():
Primitive.GLOBALS.clear()
grammar = Grammar.uniform(model_comparison_primitives_99())
results3_filename = "/Users/rule/sync/josh/library/research/list-routines/project/list-routine-model-comparison/waves/3_1/data/metagol/cropper-results/results3.csv"
preds3_filename = "/Users/rule/sync/josh/library/research/list-routines/project/list-routine-model-comparison/waves/3/data/metagol/predictions.csv"
results31_filename = "/Users/rule/sync/josh/library/research/list-routines/project/list-routine-model-comparison/waves/3_1/data/metagol/cropper-results/results3-1.csv"
results312_filename = "/Users/rule/sync/josh/library/research/list-routines/project/list-routine-model-comparison/waves/3_1/data/metagol/cropper-results/results3-1-c005.csv"
preds31_filename = "/Users/rule/sync/josh/library/research/list-routines/project/list-routine-model-comparison/waves/3_1/data/metagol/predictions.csv"
stimuli_filename = "~/sync/josh/library/phd/thesis/analyses/stimuli.csv"
metagol_3 = (pd.read_csv(results3_filename,
delimiter=", ",
header=None,
engine='python',
names=['task','run','order','trial','input','output','response','accuracy'])
.assign(id=lambda x: x.task.apply(str).str.replace('(\d+)', lambda m: f"c{int(m.group(0)):03}"),
accuracy=lambda x: 1 * x.accuracy))
metagol_3 = metagol_3[metagol_3.id.isin(wave_3_1_ids()) == False]
metagol_31 = (pd.read_csv(results31_filename,
delimiter=", ",
header=None,
engine='python',
names=['task','run','order','trial','input','output','response','accuracy'])
.assign(id=lambda x: x.task.apply(str).str.replace('(\d+)', lambda m: f"c{int(m.group(0)):03}"),
run=lambda x: x.run - 1,
accuracy=lambda x: 1 * x.accuracy))
metagol_312 = (pd.read_csv(results312_filename,
delimiter=", ",
header=None,
engine='python',
names=['task','run','order','trial','input','output','response','accuracy'])
.assign(id=lambda x: x.task.apply(str).str.replace('(\d+)', lambda m: f"c{int(m.group(0)):03}"),
run=lambda x: x.run - 1,
accuracy=lambda x: 1 * x.accuracy))
metagol = pd.concat([metagol_3, metagol_31, metagol_312])
counts_3 = (pd.read_csv(preds3_filename,
header=None,
names=['task','run','order','trial','program','cpu','count'])
.assign(id=lambda x: x.task.apply(str).str.replace('(\d+)', lambda m: f"c{int(m.group(0)):03}"))
.drop(['program','count'], axis=1))
counts_3 = counts_3[counts_3.id.isin(wave_3_1_ids()) == False]
counts_31 = (pd.read_csv(preds31_filename,
header=None,
names=['task','run','order','trial','program','cpu','count'])
.assign(id=lambda x: x.task.apply(str).str.replace('(\d+)', lambda m: f"c{int(m.group(0)):03}"))
.drop(['program','count'], axis=1))
counts = pd.concat([counts_3, counts_31])
stimuli = (pd.read_csv(stimuli_filename)
.query('purpose == "model"')
.drop(['purpose'], axis=1)
.rename(columns={'program': 'concept'}))
metagol = (pd.merge(metagol, counts, how='left', on=['id','task','trial','order','run'])
.assign(input = lambda x: x.input.str.replace(',', ', '),
response = lambda x: x.response.str.replace(',', ', '),
output = lambda x: x.output.str.replace(',', ', ')))
(pd.merge(stimuli, metagol, how='left', on=['id','order','trial','input','output'])
.drop(['task'], axis=1)
.to_csv("metagol_data.csv", header=True, index=False))
def process_robustfill_data():
Primitive.GLOBALS.clear()
grammar = Grammar.uniform(model_comparison_primitives_99())
stimuli_filename = "~/sync/josh/library/phd/thesis/analyses/stimuli.csv"
stimuli = (pd.read_csv(stimuli_filename)
.rename(columns={'program': 'concept'})
.query('purpose == "model"')
.drop(['purpose'], axis=1))
rf_dirname = "~/sync/josh/library/research/list-routines/project/list-routine-model-comparison/waves/3_1/data/robustfill"
rf3_filename = rf_dirname + "/predictions_wave_3_complete.csv"
rf_3 = pd.read_csv(rf3_filename)
rf_3 = rf_3[rf_3.id.isin(wave_3_1_ids()) == False]
rf31_filename = rf_dirname + "/predictions_wave_31_complete.csv"
rf_31 = pd.read_csv(rf31_filename).query('id != "c076"')
rf76_filename = rf_dirname + "/predictions_wave_31_76_complete.csv"
rf_76 = pd.read_csv(rf76_filename)
rf = pd.concat([rf_3, rf_31, rf_76])
(pd.merge(stimuli, rf, how='left', on=['id','order','trial'])
.sort_values(['id','order','run','trial'], axis=0)
.pipe(compute_output_and_accuracy)
.to_csv("robustfill_data.csv", header=True, index=False))
def process_hl_data():
Primitive.GLOBALS.clear()
grammar = Grammar.uniform(model_comparison_primitives_99())
stimuli_filename = "~/sync/josh/library/phd/thesis/analyses/stimuli.csv"
hl_dirname1 = "/Users/rule/sync/josh/library/research/list-routines/project/list-routine-model-comparison/waves/3_1/data/trs/out/results"
hl_dirname2 = "/Users/rule/sync/josh/library/research/list-routines/project/list-routine-human-experiments/waves/1/data/hl/out/results"
stimuli = (pd.read_csv(stimuli_filename)
.rename(columns={'program': 'concept'}))
hls = []
for filename in glob.iglob(hl_dirname1 + "/*predictions.csv"):
tmp = (pd.read_csv(filename, delimiter=",", header=0)
.assign(run=lambda x: x.run + 1,
purpose= lambda x: 'model',
id=lambda x: x.problem)
.drop(['problem'], axis=1))
hls.append(tmp)
for filename in glob.iglob(hl_dirname2 + "/*predictions.csv"):
tmp = (pd.read_csv(filename, delimiter=",", header=0)
.assign(run=lambda x: x.run + 1,
purpose= lambda x: 'dataset',
id=lambda x: x.problem)
.drop(['problem'], axis=1))
hls.append(tmp)
hl = pd.concat(hls)
(pd.merge(stimuli, hl, how='left', on=['id','purpose','order','trial'])
.to_csv("hl_data.csv", header=True, index=False))
def process_hl_data_2():
Primitive.GLOBALS.clear()
grammar = Grammar.uniform(model_comparison_primitives_99())
stimuli_filename = "~/sync/josh/library/phd/thesis/analyses/stimuli.csv"
hl_dirname1 = "/Users/rule/sync/josh/library/research/list-routines/project/list-routine-model-comparison/waves/3_1/data/trs/out/results2"
stimuli = (pd.read_csv(stimuli_filename)
.rename(columns={'program': 'concept'}))
hls = []
for filename in glob.iglob(hl_dirname1 + "/*predictions.csv"):
tmp = (pd.read_csv(filename, delimiter=",", header=0)
.assign(run=lambda x: x.run + 1,
purpose= lambda x: 'model',
id=lambda x: x.problem)
.drop(['problem'], axis=1))
hls.append(tmp)
hl = pd.concat(hls)
(pd.merge(hl, stimuli, how='left', on=['id','purpose','order','trial'])
.to_csv("hl_data_2.csv", header=True, index=False))
if __name__ == "__main__":
## Human Experiment - Wave Pilot
#
# for i, c in enumerate(wave_pilot(), 1):
# process("../waves/pilot/json/human", i, c, n_trials=11, n_orders=2, verbose=True, small=False, human=True)
# process("../waves/pilot/json/machine", i, c, n_trials=11, n_orders=2, verbose=True, small=False, human=False, parallel=True)
## Human Experiment - Wave 1
for i, c in enumerate(human_experiments_wave_1(), 1):
if i in [] + list(range(151,151)):
process("./tmp_dataset",
i,
c,
n_trials=11,
n_orders=5,
verbose=True,
small=False,
human=False,
kind="greedy",
)
# list_priors("dataset_priors.csv", human_experiments_wave_1())
## Model Comparison - Wave 3
# for i, c in enumerate(model_comparison_wave_3(), 1):
# if i in [2,5,15,27,28,30,31,32,35,36,39,40,60,76,78,88,89]:
# process("./tmp",
# i,
# c,
# n_trials=11,
# n_orders=5,
# verbose=True,
# small=(i <= 80),
# human=False,
# kind="greedy",
# )
# list_priors("model_comparison_priors.csv", model_comparison_wave_3())
## Uniqueness checks for all thesis concepts
# mc_ps = [x['concept'] for x in model_comparison_wave_3()]
# hd_ps = [x['concept'] for x in human_experiments_wave_1()]
# ps_0_09 = mc_ps[0:80]
# ps_0_99 = mc_ps[80:] + hd_ps
# blah = process_2(ps_0_09, small=True)
# print(len(blah))
# for v in blah.values():
# print(f"{len(v)} => {v}")
# print("\n\n\n")
# blah = process_2(ps_0_99, small=False)
# print(len(blah))
# for v in blah.values():
# print(f"{len(v)} => {v}")
| 2.375
| 2
|
lib/RequestHandler.py
|
audrummer15/motif-crawler
| 0
|
12783178
|
<filename>lib/RequestHandler.py
import pycurl
from StringIO import StringIO
class RequestHandler(object):
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36"
def __init__(self, cookieJar=None):
if cookieJar != None:
if isinstance(cookieJar, str):
self.cookieJar = cookieJar
else:
raise ValueError("RequestHandler.__init__: CookieJar must be a string.")
else:
self.cookieJar = "cookies.txt"
self.stringbuf = StringIO()
self.c = pycurl.Curl()
self.c.setopt(pycurl.VERBOSE, 0) # don't show request info
self.c.setopt(pycurl.COOKIEJAR, self.cookieJar)
self.c.setopt(pycurl.COOKIEFILE, self.cookieJar)
self.c.setopt(pycurl.ENCODING, 'gzip,deflate')
self.c.setopt(pycurl.USERAGENT, self.USER_AGENT)
self.c.setopt(pycurl.CONNECTTIMEOUT, 5)
self.c.setopt(pycurl.TIMEOUT, 20)
def __curl_callback(self, buf):
self.stringbuf.write(buf)
def setCookieJar(self, cookieJar):
if cookieJar != None:
if isinstance(cookieJar, str):
self.cookieJar = cookieJar
self.c.setopt(pycurl.COOKIEJAR, self.cookieJar)
self.c.setopt(pycurl.COOKIEFILE, self.cookieJar)
return True
else:
raise ValueError("MotifAuthorizationManager.setCookieJar: cookieJar must be a string.")
else:
raise ValueError("MotifAuthorizationManager.setCookieJar: cookieJar not optional.")
def gethtml(self, url, headers=None):
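        # Note: response headers and body are both appended to the same shared
        # StringIO buffer, and the curl handle is closed after perform(), so a new
        # RequestHandler instance is needed for each request.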
self.c.setopt(pycurl.URL, url)
self.c.setopt(pycurl.WRITEFUNCTION, self.__curl_callback)
self.c.setopt(pycurl.HEADERFUNCTION, self.__curl_callback)
self.c.perform()
statusCode = int(self.c.getinfo(pycurl.HTTP_CODE))
self.c.close()
return [statusCode, self.stringbuf.getvalue()]
| 2.671875
| 3
|
5 - unit testing/vehicle_info_test.py
|
mickeybeurskens/betterpython
| 523
|
12783179
|
import unittest
from vehicle_info_after import VehicleInfo
class TestVehicleInfoMethods(unittest.TestCase):
pass
# def test_compute_tax_non_electric(self):
# v = VehicleInfo("BMW", False, 10000)
# self.assertEqual(v.compute_tax(), 500)
# def test_compute_tax_electric(self):
# v = VehicleInfo("BMW", True, 10000)
# self.assertEqual(v.compute_tax(), 200)
# def test_compute_tax_exemption(self):
# v = VehicleInfo("BMW", False, 10000)
# self.assertEqual(v.compute_tax(5000), 250)
# def test_compute_tax_exemption_negative(self):
# v = VehicleInfo("BMW", False, 10000)
# self.assertRaises(ValueError, v.compute_tax, -5000)
# def test_compute_tax_exemption_high(self):
# v = VehicleInfo("BMW", False, 10000)
# self.assertEqual(v.compute_tax(20000), 0)
# def test_can_lease_false(self):
# v = VehicleInfo("BMW", False, 10000)
# self.assertEqual(v.can_lease(5000), False)
# def test_can_lease_true(self):
# v = VehicleInfo("BMW", False, 10000)
# self.assertEqual(v.can_lease(15000), True)
# def test_can_lease_negative_income(self):
# v = VehicleInfo("BMW", False, 10000)
# self.assertRaises(ValueError, v.can_lease, -5000)
# run the actual unittests
unittest.main()
| 3.40625
| 3
|
ianlmk/pyStuff/data_structs_1.py
|
ianlmk/cs161
| 0
|
12783180
|
#!/usr/bin/env python3
'''
author: < <NAME>
description: < just a study on structures in python
input: < none for now
output: < various shit related to data types, structures and algorithms
return: < if you have to
'''
import sys
import os
print("#########################################################")
print("# INDEXING #")
print("#########################################################")
# indexes are position mapping tools
# you can call them directly, or in a loop.
# EXAMPLES:
#----------string indexing----------#
x = 'frog'
print("\nstring x = 'frog'\nx[3] = {}\n".format(x[3]))
#
#-----------list indexing-----------#
y = ['dog','cat','fish']
print("\nlist y = ['dog','cat','fish']\ny[0] = {}\n".format(y[0]))
#
#----------tuple indexing-----------#
z = ('ian','caitlin','ayla','sawyer')
print("\ntuple: z = ('ian','caitlin','ayla','sawyer')\nz[2] = {}\n".format(z[2]))
#-----------------------------------#
print("#########################################################")
print("# SLICING #")
print("#########################################################")
# slices find subSTRINGS, subLISTS, subTUPLES using indices
# format: [start:stop:step] where:
# start = the index to start at (inclusive),
# stop = the index just past the last item wanted (exclusive),
# step = how many positions to advance per iteration. Default is 1
# STRING EXAMPLES:
varA = 'computadora'
print("\nvarA = {}".format(varA))
for i in varA:
print("'{}',".format(i), end = ' ')
print('')
for i in range(0, len(varA)):
print("'{}',".format(i), end = ' ')
# from index 1 - 4:
print("\n varA[1:4] = {}".format(varA[1:4]))
#from index 1 - 6, but every 2 chars
print(" varA[1:6:2] = {}".format(varA[1:6:2]))
#from index 3 on
print(" varA[3:] = {}".format(varA[3:]))
# from the beginning to index 5
print(" varA[:5] = {}".format(varA[:5]))
# last char ( len(varA) - 1)
print(" varA[-1] = {}".format(varA[-1]))
# from start until the second to last char
print(" varA[:-2] = {}".format(varA[:-2]))
print("\n#########################################################")
print("# ADDING/CONCATENATING #")
print("#########################################################")
# adding and concatenating combine ints/strings/chars/lists
# formatting is similar for all data types. Tuples need a trailing "," for a single-item tuple
print("\nSTRINGS:")
stringA = 'lunch' + 'box'
print("stringA = 'lunch' + 'box' | {}".format(stringA))
print("stringA * 3 = {}".format(stringA * 3))
print('')
print("LISTS:")
listA = ['mac','linux'] + ['windows','freeBSD']
print("listA = ['mac','linux'] + ['windows','freeBSD'] | {}".format(listA))
print("listA * 2 = {}".format(listA * 2))
print('')
print("TUPLES:")
tupleA = ('ian','caitlin','sawyer') + ('ayla',)
print("tupleA = ('ian','caitlin','sawyer') + ('ayla',) | {}".format(tupleA))
print("tupleA * 4 = {}".format(tupleA * 4))
print('')
print("\n#########################################################")
print("# MEMBERSHIP CHECKS #")
print("#########################################################")
# Membership checking returns boolean values.
# These can be used for more than a print action
stringB = 'lunchbox'
result = ('u' in stringB)
print("stringB = 'lunchbox'\n result = ('u' in stringB): {}".format(result))
print('')
listB = ['left','center','right']
result = ('center' not in listB)
print("listB = ['left','center','right']\n result = ('center' not in listB): {}".format(result))
print('')
tupleB = ('coffee','tea','water')
result = ('coffee' in tupleB)
print("tupleB = ('coffee','tea','water')\n result = ('coffee' in tupleB): {}".format(result))
print('')
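# as an added illustration (not part of the original notes): since a membership
# check is just a boolean, it can drive control flow directly
if 'tea' in tupleB:
    print("tea is on the menu")
else:
    print("no tea today")
print('')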
print("\n#########################################################")
print("# ITERATIONS #")
print("#########################################################")
# Iterating structures is a "for" loop action
#
listC = ['one','two','three','four','five']
print("listC = ['one','two','three','four','five']\n>> for item in listC:\n print(item)\n--------")
# just a for loop to iterate the list
for item in listC:
print(item)
print("--------")
# to get the index along with the item, enumerate returns what we want:
print(">> for h,k in enumerate(listC):\n print(h,k)\n--------")
for h,k in enumerate(listC):
print(h,k)
print("--------")
tupleC = ('snoopy','linus','charlie','lucy','pigpen',)
print("\n#########################################################")
print("# COUNT ITEMS #")
print("#########################################################")
# just use the len(var) operator
stringCount = 'letters'
stringLen = len(stringCount)
print("stringCount = 'letters'\n stringLen = len(stringCount)\n stringLen: {}".format(stringLen))
listCount = [1,2,3,4,5,6,7,8,9,0]
listLen = len(listCount)
print("listCount = [1,2,3,4,5,6,7,8,9,0]\n listLen = len(listCount)\n listLen: {}".format(listLen))
tupleCount = (1,2,3,4,5,6,7,8,9)
tupleLen = len(tupleCount)
print("tupleCount = (1,2,3,4,5,6,7,8,9)\n tupleLen = len(tupleCount)\n tupleLen: {}".format(tupleLen))
print("\n#########################################################")
print("# MIN/MAX #")
print("#########################################################")
# There are a few ways to pull out min/max
# comparison is lexicographic, so chars and ints can't be mixed in lists and tuples.
# Start of algorithms.
print("ASCII chars are interesting with MIN/MAX.\nIts counting, so 'a' is min and 'z' is max.")
#------------ Minimum Algo ------------#
stringD= "the skinny brown fox xyz"
minChar = stringD[0]
maxChar = stringD[0]
for i in range(0,len(stringD)):
if stringD[i] != ' ':
if stringD[i] < minChar:
minChar = stringD[i]
print("smallest char in '{}': '{}'".format(stringD, minChar))
# can also be written as "minChar = min(stringD)" -- though plain min() would treat the space as the smallest char
for i in range(0, len(stringD)):
if stringD[i] != ' ':
if stringD[i] > maxChar:
maxChar = stringD[i]
print("largest char in '{}': '{}'".format(stringD, maxChar))
#can also be written as "maxChar = max(stringD)"
print("\n#########################################################")
print("# ADD/SUM #")
print("#########################################################")
# applies to ints/doubles/numbers
# strings >> error
print("ADD ITEMS IN A LIST:\n")
sumList = 0
numList = [2,3,5,6,1,4,9,8]
for item in numList:
sumList += item
print("numList = [2,3,5,6,1,4,9,8]\nfor item in numList:\n sumList += item\n print(numList): {}".format(sumList))
print("alternatives:\n")
print("sum all items in the list:")
print(" print(sum(numList)): {}".format(sum(numList)))
print("sum the last 2 items in the list:")
print(" print(sum(numList[-2:])): {}".format(sum(numList[-2:])))
print("sum every other number in the list:")
print("print(sum(numList[::2])): {}".format(sum(numList[::2])))
print("\nADD ITEMS IN A TUPLE:\n")
sumTuple = 0
numTuple = (3,6,1,2,4,9,7)
for item in numTuple:
sumTuple += item
print("numTuple = (3,6,1,2,4,9,7)\nfor item in numTuple:\n numTuple += item\n print(numTuple): {}".format(sumTuple))
| 4.09375
| 4
|
src/better_corruptions/fast_corruptions/benchmark.py
|
anonymousicml2021/paper2888
| 85
|
12783181
|
<filename>src/better_corruptions/fast_corruptions/benchmark.py
from robustness import datasets, model_utils, loaders
from robustness.model_utils import make_and_restore_model as make_model
from robustness.tools.vis_tools import show_image_row
from matplotlib import pyplot as plt
from .ext_files import make_cifar_c, make_imagenet_c
from . import gpu_corruptions
from .gpu_corruptions.utils import torch_to_np, np_to_torch
from .configs import FIXED_CONFIG
from torchvision.datasets import ImageFolder
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader, TensorDataset
from timeit import Timer
from argparse import ArgumentParser
from PIL import Image
from tqdm import tqdm
from pathlib import Path
import multiprocessing as mp
import dill
import torch as ch
import numpy as np
## Constants
IM_DIMS = {
'imagenet': [3, 224, 224],
'cifar': [3, 32, 32]
}
OLD_C_MAKERS = {
'imagenet': make_imagenet_c,
'cifar': make_cifar_c
}
## Multiprocessing stuff
def process_batch(in_tens, out_tens, in_q, out_q):
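    # Worker loop: pull (index, dataset, corruption, severity) requests from in_q,
    # apply the reference CPU corruption to the shared input tensor, write the
    # result into the shared output tensor, and acknowledge the index on out_q.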
t = out_tens.numpy()
while True:
req = in_q.get()
if req == 'Done': break
i, dataset, c_name, severity = req
old_c = getattr(OLD_C_MAKERS[dataset], c_name)
res = np_to_torch(np.stack([old_c(Image.fromarray(m), severity) \
for m in torch_to_np(in_tens[i][None,...])]))
t[i] = res
out_q.put(i)
def corruption_tx(c_func):
def f(x, y):
return c_func(x), y
return f
def mp_corruption_tx(in_q, out_q, input_tens, output_tens, dataset, c_name, severity):
def f(x, y):
input_tens[:len(x)] = x
[in_q.put((i, dataset, c_name, severity)) for i in range(len(x))]
[out_q.get() for _ in range(len(x))]
return output_tens[:len(x)].clone(), y
return f
def start_server(num_threads, im_dims):
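    # Spawn num_threads worker processes that communicate through a request queue,
    # a response queue, and two shared-memory tensors (input and output batches).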
ctx = mp.get_context("spawn")
in_q, out_q = ctx.Queue(), ctx.Queue()
input_tens = ch.zeros(*im_dims).share_memory_()
output_tens = ch.zeros(*im_dims).share_memory_()
proc_args = (input_tens, output_tens, in_q, out_q)
ps = [ctx.Process(target=process_batch, args=proc_args) for _ in range(num_threads)]
[p.start() for p in ps]
return in_q, out_q, input_tens, output_tens
## Timing and logging
def time_and_return(f, x):
t = Timer(lambda: f(x))
return f(x), t.timeit(number=5) / 5.
def log_and_save(out_dir, name, ims, _time, acc):
if out_dir is not None:
show_image_row([ims.cpu()])
plt.savefig(str(Path(out_dir) / f'{name}.png'))
plt.close()
print(f"Corruption: {name} | "
f"Time (new): {_time:.4f}s for 10 images | "
f"Model accuracy: {acc}")
## Setup tools
def model_and_dataset(args):
ds = datasets.DATASETS[args.dataset](args.dataset_path)
if args.dataset == 'cifar':
model, _ = make_model(arch=args.arch, dataset=ds,
resume_path='PATH_TO_MODEL')
else:
model, _ = make_model(arch=args.arch, dataset=ds, pytorch_pretrained=True)
model.eval()
return ds, model
def precomputed_loader(args, c_name):
if args.dataset == 'imagenet':
if_ds = ImageFolder(Path(args.precomputed) / c_name / str(args.severity), transform=ToTensor())
elif args.dataset == 'cifar':
ims = np_to_torch(np.load(str(Path(args.precomputed) / f'{c_name}.npy')))
labs = ch.tensor(np.load(str(Path(args.precomputed) / 'labels.npy'))).long()
ims, labs = [x[(args.severity-1)*10000:args.severity*10000] for x in [ims, labs]]
if_ds = TensorDataset(ims, labs)
loader = DataLoader(if_ds, batch_size=args.batch_size, shuffle=True, num_workers=20)
return loader
## Evaluation
def eval_loader(model, corruption_fn, ds, loader, max_ims=None):
tot_corr, tot = 0., 0.
tqdm_tot = max_ims or len(loader)
# it = tqdm(enumerate(loader), total=tqdm_tot)
it = enumerate(loader)
for i, (ims, labs) in it:
ims = ims.cuda()
tot_corr += model(ims)[0].argmax(1).eq(labs.cuda()).sum()
tot += len(labs)
if (max_ims is not None) and (i == max_ims):
break
return tot_corr / tot
def main(args):
dataset, model = model_and_dataset(args)
if args.threads > 1:
mp_args = start_server(args.threads, [args.batch_size] + IM_DIMS[args.dataset])
print("Server started")
_, vl = dataset.make_loaders(batch_size=10, workers=0, only_val=True)
_, big_vl = dataset.make_loaders(batch_size=args.batch_size,
workers=20, only_val=True)
fixed_im, _ = next(iter(vl))
if args.out_dir is not None:
show_image_row([fixed_im])
plt.savefig(str(Path(args.out_dir) / 'base_figs.png'))
CFG = FIXED_CONFIG[args.dataset]
corruption_list = CFG if args.corruption == 'all' else [args.corruption]
for c_name in corruption_list:
severities = [1, 2, 3, 4, 5] if args.severity == 'all' else [int(args.severity)]
print('=' * 40)
for severity in severities:
print('-' * 40)
print(f"Corruption: {c_name} | Severity: {severity}")
new_inp = fixed_im.clone().cuda()
old_inp = fixed_im.clone()
c_params = FIXED_CONFIG[args.dataset][c_name]
if c_params is None:
print(f"Skipping corruption {c_name}...")
continue
### NEW CORRUPTIONS
new_fn = lambda x: getattr(gpu_corruptions, c_name)(x.cuda(), *c_params[severity - 1])
new_res, new_time = time_and_return(new_fn, new_inp)
c_loader = loaders.LambdaLoader(big_vl, corruption_tx(new_fn))
new_acc = eval_loader(model, new_fn, dataset, c_loader, max_ims=args.subset)
log_and_save(args.out_dir, f'{c_name}_new', new_res.cpu(), new_time, new_acc)
if not args.compare: continue
### OLD CORRUPTIONS
c_maker = {'imagenet': make_imagenet_c, 'cifar': make_cifar_c}[args.dataset]
old_c = getattr(c_maker, c_name)
old_fn = lambda x: np_to_torch(np.stack([old_c(Image.fromarray(m), severity) for m in torch_to_np(x)]))
old_res, old_time = time_and_return(old_fn, old_inp)
if args.precomputed is not None:
loader = precomputed_loader(args, c_name)
elif args.threads > 1:
loader = loaders.LambdaLoader(big_vl, mp_corruption_tx(*mp_args, args.dataset, c_name, severity))
elif args.threads == 1:
loader = loaders.LambdaLoader(big_vl, corruption_tx(old_fn))
old_acc = eval_loader(model, old_fn, dataset, loader, max_ims=args.subset)
log_and_save(args.out_dir, f'{c_name}_old', old_res, old_time, old_acc)
    if args.threads > 1:
        # the multiprocessing server (mp_args) only exists when threads > 1
        [mp_args[0].put('Done') for _ in range(args.threads)]
if __name__ == '__main__':
corruption_choices = list(FIXED_CONFIG['imagenet'].keys()) + ['all']
parser = ArgumentParser()
parser.add_argument('--arch', default='resnet50', help="Arch to evaluate")
parser.add_argument('--batch-size', type=int, default=512)
parser.add_argument('--dataset', choices=['cifar', 'imagenet'])
parser.add_argument('--resume-path', help='path to model checkpoint')
parser.add_argument('--dataset-path', required=True)
parser.add_argument('--corruption', choices=corruption_choices, default='all')
parser.add_argument('--severity', required=True)
parser.add_argument('--precomputed', help='path to ImageNet-C')
parser.add_argument('--threads', type=int, default=1)
parser.add_argument('--compare', action='store_true',
help='whether to compare to ImageNet-C/CIFAR-C')
parser.add_argument('--subset', type=int, help='Number of iterations (batches) to evaluate')
parser.add_argument('--out-dir')
args = parser.parse_args()
with ch.no_grad():
main(args)
| 2.109375
| 2
|
deep-learning/pretrained_test.py
|
georgepachitariu/machine-learning-portfolio
| 2
|
12783182
|
<filename>deep-learning/pretrained_test.py
from data import Imagenet2012
import gpu
import tensorflow as tf
from tensorflow.python.keras.engine import data_adapter
from tensorflow.keras.layers import Conv2D, BatchNormalization, AveragePooling2D, GlobalAveragePooling2D, \
Flatten, Dropout, Dense, ReLU, Lambda, Softmax
from tensorflow.dtypes import cast
import os, sys, shutil
from resnet import Preprocessing
import tensorflow_hub as hub
class PretrainedResnet(tf.keras.Model):
def __init__(self):
super(PretrainedResnet, self).__init__(name='')
self.model= tf.keras.Sequential([
hub.KerasLayer(
"https://tfhub.dev/tensorflow/resnet_50/classification/1",
#"https://tfhub.dev/google/imagenet/resnet_v1_50/classification/4",
trainable=False)
])
def call(self, input_tensor, training=False):
return self.model(input_tensor, training=training)
def test_step(self, data):
data = data_adapter.expand_1d(data)
images_crops, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
total_number_crops = tf.shape(images_crops)[0]
distinct_images = total_number_crops / 10 # there are 10 crops per image
# Segments will look like this: [0, 0, ..., 1, 1, ...].
segments = tf.repeat(tf.range(0, distinct_images, 1, dtype=tf.int32), repeats=10)
y_pred_crops=self.model(images_crops, training=False)
# I segment to get the mean score per 10 crops for each image. Check the testing preprocessing as well.
y_pred = tf.math.segment_mean(y_pred_crops, segments)
y = tf.math.segment_max(y, segments) # I segment y based on same rules to have the same final shape
# Updates stateful loss metrics.
self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses)
self.compiled_metrics.update_state(y, y_pred, sample_weight)
return {m.name: m.result() for m in self.metrics}
def compile(self, learning_rate=0.1):
super().compile(
# Also check the ReduceLROnPlateau training callback lower in script.
optimizer=tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9),
# "categorical_crossentropy": uses a one-hot array to calculate the probability,
# "sparse_categorical_crossentropy": uses a category index
# source: https://stackoverflow.com/questions/58565394/what-is-the-difference-between-sparse-categorical-crossentropy-and-categorical-c
loss='sparse_categorical_crossentropy',
metrics=['accuracy', tf.keras.metrics.SparseTopKCategoricalAccuracy(k=5)])
def main():
gpu.configure_gpu()
r = PretrainedResnet()
r.compile()
r.build(input_shape=(None, 224, 224, 3))
total_train_data_size = 1281167
total_validation_data_size = 50000
train_data, validation_data = Imagenet2012.load()
nr_images = 32*1000
train_data = train_data.take(nr_images)
validation_data = validation_data.take(total_validation_data_size)
batch_size=32
train_augmented_gen = Preprocessing.create_generator(train_data, for_training=True, batch_size=batch_size)
validation_gen = Preprocessing.create_generator(validation_data, for_training=False,
batch_size=None # batch_size is treated differently during validations
)
auto=tf.data.experimental.AUTOTUNE
train_augmented_gen = train_augmented_gen.map(lambda im, l: (im+0.5, l), num_parallel_calls=auto)
validation_gen = validation_gen.map(lambda im, l: (im+0.5, l), num_parallel_calls=auto)
log_dir = './logs/fit/jupyterhub_version'
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1, profile_batch=2)
history = r.fit(x=train_augmented_gen,
validation_data=validation_gen,
initial_epoch=0,
epochs=1,
# steps_per_epoch = total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch
                    steps_per_epoch=nr_images // batch_size,
callbacks=[tensorboard_callback]
)
if __name__ == '__main__':
main()
| 2.78125
| 3
|
head_pos.py
|
AuroreBussalb/app-head-pos
| 0
|
12783183
|
<reponame>AuroreBussalb/app-head-pos<filename>head_pos.py<gh_stars>0
#!/usr/local/bin/python3
import mne
import json
import os
from brainlife_apps_helper import helper
def head_pos(raw, param_compute_amplitudes_t_step_min,
param_compute_amplitudes_t_window,
param_compute_amplitudes_ext_order,
param_compute_amplitudes_tmin,
param_compute_amplitudes_tmax, param_compute_locs_t_step_max,
param_compute_locs_too_close, param_compute_locs_adjust_dig,
param_compute_head_pos_dist_limit,
param_compute_head_pos_gof_limit, param_compute_head_pos_adjust_dig):
"""Compute time-varying head positions from cHPI and save them in a .pos file.
Parameters
----------
raw: instance of mne.io.Raw
MEG fif file contaning cHPI info.
param_compute_amplitudes_t_step_min: float
Minimum time step to use to compute cHPI amplitudes. If correlations are sufficiently high, t_step_max
will be used. Default is 0.01.
param_compute_amplitudes_t_window: float
Time window to use to estimate the amplitudes. Default is 0.2 (200 ms).
param_compute_amplitudes_ext_order: int
The external order for SSS-like interfence suppression to compute cHPI amplitudes. Default is 1.
param_compute_amplitudes_tmin: float
Start time of the raw data to use in seconds to compute cHPI amplitudes. Default is 0.
param_compute_amplitudes_tmax: float or None
End time of the raw data to use in seconds to compute cHPI amplitudes. Default is None.
param_compute_locs_t_step_max: float
Maximum step to use to compute HPI coils locations. Default is 1.
param_compute_locs_too_close: str
How to handle HPI positions too close to sensors when computing HPI coils locations.
        Can be 'raise' (default), 'warning', or 'info'.
param_compute_locs_adjust_dig: bool
If True, adjust the digitization locations used for fitting when computing HPI coils locations.
Default is False.
param_compute_head_pos_dist_limit: float
Minimum distance (m) to accept for coil position fitting when computing head positions. Default is 0.005.
param_compute_head_pos_gof_limit: float
Minimum goodness of fit to accept for each coil to compute head positions. Default is 0.98.
param_compute_head_pos_adjust_dig: bool
If True, adjust the digitization locations used for fitting when computing head positions. Default is False.
Returns
-------
head_pos_file: ndarray, shape (n_pos, 10)
The time-varying head positions.
"""
# Extract HPI coils amplitudes as a function of time
chpi_amplitudes = mne.chpi.compute_chpi_amplitudes(raw, t_step_min=param_compute_amplitudes_t_step_min,
t_window=param_compute_amplitudes_t_window,
ext_order=param_compute_amplitudes_ext_order,
tmin=param_compute_amplitudes_tmin,
tmax=param_compute_amplitudes_tmax)
# Compute time-varying HPI coils locations
chpi_locs = mne.chpi.compute_chpi_locs(raw.info, chpi_amplitudes, t_step_max=param_compute_locs_t_step_max,
too_close=param_compute_locs_too_close, adjust_dig=param_compute_locs_adjust_dig)
# Compute head positions from the coil locations
head_pos_file = mne.chpi.compute_head_pos(raw.info, chpi_locs, dist_limit=param_compute_head_pos_dist_limit,
gof_limit=param_compute_head_pos_gof_limit, adjust_dig=param_compute_head_pos_adjust_dig)
# Save file
mne.chpi.write_head_pos("out_dir/headshape.pos", head_pos_file)
return head_pos_file
def main():
# Generate a json.product to display messages on Brainlife UI
dict_json_product = {'brainlife': []}
# Load inputs from config.json
with open('config.json') as config_json:
config = json.load(config_json)
# Read the meg file
data_file = config.pop('fif')
raw = mne.io.read_raw_fif(data_file, allow_maxshield=True)
# Convert empty strings values to None
config = helper.convert_parameters_to_None(config)
# Read and save optional files
config = helper.read_optional_files(config, 'out_dir')[0]
# Delete keys values in config.json when this app is executed on Brainlife
kwargs = helper.define_kwargs(config)
# Apply head pos
head_pos(raw, **kwargs)
# Success message in product.json
dict_json_product['brainlife'].append({'type': 'success',
'msg': 'Head position file was written successfully.'})
# Save the dict_json_product in a json file
with open('product.json', 'w') as outfile:
json.dump(dict_json_product, outfile)
if __name__ == '__main__':
main()
| 2.15625
| 2
|
agents/lib/ping_agent.py
|
hseritt/alfmonitor
| 0
|
12783184
|
""" Ping agent - checks up/down status of nodes."""
import socket
from alfmonitor.lib.alflogger import logger
from agents.lib.abs_agent import AbstractConnectionAgent
socket.setdefaulttimeout(10)
class PingAgent(AbstractConnectionAgent):
agent_name = 'Ping'
def __init__(self):
self.log = logger(
'{}.{}'.format(
__name__,
self.__class__.__name__,
)
)
def connect(self, profile):
""" Connects to profile's uri. """
try:
hostname, port = profile.uri.split(':')
except (IndexError, ValueError) as err:
hostname = profile.uri
port = 0
self.log.exception(err)
self.log.error('Port cannot be assigned. Attempting with port 0.')
self.log.debug(
f'Attempting connection to {hostname} '
f'at port {port} ...'
)
if profile.protocol == 'TCP':
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
elif profile.protocol == 'UDP':
connection = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
else:
self.log.warn(
f'Protocol not set for profile: {profile.name}. Assuming TCP.'
)
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
address = (hostname, int(port))
is_connected = False
try:
connection.connect(address)
is_connected = True
except (socket.timeout, ConnectionRefusedError):
pass
except socket.gaierror as err:
self.log.exception(
'Check to see if this profile should use Http agent instead '
'of Ping agent.\n'
f'Profile uri is {profile.uri}'
)
finally:
connection.close()
return is_connected
if __name__ == '__main__':
agent = PingAgent()
agent.run()
| 2.765625
| 3
|
models/__init__.py
|
mdca-aux-loss/MDCA-Calibration
| 8
|
12783185
|
<filename>models/__init__.py<gh_stars>1-10
from .resnet import resnet20, resnet32, resnet56, resnet110
from .mendley_networks import resnet50_mendley
from .resnet_pacs import resnet18_pacs
from .resnet_mnist import resnet20_mnist
from .resnet_imagenet import resnet34, resnet50
model_dict = {
# resnet models can be used for cifar10/100, svhn
# mendley models only to be used for mendley datasets
"resnet20" : resnet20,
"resnet32" : resnet32,
"resnet56" : resnet56,
"resnet110" : resnet110,
"resnet50_mendley" : resnet50_mendley,
"resnet_pacs" : resnet18_pacs,
"resnet_mnist" : resnet20_mnist,
"resnet34_imagenet" : resnet34,
"resnet50_imagenet" : resnet50
}
| 1.734375
| 2
|
adocker/models/resources.py
|
leesdolphin/adocker
| 0
|
12783186
|
import typing as typ
_sentinel = object()
def attribute_value(attr_key, *, default=_sentinel, type=typ.Any, convert=lambda x: x):
    docs = """The value of {0!r} from the attributes.""".format(attr_key)
    if default is _sentinel:
def getter(self) -> type:
return convert(self.attrs[attr_key])
docs += """
.. raises::
KeyError: If {0!r} is missing from this instance's attributes.
""".format(attr_key)
else:
def getter(self) -> type:
return convert(self.attrs.get(attr_key, default))
return property(
fget=getter,
doc=docs
)
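# Illustrative sketch, not part of the original module: a hypothetical Model
# subclass could expose raw server attributes as typed properties through
# attribute_value. The class name and attribute keys below are assumptions
# chosen only for the example.
#
#     class ContainerExample(Model):
#         name = attribute_value('Name', type=str)
#         created = attribute_value('Created', default=None, type=str)
#
# Reading ContainerExample(attrs={'Name': 'web'}).name would return 'web',
# while .created would fall back to its declared default instead of raising
# KeyError, matching the two getter variants generated above.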
class Model(object):
"""
A base class for representing a single object on the server.
"""
id_attribute = 'Id'
def __init__(self, attrs=None, client=None, collection=None):
#: A client pointing at the server that this object is on.
self.client = client
#: The collection that this model is part of.
self.collection = collection
#: The raw representation of this object from the API
self.attrs = attrs
if self.attrs is None:
self.attrs = {}
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.short_id)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.id == other.id
def __hash__(self):
return hash("%s:%s" % (self.__class__.__name__, self.id))
@property
def id(self):
"""
The ID of the object.
"""
return self.attrs.get(self.id_attribute)
@property
def short_id(self):
"""
The ID of the object, truncated to 10 characters.
"""
return self.id[:10]
class ReloadableModel(Model):
async def reload(self):
"""
Load this object from the server again and update ``attrs`` with the
new data.
"""
new_model = await self.collection.get(self.id)
self.attrs = new_model.attrs
class Collection(object):
"""
A base class for representing all objects of a particular type on the
server.
"""
#: The type of object this collection represents, set by subclasses
model = None
def __init__(self, client=None):
#: The client pointing at the server that this collection of objects
#: is on.
self.client = client
async def list(self):
raise NotImplementedError
async def get(self, key):
raise NotImplementedError
async def create(self, attrs=None):
raise NotImplementedError
def prepare_model(self, attrs, model=None):
"""
Create a model from a set of attributes.
"""
if isinstance(attrs, Model):
attrs.client = self.client
attrs.collection = self
return attrs
elif isinstance(attrs, dict):
if model is None:
model = self.model
return model(attrs=attrs, client=self.client, collection=self)
else:
raise Exception("Can't create %s from %s" %
(self.model.__name__, attrs))
| 2.6875
| 3
|
inkpy_jinja/__init__.py
|
ar4s/InkPy
| 2
|
12783187
|
# -*- coding: utf-8 -*-
from .api import pdf
__all__ = ['pdf']
| 1.101563
| 1
|
config.py
|
x-surgical-x/movie-recommendations
| 7
|
12783188
|
import json
import os
configName = os.path.dirname(os.path.realpath(__file__)) + '/config.json'
config = None
try:
with open(configName) as data:
try:
config = json.load(data)
config["build_exists"] = os.path.join(os.path.dirname(configName), "app", "frontend", "build")
config["build_exists"] = os.path.exists(config["build_exists"])
except Exception:
print("Error occured while parsing json, please check json validity")
except Exception:
print("Error occured while reading config, make sure config.json is present")
raise
| 2.625
| 3
|
3_DnD_Generator.py
|
OzymandiasThe2/DnD_Project
| 0
|
12783189
|
from dice_gen import dice_details
from monster_gen import monster_details
from name_gen import name_details
from player_gen import final_ran_stats
from weapon_gen import generate_random_weapon
from NPC_gen_vince import generate_random_race
def main():
def clear(arr):
for x in arr:
x.reset()
while True:
print("Welcome brave adventure.\nWhat is your request?")
print("[1] Dice Rolls\n[2] Stat Gen\n[3] Name Gen\n[4] Monster Gen\n[5] Loot Gen\n[6] NPC Gen\n[0] Exit")
try:
selector = int(input("Enter a number for what you want: "))
if selector == 0 or selector is None:
print("Goodbye")
break
if selector == 1: # Dice Roll
dice_details()
elif selector == 2: # Player/Stat Gen
final_ran_stats()
elif selector == 3:
name_details()
elif selector == 4:
monster_details()
elif selector == 5:
print(generate_random_weapon())
# weaponDetails()
elif selector >= 6:
print(generate_random_race())
        except (ValueError, IndexError):
print("Goodbye")
break
if __name__ == "__main__":
main()
| 2.921875
| 3
|
saas/backend/api/management/urls.py
|
nannan00/bk-iam-saas
| 7
|
12783190
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.urls import path
from . import views
urlpatterns = [
    # Grade managers
path(
"grade_managers/",
views.ManagementGradeManagerViewSet.as_view({"post": "create"}),
name="open.management.grade_manager",
),
    # Grade manager members
path(
"grade_managers/<int:id>/members/",
views.ManagementGradeManagerMemberViewSet.as_view({"get": "list", "post": "create", "delete": "destroy"}),
name="open.management.grade_manager_member",
),
    # User groups
path(
"grade_managers/<int:id>/groups/",
views.ManagementGradeManagerGroupViewSet.as_view({"get": "list", "post": "create"}),
name="open.management.grade_manager_group",
),
path(
"groups/<int:id>/",
views.ManagementGroupViewSet.as_view({"put": "update", "delete": "destroy"}),
name="open.management.group",
),
    # User group members
path(
"groups/<int:id>/members/",
views.ManagementGroupMemberViewSet.as_view({"get": "list", "post": "create", "delete": "destroy"}),
name="open.management.group_member",
),
    # Custom permissions of a user group
path(
"groups/<int:id>/policies/",
views.ManagementGroupPolicyViewSet.as_view({"post": "create"}),
name="open.management.group_policy",
),
    # Users
path(
"users/grade_managers/",
views.ManagementUserGradeManagerViewSet.as_view({"get": "list"}),
name="open.management.user_grade_manager",
),
path(
"users/grade_managers/<int:id>/groups/",
views.ManagementUserGradeManagerGroupViewSet.as_view({"get": "list"}),
name="open.management.user_grade_manager_group",
),
    # User group applications
path(
"groups/applications/",
views.ManagementGroupApplicationViewSet.as_view({"post": "create"}),
name="open.management.group_application",
),
]
| 1.765625
| 2
|
src/qraz/frontend/migrations/0001_initial.py
|
fladi/qraz
| 0
|
12783191
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-28 13:20
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import django_fsm
import markupfield.fields
import qraz.frontend.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Repository',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('github', models.PositiveIntegerField(verbose_name='Github repository ID')),
('name', models.CharField(max_length=256, verbose_name='Repository name')),
('state', django_fsm.FSMField(default='inactive', max_length=50)),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='Last modified')),
('comment', markupfield.fields.MarkupField(blank=True, null=True, rendered_field=True, verbose_name='Comment')),
('comment_markup_type', models.CharField(choices=[('', '--'), ('ReST', 'ReST')], default='ReST', max_length=30)),
('secret', models.CharField(default=qraz.frontend.models.get_secret, max_length=16, verbose_name='Shared secret for Github webhooks')),
('hook', models.PositiveIntegerField(null=True, verbose_name='ID of Github webhook')),
('_comment_rendered', models.TextField(editable=False, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 1.554688
| 2
|
src/CADRE/KS.py
|
JustinSGray/OpenMDAO-CADRE
| 1
|
12783192
|
import numpy as np
from openmdao.main.api import Component
from openmdao.lib.datatypes.api import Float, Array
class KSfunction(object):
"""Helper class that can be used inside other components to aggregate constraint
vectors with a KS function."""
def compute(self, g, rho=50):
"""Gets the value of the KS function for the given array of constraints."""
self.rho = rho
self.g_max = np.max(g)
self.g_diff = g-self.g_max
self.exponents = np.exp(rho * self.g_diff)
self.summation = np.sum(self.exponents)
self.KS = self.g_max + 1.0/rho * np.log(self.summation)
return self.KS
def derivatives(self):
"""returns a row vector of [dKS_gd, dKS_drho]"""
dsum_dg = self.rho*self.exponents
dKS_dsum = 1.0/self.rho/self.summation
self.dKS_dg = dKS_dsum * dsum_dg
dsum_drho = np.sum(self.g_diff*self.exponents)
self.dKS_drho = dKS_dsum * dsum_drho
return self.dKS_dg, self.dKS_drho
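# Descriptive note and worked check (not from the original source): for a
# constraint vector g and aggregation parameter rho, compute() above evaluates
# the Kreisselmeier-Steinhauser function
#
#     KS(g) = max(g) + (1/rho) * ln( sum_i exp(rho * (g_i - max(g))) )
#
# which is a smooth, conservative over-estimate of max(g). For example:
#
#     ks = KSfunction()
#     ks.compute(np.array([0.0, 1.0]), rho=50)   # ~1.0 + ln(1 + exp(-50))/50
#
# i.e. only marginally above the true maximum of 1.0.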
class KSComp(Component):
"""Aggregates a number of functions to a single value via the
Kreisselmeier-Steinhauser Function."""
rho = Float(.1,
iotype="in",
desc="Hyperparameter for the KS function")
KS = Float(0,
iotype="out",
desc="Value of the aggregate KS function")
def __init__(self, n=2):
        super(KSComp, self).__init__()
        self.n = n
        self.add('g', Array(np.zeros((n,)),
size=(n,1),
dtype=Float,
iotype="in",
desc="Array of function values to be aggregated"))
self._ks = KSfunction()
def execute(self):
self.KS = self._ks.compute(self.g, self.rho)
def linearize(self):
"""Linearize around the last executed point"""
        # use g_max, exponents, summation from the last executed point
self.J = np.hstack(self._ks.derivatives())
def provideDer(self):
ins = ('g','rho')
outs = ('KS', )
return ins, outs, self.J
| 2.765625
| 3
|
frontur_gui/model/FileSolverMenu.py
|
miguelbravo7/frontur_gui
| 0
|
12783193
|
from kivy.uix.textinput import TextInput
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import ObjectProperty
from kivy.uix.popup import Popup
from kivy.clock import Clock
from pandas import Timedelta
from frontur_gui.model.ComputeDialog import ComputeDialog
from frontur_gui.model.FileManagerLoad import FileManagerLoad
from frontur_gui.controller.SolverController import SolverController
import frontur_utilities.constants as const
import os
import json
class FileSolverMenu(BoxLayout):
container = ObjectProperty(None)
save_btn = ObjectProperty(None)
def __init__(self, **kwargs):
super(FileSolverMenu, self).__init__(**kwargs)
Clock.schedule_once(lambda dt: self.set_label_values())
@property
def is_save_disabled(self):
return not self.solver_df.fully_loaded
def set_label_values(self):
data = vars(const)
for child in reversed(self.container.children):
if isinstance(child, TextInput):
child.text = str(data[child.keyword])
elif isinstance(child, FileManagerLoad):
child.ids.text_input.text = str(data[child.keyword]) if child.keyword in data else ''
def get_dict_values(self):
self.interface_values = {}
for child in reversed(self.container.children):
if isinstance(child, TextInput):
self.interface_values[child.keyword] = float(child.text)
elif isinstance(child, FileManagerLoad):
self.interface_values[child.keyword] = child.text
with open(self.interface_values['REQ_INTERVIEWS_FILE_PATH']) as jfile:
data = json.load(jfile)
return {
"filename": self.interface_values['SOLVER_FILE_PATH'],
'solver_parameters': {
'workday_time': Timedelta(hours=float(self.interface_values['workday_time'])).seconds,
'rest_time': Timedelta(minutes=float(self.interface_values['rest_time'])).seconds,
'execution_time_limit': Timedelta(minutes=float(self.interface_values['execution_time_limit'])).seconds,
'country_kwargs': {
'plane_kwargs': {
'seats_used': float(self.interface_values['seats_used']),
'poll_success': float(self.interface_values['poll_success']),
'poll_time': Timedelta(seconds=float(self.interface_values['poll_time'])).seconds
},
'interviews': data
}
}
}
def save_configuration(self):
import frontur_utilities
dir_path = os.path.dirname(frontur_utilities.__file__)
data = {}
with open(dir_path + '/data/config.json') as f:
data = json.load(f)
with open(dir_path + '/data/config.json', 'w') as f:
modified_data = {**data, **self.interface_values}
f.write(json.dumps(modified_data, indent=4, sort_keys=False))
def run_solver(self):
self.solver_df = SolverController(**self.get_dict_values())
content = ComputeDialog(loading_method=self.solver_df.run, cancel=self.dismiss_popup,
callback=self.callback)
self._popup = Popup(title="Solving file", content=content,
size_hint=(0.9, 0.9), auto_dismiss=False)
self._popup.bind(on_open=self._popup.content.compute)
self._popup.open()
def dismiss_popup(self):
self.save_configuration()
self._popup.dismiss()
def callback(self):
self.save_btn.disabled = self.is_save_disabled
if not self.is_save_disabled:
self.save_btn.dataframe = self.solver_df.data_frame
| 2.03125
| 2
|
atom.py
|
greats3an/quicktimeatoms
| 1
|
12783194
|
<filename>atom.py
'''
# atom Module
Unpacks a video's QuickTime ATOM (`moov`) info via its ATOM header / footer
reference:https://developer.apple.com/library/archive/documentation/QuickTime/QTFF/QTFFChap2/qtff2.html (fig.2-3)
'''
import struct
import io
class _ATOM:
whences = {
'ATOM_SIZE': ('I', 4),
'ATOM_TYPE': ('', 4),
'ATOM_VERSION': ('B', 1),
'ATOM_FLAGS': ('', 3),
'ATOM_CREATION_TIME': ('I', 4),
'ATOM_MODIFICATION_TIME': ('I', 4),
'ATOM_TIMESCALE': ('I', 4),
'ATOM_DURATION': ('I', 4),
'ATOM_PREFERED_RATE': ('f', 4),
'ATOM_PREFERED_VOLUME': ('h', 2),
'ATOM_RESERVED': ('', 10),
'ATOM_MATRIX_STRUCT':('',36),
'ATOM_PREVIEW_TIME': ('I', 4),
'ATOM_PREVIEW_DURATION': ('I', 4),
'ATOM_POSTER_TIME': ('I', 4),
'ATOM_SELECTION_TIME': ('I', 4),
'ATOM_SELECTION_DURATION': ('I', 4),
'ATOM_CURRENT_TIME': ('I', 4),
'ATOM_NEXT': ('I', 4),
}
@staticmethod
def _locate_whence(key):
if not key in _ATOM.whences.keys():
            raise KeyError('"%s" is not a valid ATOM key!' % key)
offset, whence = 0, ('',0)
for k, v in _ATOM.whences.items():
if k == key:
whence = v
break
offset += v[1]
return offset,whence
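    # Worked example (illustrative): _ATOM._locate_whence('ATOM_TIMESCALE')
    # returns offset 20 with whence ('I', 4), because the preceding fields
    # (size 4 + type 4 + version 1 + flags 3 + creation 4 + modification 4)
    # occupy exactly 20 bytes before the timescale field.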
@staticmethod
def prop(func):
@property
def wrapper(self):
return self._read(func.__name__)
@wrapper.setter
def wrapper(self,value):
# here we go bois
return self._write(func.__name__, value)
return wrapper
class ATOM:
'''
# ATOM Movie header object
Parses a standard `mvhd` ATOM header
Usage:
- Reading
`mvhdbytes = ATOM.extract(videoblock)`
`mvhdheader = ATOM(mvhdbytes)`
- Writing
`header_offset = ATOM.locate(videoblock)`
`mvhdheader = ATOM(mvhdbytes)`
`...(do sth to the header)`
`new_header = mvhdheader()`
`videoblock[header_offset:header_offset + len(new_header)] = new_header`
see:https://developer.apple.com/library/archive/documentation/QuickTime/QTFF/QTFFChap2/qtff2.html
'''
def _read(self, key):
offset,whence = _ATOM._locate_whence(key)
# Parses current offset
mode,length = whence
block = self.mvhd[offset:offset+length]
if mode:
            # every ATOM field is big-endian ('>' in the struct format)
return struct.unpack('>'+mode,block)[0]
else:return block
def _write(self,key,value):
offset,whence = _ATOM._locate_whence(key)
# Parses current offset
mode,length = whence
new_block = value
if mode:
            # every ATOM field is big-endian ('>' in the struct format)
new_block = struct.pack('>'+mode,new_block)
else:new_block = bytearray(new_block)
# Writes new block to local buffer
self.mvhd[offset:offset+length] = new_block
def __init__(self, mvhd: bytearray):
'''Store raw mvhd header'''
self.mvhd = mvhd
def __call__(self):
        '''Once called, return the current mvhd buffer'''
return self.mvhd
# region Staticmethods
@staticmethod
def _index(subiter, mainiter) -> int:
'''
Indexing a iterable from another iterable
'''
        for i in range(0, len(mainiter) - len(subiter) + 1):
if mainiter[i:i+len(subiter)] == subiter:
return i
return -1
@staticmethod
def locate(pack, header='mvhd') -> int:
'''
Locates ATOM Header offset
'''
header_index = ATOM._index(header.encode(), pack) - len(header)
# Locating header
if header_index < 0:
raise Exception(f"{header} Header not found")
return header_index
@staticmethod
def extract(pack) -> bytearray:
'''
Extracts bytesarray to get mvhd header
'''
header_index = ATOM.locate(pack)
ATOM_SIZE = bytearray(pack[header_index:header_index + 4])
ATOM_SIZE = struct.unpack('>I', ATOM_SIZE)[0]
# ATOM size,should be 108 bytes in most cases
pack = bytearray(pack[header_index:header_index + ATOM_SIZE])
return pack
# endregion
# region Properties
@_ATOM.prop
def ATOM_SIZE(self):
"""A 32-bit integer that specifies the number of bytes in this movie header atom"""
pass
@_ATOM.prop
def ATOM_TYPE(self):
"""A 32-bit integer that identifies the atom type; must be set to 'mvhd'."""
pass
@_ATOM.prop
def ATOM_VERSION(self):
"""A 1-byte specification of the version of this movie header atom."""
pass
@_ATOM.prop
def ATOM_FLAGS(self):
"""Three bytes of space for future movie header flags."""
pass
@_ATOM.prop
def ATOM_CREATION_TIME(self):
"""
A 32-bit integer that specifies the calendar date and time (in seconds since midnight, January 1, 1904)
when the movie atom was created. It is strongly recommended that this value should be specified using coordinated universal time (UTC).
"""
pass
@_ATOM.prop
def ATOM_MODIFICATION_TIME(self):
"""
A 32-bit integer that specifies the calendar date and time (in seconds since midnight, January 1, 1904)
        when the movie atom was changed. It is strongly recommended that this value should be specified using coordinated universal time (UTC).
"""
pass
@_ATOM.prop
def ATOM_TIMESCALE(self):
"""
A time value that indicates the time scale for this movie—that is,
the number of time units that pass per second in its time coordinate system.
A time coordinate system that measures time in sixtieths of a second, for example, has a time scale of 60.
"""
pass
@_ATOM.prop
def ATOM_DURATION(self):
"""
A time value that indicates the duration of the movie in time scale units.
Note that this property is derived from the movie’s tracks. The value of this field corresponds to the duration of the longest track in the movie.
"""
pass
@_ATOM.prop
def ATOM_PREFERED_RATE(self):
"""A 32-bit fixed-point number that specifies the rate at which to play this movie. A value of 1.0 indicates normal rate."""
pass
@_ATOM.prop
def ATOM_PREFERED_VOLUME(self):
"""A 16-bit fixed-point number that specifies how loud to play this movie’s sound. A value of 1.0 indicates full volume."""
pass
@_ATOM.prop
def ATOM_RESERVED(self):
"""Ten bytes reserved for use by Apple. Set to 0."""
pass
@_ATOM.prop
def ATOM_MATRIX_STRUCT(self):
"""
The matrix structure associated with this movie. A matrix shows how to map points from one coordinate space into another.
"""
pass
@_ATOM.prop
def ATOM_PREVIEW_TIME(self):
"""The time value in the movie at which the preview begins."""
pass
@_ATOM.prop
def ATOM_PREVIEW_DURATION(self):
"""The duration of the movie preview in movie time scale units."""
pass
@_ATOM.prop
def ATOM_POSTER_TIME(self):
"""The time value of the time of the movie poster."""
pass
@_ATOM.prop
def ATOM_SELECTION_TIME(self):
"""The time value for the start time of the current selection."""
pass
@_ATOM.prop
def ATOM_SELECTION_DURATION(self):
"""The duration of the current selection in movie time scale units."""
pass
@_ATOM.prop
def ATOM_CURRENT_TIME(self):
"""The time value for current time position within the movie."""
pass
@_ATOM.prop
def ATOM_NEXT(self):
"""A 32-bit integer that indicates a value to use for the track ID number of the next track added to this movie. Note that 0 is not a valid track ID value."""
pass
@property
def ATOM_DURATION_SENCONDS(self):
return self.ATOM_DURATION / self.ATOM_TIMESCALE
# endregion
def unpack(block: bytearray) -> ATOM:
'''Unpacks the ATOM header from a bytearray block'''
return ATOM(ATOM.extract(block))
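# Minimal usage sketch (assumptions: 'movie.mp4' is a hypothetical local file
# whose mvhd atom falls within the first few kilobytes read below):
#
#     data = bytearray(open('movie.mp4', 'rb').read(4096))
#     atom = unpack(data)                       # parse the mvhd header
#     print(atom.ATOM_DURATION_SENCONDS)        # duration via the timescale
#
#     offset = ATOM.locate(data)                # patch the header in place
#     atom.ATOM_PREFERED_RATE = 2.0
#     data[offset:offset + len(atom())] = atom()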
if __name__ == "__main__":
path = input('Media file path >>>').replace("\"", '')
header = open(path, 'rb').read(256)
atom = unpack(header)
def printout():
print('#'*50)
for key in dir(atom):
if key[:4] == 'ATOM':
print(key.ljust(24), getattr(atom, key))
print('#'*50)
return 'Press any key to exit'
input(printout())
| 2.734375
| 3
|
asab/zookeeper/builder.py
|
TeskaLabs/asab
| 23
|
12783195
|
from urllib.parse import urlparse
import aiozk
import logging
L = logging.getLogger(__name__)
"""
This module builds ZooKeeper clients from Configs and urls
urls supported :
1. Absolute url.
Example: zookeeper://zookeeper:12181/etc/configs/file1
2. Relative url with full path
Example: zookeeper:///etc/configs/file1
In this case the relative url is expanded as follows:
zookeeper://{default_server}/etc/configs/file1
Where {default_server} is substituted with the "servers" entry of the [asab:zookeeper] configuration file section.
3. Relative url with relative path
Example: zookeeper:./etc/configs/file1
In this case, the relative url is expanded as follows:
zookeeper://{default_server}/{default_path}/etc/configs/file1
Where {default_server} is substituted with the "servers" entry of the [asab:zookeeper] configuration file section and
{default_path} is substituted with the "path" entry of the [asab:zookeeper] configuration file section.
Sample config file:
[asab:zookeeper]
servers=server1 server2 server3 <-- Default servers
path=/myfolder <-- Default path
"""
def build_client(Config, z_url):
    # Initialize variables
url_netloc = ''
url_path = ''
# Parse URL
if z_url is not None:
url_pieces = urlparse(z_url)
url_netloc = url_pieces.netloc
url_path = url_pieces.path
# If there is no location, use implied
if url_netloc == '':
        # if the servers entry is missing, return (None, None)
if not Config.has_option("asab:zookeeper", "servers"):
L.error("Servers entry not passed in the configuration.")
return None, None
else:
url_netloc = Config["asab:zookeeper"]["servers"]
if url_path == '':
        # if the path entry is missing, return with the path as None.
if not Config.has_option("asab:zookeeper", "path"):
L.error("Path entry not passed in the configuration.")
return url_netloc, None
else:
url_path = Config["asab:zookeeper"]["path"]
if url_path.startswith("/"):
url_path = url_path.strip("/")
# Create and return the client and the url-path
client = aiozk.ZKClient(url_netloc)
return client, url_path
| 2.875
| 3
|
src/eval_pp.py
|
jerichooconnell/lcse_tools
| 0
|
12783196
|
<reponame>jerichooconnell/lcse_tools<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
C Pre-Processor #define evaluator.
Usage:
.. code:: bash
eval_pp.py input_file.F -o output_file.F -D israyprofile=0 isrestart=0
'''
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = '25 Apr 2015'
__license__ = "Apache 2.0"
__version__ = "1.0"
import argparse
import logging
import re
# TODO: Remove blank statements
class def_stmt(object):
def __init__(self, l):
self.line = l
tmp = l.split()
self.name = tmp[1]
self.expr = tmp[2:]
def __str__(self):
return str(self.line)
class if_stmt(object):
def __init__(self, l):
self.line = l
self.first = []
self.second = []
tmp = l.split()
self.expr = ' '.join(tmp[1:])
self.eval_res = None
self.prefix = tmp[0]
self._ifdef = tmp[0] == '#ifdef'
def __str__(self):
out = [self.line]
out += [str(s) for s in self.first]
if self.second:
out += ['#else\n']
out += [str(s) for s in self.second]
out += ['#endif\n']
s = ''.join(out)
return s
def eval_cond(self, symbols):
        ''' Evaluate this if statement '''
# Propagate the eval to nested if statements
first = [s.eval_cond(symbols)[1] if isinstance(s, if_stmt) else s for s in self.first]
second = [s.eval_cond(symbols)[1] if isinstance(s, if_stmt) else s for s in self.second]
expr = self.expr.strip()
res = None
if self._ifdef:
# We don't want to remove undefined symbols at this point.
# Could be an option in the future.
res = True if expr in symbols else None
else:
expr = filter_expr(expr, symbols)
try:
res = bool(eval(expr))
except NameError:
res = None
except SyntaxError as e:
print expr, self.expr
res_str = ''
if res is True:
res_str = ''.join(str(s) for s in first)
elif res is False:
res_str = ''.join(str(s) for s in second)
else:
out = ['%s %s\n' % (self.prefix, expr)]
out += [str(s) for s in first]
if second:
out += ['#else\n']
out += [str(s) for s in second]
out += ['#endif\n']
res_str = ''.join(out)
return res, res_str
def filter_expr(expr, symbols):
'''Tokenize expr and replace symbols in the stream. Otherwise doing a search
and replace on a symbol can partially match other symbols (isSPU vs isSPUC)
'''
res_expr = []
for s in re.split('(\W)', expr):
if len(s) > 0:
res_expr.append(s if s not in symbols else symbols[s])
return ''.join(res_expr)
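# Illustrative example with hypothetical symbols: given symbols = {'isSPU': '1'},
# filter_expr('isSPU == 0 and isSPUC', symbols) yields '1 == 0 and isSPUC'.
# The token 'isSPUC' is left untouched because whole tokens are compared,
# whereas a plain substring search-and-replace would have corrupted it.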
def process_file(file_in, file_out, **kwargs):
# Read the file in full (we have enough ram)
f = open(file_in)
lines = f.readlines()
f.close()
ix = 0
eval_list = {'ismpi':1}
symbols = {}
statements_raw, ix = consume_lines(lines, symbols, ix, 0)
eval_symbols = kwargs.get('symbols')
statements = []
if eval_symbols:
for s in statements_raw:
if isinstance(s, if_stmt):
r, res_s = s.eval_cond(eval_symbols)
statements.append(res_s)
else:
statements.append(s)
else:
statements = statements_raw
f = open(file_out, 'w')
f.write(''.join(str(s) for s in statements))
f.close()
def consume_lines(lines, symbols, ix, level):
statements = []
lines_ct = len(lines)
ifs = None
while ix < lines_ct:
top = lines[ix]
if top.startswith('#define'):
ds = def_stmt(top)
if level == 0:
symbols[ds.name] = ds
statements.append(ds)
ix += 1
elif top.startswith('#if'):
ifs = if_stmt(top)
ifs.first, ix = consume_lines(lines, symbols, ix+1, level+1)
elif top.startswith('#else'):
if ifs:
ifs.second, ix = consume_lines(lines, symbols, ix+1, level+1)
else:
return statements, ix
elif top.startswith('#endif'):
if ifs:
statements.append(ifs)
ix += 1
ifs = None
else:
return statements, ix
else:
statements.append(top)
ix += 1
return statements, ix
def build_argparse():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('file_in', metavar='input', help='Intput file')
parser.add_argument('-o', dest='file_out', metavar='output', default='code.F', help='Output file')
parser.add_argument('-D', dest='symbols', metavar='key=value', nargs='*', help='Space separated key=value to evaluate')
return parser
def main():
parser = build_argparse()
args = parser.parse_args()
symbols = dict(s.split('=') for s in args.symbols) if args.symbols else {}
process_file(args.file_in, args.file_out, symbols=symbols)
if __name__ == '__main__':
main()
| 2.703125
| 3
|
tests/test_make_cogs.py
|
washreve/hyp3-lib
| 4
|
12783197
|
import os
import shutil
import requests
from hyp3lib.make_cogs import cogify_dir, cogify_file
def _is_cog(filename):
with open(filename, 'rb') as f:
response = requests.post('http://cog-validate.radiant.earth/api/validate', files={'file': f})
return response.status_code == 200
def test_make_cog(geotiff):
assert not _is_cog(geotiff)
cogify_file(geotiff)
assert _is_cog(geotiff)
def test_cogify_dir(geotiff):
base_dir = os.path.dirname(geotiff)
copy_names = [os.path.join(base_dir, '1.tif'), os.path.join(base_dir, '2.tif')]
for name in copy_names:
shutil.copy(geotiff, name)
# Only cogify our copied files
cogify_dir(base_dir, file_pattern='?.tif')
for name in copy_names:
assert _is_cog(name)
assert not _is_cog(geotiff)
| 2.671875
| 3
|
pi/probemon.py
|
pirsquareff/wireless-wads
| 0
|
12783198
|
#!/usr/bin/python
import time
from datetime import datetime
import argparse
import netaddr
import sys
import logging
from scapy.all import *
from pprint import pprint
from logging.handlers import RotatingFileHandler
NAME = 'probemon'
DESCRIPTION = "a command line tool for logging 802.11 probe request frames"
DEBUG = False
def build_packet_callback(time_fmt, logger, delimiter, mac_info, ssid, rssi):
def packet_callback(packet):
if not packet.haslayer(Dot11):
return
# we are looking for management frames with a probe subtype
# if neither match we are done here
if packet.type != 0 or packet.subtype != 0x04:
return
# list of output fields
fields = []
# determine preferred time format
log_time = str(int(time.time()))
if time_fmt == 'iso':
log_time = datetime.now().isoformat()
fields.append(log_time)
# append the mac address itself
fields.append(packet.addr2)
# parse mac address and look up the organization from the vendor octets
if mac_info:
try:
parsed_mac = netaddr.EUI(packet.addr2)
fields.append(parsed_mac.oui.registration().org)
except netaddr.core.NotRegisteredError, e:
fields.append('UNKNOWN')
# include the SSID in the probe frame
if ssid:
fields.append(packet.info)
if rssi:
rssi_val = -(256-ord(packet.notdecoded[-4:-3]))
fields.append(str(rssi_val))
logger.info(delimiter.join(fields))
return packet_callback
def main():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('-i', '--interface', help="capture interface")
parser.add_argument('-t', '--time', default='iso', help="output time format (unix, iso)")
parser.add_argument('-o', '--output', default='probemon.log', help="logging output location")
parser.add_argument('-b', '--max-bytes', default=5000000, help="maximum log size in bytes before rotating")
parser.add_argument('-c', '--max-backups', default=99999, help="maximum number of log files to keep")
parser.add_argument('-d', '--delimiter', default='\t', help="output field delimiter")
parser.add_argument('-f', '--mac-info', action='store_true', help="include MAC address manufacturer")
parser.add_argument('-s', '--ssid', action='store_true', help="include probe SSID in output")
parser.add_argument('-r', '--rssi', action='store_true', help="include rssi in output")
parser.add_argument('-D', '--debug', action='store_true', help="enable debug output")
parser.add_argument('-l', '--log', action='store_true', help="enable scrolling live view of the logfile")
args = parser.parse_args()
if not args.interface:
print "error: capture interface not given, try --help"
sys.exit(-1)
DEBUG = args.debug
# setup our rotating logger
logger = logging.getLogger(NAME)
logger.setLevel(logging.INFO)
handler = RotatingFileHandler(args.output, maxBytes=args.max_bytes, backupCount=args.max_backups)
logger.addHandler(handler)
if args.log:
logger.addHandler(logging.StreamHandler(sys.stdout))
built_packet_cb = build_packet_callback(args.time, logger,
args.delimiter, args.mac_info, args.ssid, args.rssi)
sniff(iface=args.interface, prn=built_packet_cb, store=0)
if __name__ == '__main__':
main()
| 2.5625
| 3
|
stylegan/get_dataset.py
|
britt0508/ExplainedKinshipCorrect
| 0
|
12783199
|
import csv
import numpy as np
import pandas as pd
# from stylegan.metrics import linear_separability
from collections import defaultdict
from glob import glob
from random import choice, sample
def get_data():
train_file_path = "/content/drive/MyDrive/ExplainedKinshipData/data/train-pairs.csv"
train_folders_path = "/content/drive/MyDrive/ExplainedKinshipData/data/train-faces/"
val_families = "F09"
all_images = glob(train_folders_path + "*/*/*.jpg")
train_images = [x for x in all_images if val_families not in x]
val_images = [x for x in all_images if val_families in x]
train_person_to_images_map = defaultdict(list)
ppl = [x.split("/")[-3] + "/" + x.split("/")[-2] for x in all_images]
for x in train_images:
train_person_to_images_map[x.split("/")[-3] + "/" + x.split("/")[-2]].append(x)
val_person_to_images_map = defaultdict(list)
for x in val_images:
val_person_to_images_map[x.split("/")[-3] + "/" + x.split("/")[-2]].append(x)
relationships = pd.read_csv(train_file_path)
relationships = list(zip(relationships.p1.values, relationships.p2.values))
relationships = [x for x in relationships if x[0] in ppl and x[1] in ppl]
train = [x for x in relationships if val_families not in x[0]]
val = [x for x in relationships if val_families in x[0]]
print(relationships)
print(train)
gen(train, train_person_to_images_map, batch_size=16)
def gen(list_tuples, person_to_images_map, batch_size=16):
ppl = list(person_to_images_map.keys())
while True:
batch_tuples = sample(list_tuples, batch_size // 2)
labels = [1] * len(batch_tuples)
while len(batch_tuples) < batch_size:
p1 = choice(ppl)
p2 = choice(ppl)
if p1 != p2 and (p1, p2) not in list_tuples and (p2, p1) not in list_tuples:
batch_tuples.append((p1, p2))
labels.append(0)
for x in batch_tuples:
if not len(person_to_images_map[x[0]]):
print(x[0])
X1 = [choice(person_to_images_map[x[0]]) for x in batch_tuples]
X1 = np.array([read_img(x) for x in X1])
X2 = [choice(person_to_images_map[x[1]]) for x in batch_tuples]
X2 = np.array([read_img(x) for x in X2])
print(X1, X2, labels)
yield [X1, X2], labels
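# Descriptive note (not from the original source): each batch yielded above is
# half genuine kin pairs drawn from list_tuples (label 1) and half randomly
# sampled non-kin pairs (label 0). read_img is assumed to be an image-loading
# helper provided by the surrounding training code; it is not defined in this
# file.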
# df_pairs = pd.read_csv("/content/drive/MyDrive/ExplainedKinshipData/data/train-pairs.csv")
# print(df_pairs)
# features = []
# for pair in df_pairs:
# current_features_1 = linear_separability.get_features(pair["p1"], pair["ptype"])
# current_features_2 = linear_separability.get_features(pair["p2"], pair["ptype"])
# features.append([current_features_1, current_features_2])
# df_pairs.insert(-1, "features", features, True)
# print(df_pairs)
# return df_pairs
get_data()
| 2.640625
| 3
|
seqauto/tests/test_models.py
|
SACGF/variantgrid
| 5
|
12783200
|
<reponame>SACGF/variantgrid<filename>seqauto/tests/test_models.py
import logging
import os
from django.conf import settings
from django.test import TestCase
from genes.canonical_transcripts.canonical_transcript_manager import CanonicalTranscriptManager
from genes.canonical_transcripts.create_canonical_transcripts import create_canonical_transcript_collection
from genes.gene_matching import GeneSymbolMatcher
from genes.models import GeneCoverageCollection, TranscriptVersion
from seqauto.models import SequencerModel, Sequencer, SequencingRun, SampleSheet, \
SequencingRunCurrentSampleSheet, SequencingSample, Fastq, UnalignedReads, Aligner, \
BamFile, VCFFile, QC, VariantCaller, EnrichmentKit, QCGeneCoverage
from seqauto.models.models_enums import DataGeneration
from seqauto.tasks.gold_summary_tasks import calculate_gold_summary
from snpdb.models import Manufacturer, GenomeBuild, DataState
class TestSeqAutoModels(TestCase):
TEST_DATA = os.path.join(settings.BASE_DIR, "seqauto", "test_data", "clinical_hg38")
GENES_TEST_DATA = os.path.join(settings.BASE_DIR, "genes", "tests", "test_data")
SEQUENCING_RUN_CAPTURE = "Exome_20_022_200920_NB501009_0410_AHNLYFBGXG"
CANONICAL_TRANSCRIPTS = os.path.join(GENES_TEST_DATA, "canonical_transcripts.tsv")
@staticmethod
def _create_sequencing_run(sequencing_run_name, enrichment_kit=None):
sequencer_model, _ = SequencerModel.objects.get_or_create(model='NextSeq',
data_naming_convention=DataGeneration.MISEQ)
sequencer, _ = Sequencer.objects.get_or_create(name="my sequencer",
sequencer_model=sequencer_model)
sequencing_run, _ = SequencingRun.objects.get_or_create(name=sequencing_run_name,
sequencer=sequencer,
enrichment_kit=enrichment_kit)
return sequencing_run
@staticmethod
def _create_sequencing_qc(sequencing_run, sample_name, enrichment_kit=None):
""" Create all the way down to QC """
sample_sheet, _ = SampleSheet.objects.get_or_create(sequencing_run=sequencing_run)
SequencingRunCurrentSampleSheet.objects.get_or_create(sequencing_run=sequencing_run,
sample_sheet=sample_sheet)
sequencing_sample, _ = SequencingSample.objects.get_or_create(sample_sheet=sample_sheet,
sample_name=sample_name,
sample_number=1,
enrichment_kit=enrichment_kit)
fastq, _ = Fastq.objects.get_or_create(sequencing_sample=sequencing_sample)
unaligned_reads, _ = UnalignedReads.objects.get_or_create(sequencing_sample=sequencing_sample,
fastq_r1=fastq)
aligner, _ = Aligner.objects.get_or_create(name="Fake Aligner")
bam_file, _ = BamFile.objects.get_or_create(unaligned_reads=unaligned_reads,
aligner=aligner)
variant_caller, _ = VariantCaller.objects.get_or_create(name="Fake Caller")
vcf_file, _ = VCFFile.objects.get_or_create(bam_file=bam_file,
variant_caller=variant_caller)
path = os.path.join(TestSeqAutoModels.TEST_DATA,
"idt_exome/Exome_20_022_200920_NB501009_0410_AHNLYFBGXG/4_QC/exec_stats/hiseq_sample1_stats.txt")
qc, _ = QC.objects.get_or_create(bam_file=bam_file, vcf_file=vcf_file,
defaults={"path": path})
return qc
def _create_enrichment_kit(self, name="fake_kit"):
manufacturer, _ = Manufacturer.objects.get_or_create(name="Agilluminent")
enrichment_kit, _ = EnrichmentKit.objects.get_or_create(name=name,
version=1,
manufacturer=manufacturer)
if enrichment_kit.canonical_transcript_collection is None:
enrichment_kit.canonical_transcript_collection = create_canonical_transcript_collection(self.genome_build,
self.genome_build.annotation_consortium,
TestSeqAutoModels.CANONICAL_TRANSCRIPTS,
gene_matcher=GeneSymbolMatcher())
return enrichment_kit
@staticmethod
def _create_qc_gene_coverage(qc, gene_filename, genome_build: GenomeBuild):
gcc, _ = GeneCoverageCollection.objects.get_or_create(path=gene_filename, genome_build=genome_build)
qcgc_defaults = {"path": gene_filename,
"data_state": DataState.COMPLETE,
"gene_coverage_collection": gcc}
qcgc, _ = QCGeneCoverage.objects.get_or_create(qc=qc, defaults=qcgc_defaults)
return qcgc
def setUp(self):
EnrichmentKit.objects.all().delete()
SequencingRun.objects.all().delete()
self.genome_build = GenomeBuild.grch37()
self.gene_matcher = GeneSymbolMatcher()
self.canonical_transcript_manager = CanonicalTranscriptManager(use_system_default=False)
self.transcript_versions_by_id = TranscriptVersion.transcript_versions_by_id(self.genome_build, self.genome_build.annotation_consortium)
def test_load_qc_exec_summary(self):
enrichment_kit = self._create_enrichment_kit("idt_exome")
sequencing_run = TestSeqAutoModels._create_sequencing_run(self.SEQUENCING_RUN_CAPTURE,
enrichment_kit=enrichment_kit)
qc = TestSeqAutoModels._create_sequencing_qc(sequencing_run,
"hiseq_sample1",
enrichment_kit=enrichment_kit)
qc.load_from_file(None)
qc_exec_summary = qc.qcexecsummary_set.order_by("pk").last()
self.assertAlmostEqual(qc_exec_summary.mean_coverage_across_genes, 262.84, places=2)
def test_gene_coverage_capture(self):
""" Capture uses headers: ["% bases >20x", "% bases <10x"] """
CAPTURE_GENES = os.path.join(self.TEST_DATA, f"idt_exome/{self.SEQUENCING_RUN_CAPTURE}/4_QC/bam_stats/samples/hiseq_sample1.per_gene_coverage.tsv.gz")
enrichment_kit = self._create_enrichment_kit()
sequencing_run = TestSeqAutoModels._create_sequencing_run(self.SEQUENCING_RUN_CAPTURE,
enrichment_kit=enrichment_kit)
qc = TestSeqAutoModels._create_sequencing_qc(sequencing_run,
"hiseq_sample1",
enrichment_kit=enrichment_kit)
qcgc = self._create_qc_gene_coverage(qc, CAPTURE_GENES, self.genome_build)
qcgc.load_from_file(None,
gene_matcher=self.gene_matcher,
canonical_transcript_manager=self.canonical_transcript_manager,
transcript_versions_by_id=self.transcript_versions_by_id)
gcc = qcgc.gene_coverage_collection
num_genes_covered = gcc.genecoverage_set.count()
num_canonical_genes = gcc.genecoveragecanonicaltranscript_set.count()
logging.info("genes: %d, canonical: %d", num_genes_covered, num_canonical_genes)
def test_deleted_gene_coverage(self):
""" #1619 - Ensure missing file sets data_state=DELETED and doesn't throw error. """
MISSING_GENES_FILE = os.path.join(self.TEST_DATA, "no_such_file.txt")
enrichment_kit = self._create_enrichment_kit()
sequencing_run = TestSeqAutoModels._create_sequencing_run(self.SEQUENCING_RUN_CAPTURE,
enrichment_kit=enrichment_kit)
qc = TestSeqAutoModels._create_sequencing_qc(sequencing_run,
"blah_blah",
enrichment_kit=enrichment_kit)
qcgc = self._create_qc_gene_coverage(qc, MISSING_GENES_FILE, self.genome_build)
qcgc.load_from_file(None,
gene_matcher=self.gene_matcher,
canonical_transcript_manager=self.canonical_transcript_manager,
transcript_versions_by_id=self.transcript_versions_by_id)
msg = "Missing file should set data_state to DELETED was %s" % qcgc.get_data_state_display()
self.assertEqual(qcgc.data_state, DataState.DELETED, msg)
def test_gold_coverage(self):
""" This test doesn't work very well as there's no genes/Transcripts/UCSC aliases etc """
SEQUENCING_RUN_NAME = "Exome_20_022_200920_NB501009_0410_AHNLYFBGXG"
GENE_FILES_PATTERN = os.path.join(self.TEST_DATA, "idt_exome", SEQUENCING_RUN_NAME, "4_QC", "bam_stats",
"samples", "%s.per_gene_coverage.tsv.gz")
enrichment_kit = self._create_enrichment_kit()
sequencing_run = TestSeqAutoModels._create_sequencing_run(SEQUENCING_RUN_NAME,
enrichment_kit=enrichment_kit)
sequencing_run.gold_standard = True
sequencing_run.save()
SAMPLE_NAMES = ["hiseq_sample1", "hiseq_sample2"]
for sample_name in SAMPLE_NAMES:
qc = TestSeqAutoModels._create_sequencing_qc(sequencing_run,
sample_name,
enrichment_kit=enrichment_kit)
gene_filename = GENE_FILES_PATTERN % sample_name
logging.info(gene_filename)
qcgc = self._create_qc_gene_coverage(qc, gene_filename, self.genome_build)
print(f"created coverage for qc: {qc} - data state: {qcgc.get_data_state_display()} path: {qc.path}")
qcgc.load_from_file(None,
gene_matcher=self.gene_matcher,
canonical_transcript_manager=self.canonical_transcript_manager,
transcript_versions_by_id=self.transcript_versions_by_id)
# logging.info("%s Coverage:" % sample_name)
# logging.info(gcc.genecoverage_set.filter(gene_symbol__contains="MIR1302").values())
#
# logging.info("%s Canonical:" % sample_name)
# logging.info(gcc.genecoveragecanonicaltranscript_set.filter(gene_symbol__contains="MIR1302").values())
calculate_gold_summary(enrichment_kit.pk)
num_gold_samples = enrichment_kit.goldreference.goldgenecoveragecollection_set.count()
msg = "Created gold samples for each of %s" % ", ".join(SAMPLE_NAMES)
self.assertEqual(num_gold_samples, len(SAMPLE_NAMES), msg)
| 1.882813
| 2
|
01/day-01.py
|
dfinninger/aoc-2017
| 0
|
12783201
|
#!/usr/bin/env python3
import argparse
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('input', help='program input')
return parser.parse_args()
def _solve_first(ipt):
chars = list(str(ipt))
accum = 0
for item in enumerate(chars):
if item[1] == chars[item[0]-1]:
accum += int(item[1])
return accum
def _solve_second(ipt):
chars = list(str(ipt))
accum = 0
for item in enumerate(chars):
idx = (item[0] + len(chars) // 2) % len(chars)
if item[1] == chars[idx]:
accum += int(item[1])
return accum
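# Worked check of the two rules implemented above:
#   _solve_first('1122')  -> 3  (the pairs '11' and '22' contribute 1 and 2)
#   _solve_second('1212') -> 6  (every digit matches the one halfway around)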
def main():
args = _parse_args()
ans1 = _solve_first(args.input)
ans2 = _solve_second(args.input)
print('Part One:', ans1)
print('Part Two:', ans2)
if __name__ == '__main__':
main()
| 3.8125
| 4
|
tests/test_matrices.py
|
readthedocs-assistant/tabmat
| 0
|
12783202
|
<reponame>readthedocs-assistant/tabmat<filename>tests/test_matrices.py
import warnings
from typing import List, Optional, Union
import numpy as np
import pandas as pd
import pytest
from scipy import sparse as sps
import tabmat as tm
def base_array(order="F") -> np.ndarray:
return np.array([[0, 0], [0, -1.0], [0, 2.0]], order=order)
def dense_matrix_F() -> tm.DenseMatrix:
return tm.DenseMatrix(base_array())
def dense_matrix_C() -> tm.DenseMatrix:
return tm.DenseMatrix(base_array(order="C"))
def dense_matrix_not_writeable() -> tm.DenseMatrix:
mat = dense_matrix_F()
mat.setflags(write=False)
return mat
def sparse_matrix() -> tm.SparseMatrix:
return tm.SparseMatrix(sps.csc_matrix(base_array()))
def sparse_matrix_64() -> tm.SparseMatrix:
csc = sps.csc_matrix(base_array())
mat = tm.SparseMatrix(
(csc.data, csc.indices.astype(np.int64), csc.indptr.astype(np.int64))
)
return mat
def categorical_matrix():
vec = [1, 0, 1]
return tm.CategoricalMatrix(vec)
def get_unscaled_matrices() -> List[
Union[tm.DenseMatrix, tm.SparseMatrix, tm.CategoricalMatrix]
]:
return [
dense_matrix_F(),
dense_matrix_C(),
dense_matrix_not_writeable(),
sparse_matrix(),
sparse_matrix_64(),
categorical_matrix(),
]
def complex_split_matrix():
return tm.SplitMatrix(get_unscaled_matrices())
def shift_complex_split_matrix():
mat = complex_split_matrix()
np.random.seed(0)
return tm.StandardizedMatrix(mat, np.random.random(mat.shape[1]))
def shift_scale_complex_split_matrix():
mat = complex_split_matrix()
np.random.seed(0)
return tm.StandardizedMatrix(
mat, np.random.random(mat.shape[1]), np.random.random(mat.shape[1])
)
def get_all_matrix_base_subclass_mats():
return get_unscaled_matrices() + [complex_split_matrix()]
def get_standardized_shifted_matrices():
return [tm.StandardizedMatrix(elt, [0.3, 2]) for elt in get_unscaled_matrices()] + [
shift_complex_split_matrix()
]
def get_standardized_shifted_scaled_matrices():
return [
tm.StandardizedMatrix(elt, [0.3, 0.2], [0.6, 1.67])
for elt in get_unscaled_matrices()
] + [shift_scale_complex_split_matrix()]
def get_matrices():
return (
get_all_matrix_base_subclass_mats()
+ get_standardized_shifted_matrices()
+ get_standardized_shifted_scaled_matrices()
)
@pytest.mark.parametrize("mat", get_matrices())
@pytest.mark.parametrize("cols", [None, [], [1], np.array([1])])
def test_matvec_out_parameter_wrong_shape(mat, cols):
out = np.zeros(mat.shape[0] + 1)
v = np.zeros(mat.shape[1])
with pytest.raises(ValueError, match="first dimension of 'out' must be"):
mat.matvec(v, cols, out)
@pytest.mark.parametrize("mat", get_matrices())
@pytest.mark.parametrize("cols", [None, [], [1], np.array([1])])
@pytest.mark.parametrize("rows", [None, [], [1], np.array([1])])
def test_transpose_matvec_out_parameter_wrong_shape(mat, cols, rows):
out = np.zeros(mat.shape[1] + 1)
v = np.zeros(mat.shape[0])
with pytest.raises(ValueError, match="dimension of 'out' must be"):
mat.transpose_matvec(v, rows, cols, out)
@pytest.mark.parametrize("mat", get_matrices())
@pytest.mark.parametrize("cols", [None, [], [1], np.array([1])])
def test_matvec_out_parameter(mat, cols):
out = np.random.rand(mat.shape[0])
out_copy = out.copy()
v = np.random.rand(mat.shape[1])
# This should modify out in place.
out2 = mat.matvec(v, cols=cols, out=out)
assert out.__array_interface__["data"][0] == out2.__array_interface__["data"][0]
assert out.shape == out_copy.shape
correct = out_copy + mat.matvec(v, cols=cols)
np.testing.assert_almost_equal(out, out2)
np.testing.assert_almost_equal(out, correct)
@pytest.mark.parametrize("mat", get_matrices())
@pytest.mark.parametrize("cols", [None, [], [1], np.array([0, 1])])
@pytest.mark.parametrize("rows", [None, [], [1], np.array([0, 2])])
def test_transpose_matvec_out_parameter(mat, cols, rows):
out = np.random.rand(mat.shape[1])
out_copy = out.copy()
v = np.random.rand(mat.shape[0])
# This should modify out in place.
out2 = mat.transpose_matvec(v, rows=rows, cols=cols, out=out)
# Check that modification has been in-place
assert out.__array_interface__["data"][0] == out2.__array_interface__["data"][0]
assert out.shape == out_copy.shape
col_idx = np.arange(mat.shape[1], dtype=int) if cols is None else cols
row_idx = np.arange(mat.shape[0], dtype=int) if rows is None else rows
matvec_part = mat.A[row_idx, :][:, col_idx].T.dot(v[row_idx])
if cols is None:
correct = out_copy + matvec_part
else:
correct = out_copy
correct[cols] += matvec_part
np.testing.assert_almost_equal(out, out2)
np.testing.assert_almost_equal(out, correct)
@pytest.mark.parametrize("mat", get_matrices())
@pytest.mark.parametrize("i", [1, -2])
def test_getcol(mat: Union[tm.MatrixBase, tm.StandardizedMatrix], i):
col = mat.getcol(i)
if not isinstance(col, np.ndarray):
col = col.A
np.testing.assert_almost_equal(col, mat.A[:, [i]])
@pytest.mark.parametrize("mat", get_all_matrix_base_subclass_mats())
def test_to_array_matrix_base(mat: tm.MatrixBase):
assert isinstance(mat.A, np.ndarray)
if isinstance(mat, tm.CategoricalMatrix):
expected = np.array([[0, 1], [1, 0], [0, 1]])
elif isinstance(mat, tm.SplitMatrix):
expected = np.hstack([elt.A for elt in mat.matrices])
else:
expected = base_array()
np.testing.assert_allclose(mat.A, expected)
@pytest.mark.parametrize(
"mat",
get_standardized_shifted_matrices() + get_standardized_shifted_scaled_matrices(),
)
def test_to_array_standardized_mat(mat: tm.StandardizedMatrix):
assert isinstance(mat.A, np.ndarray)
true_mat_part = mat.mat.A
if mat.mult is not None:
true_mat_part = mat.mult[None, :] * mat.mat.A
np.testing.assert_allclose(mat.A, true_mat_part + mat.shift)
@pytest.mark.parametrize("mat", get_matrices())
@pytest.mark.parametrize(
"other_type",
[lambda x: x, np.asarray, tm.DenseMatrix],
)
@pytest.mark.parametrize("cols", [None, [], [1], np.array([1])])
@pytest.mark.parametrize("other_shape", [[], [1], [2]])
def test_matvec(
mat: Union[tm.MatrixBase, tm.StandardizedMatrix], other_type, cols, other_shape
):
"""
    Check that matvec agrees with multiplication by the dense representation of the matrix.
other_type: Function transforming list to list, array, or DenseMatrix
cols: Argument 1 to matvec, specifying which columns of the matrix (and
which elements of 'other') to use
other_shape: Second dimension of 'other.shape', if any. If other_shape is [], then
other is 1d.
"""
n_row = mat.shape[1]
shape = [n_row] + other_shape
other_as_list = np.random.random(shape).tolist()
other = other_type(other_as_list)
def is_split_with_cat_part(x):
return isinstance(x, tm.SplitMatrix) and any(
isinstance(elt, tm.CategoricalMatrix) for elt in x.matrices
)
has_categorical_component = (
isinstance(mat, tm.CategoricalMatrix)
or is_split_with_cat_part(mat)
or (
isinstance(mat, tm.StandardizedMatrix)
and (
isinstance(mat.mat, tm.CategoricalMatrix)
or is_split_with_cat_part(mat.mat)
)
)
)
if has_categorical_component and len(shape) > 1:
with pytest.raises(NotImplementedError, match="only implemented for 1d"):
mat.matvec(other, cols)
else:
res = mat.matvec(other, cols)
mat_subset, vec_subset = process_mat_vec_subsets(mat, other, None, cols, cols)
expected = mat_subset.dot(vec_subset)
np.testing.assert_allclose(res, expected)
assert isinstance(res, np.ndarray)
if cols is None:
res2 = mat @ other
np.testing.assert_allclose(res2, expected)
def process_mat_vec_subsets(mat, vec, mat_rows, mat_cols, vec_idxs):
mat_subset = mat.A
vec_subset = vec
if mat_rows is not None:
mat_subset = mat_subset[mat_rows, :]
if mat_cols is not None:
mat_subset = mat_subset[:, mat_cols]
if vec_idxs is not None:
vec_subset = np.array(vec_subset)[vec_idxs]
return mat_subset, vec_subset
@pytest.mark.parametrize("mat", get_matrices())
@pytest.mark.parametrize(
"other_type",
[lambda x: x, np.array, tm.DenseMatrix],
)
@pytest.mark.parametrize("rows", [None, [], [2], np.arange(2)])
@pytest.mark.parametrize("cols", [None, [], [1], np.arange(1)])
def test_transpose_matvec(
mat: Union[tm.MatrixBase, tm.StandardizedMatrix], other_type, rows, cols
):
other_as_list = [3.0, -0.1, 0]
other = other_type(other_as_list)
assert np.shape(other)[0] == mat.shape[0]
res = mat.transpose_matvec(other, rows, cols)
mat_subset, vec_subset = process_mat_vec_subsets(
mat, other_as_list, rows, cols, rows
)
expected = mat_subset.T.dot(vec_subset)
np.testing.assert_allclose(res, expected)
assert isinstance(res, np.ndarray)
@pytest.mark.parametrize(
"mat_i, mat_j",
[
(dense_matrix_C(), sparse_matrix()),
(dense_matrix_C(), sparse_matrix_64()),
(dense_matrix_C(), categorical_matrix()),
(dense_matrix_F(), sparse_matrix()),
(dense_matrix_F(), sparse_matrix_64()),
(dense_matrix_F(), categorical_matrix()),
(dense_matrix_not_writeable(), sparse_matrix()),
(dense_matrix_not_writeable(), sparse_matrix_64()),
(dense_matrix_not_writeable(), categorical_matrix()),
(sparse_matrix(), dense_matrix_C()),
(sparse_matrix(), dense_matrix_F()),
(sparse_matrix(), dense_matrix_not_writeable()),
(sparse_matrix(), categorical_matrix()),
(sparse_matrix_64(), dense_matrix_C()),
(sparse_matrix_64(), dense_matrix_F()),
(sparse_matrix_64(), dense_matrix_not_writeable()),
(sparse_matrix_64(), categorical_matrix()),
(categorical_matrix(), dense_matrix_C()),
(categorical_matrix(), dense_matrix_F()),
(categorical_matrix(), dense_matrix_not_writeable()),
(categorical_matrix(), sparse_matrix()),
(categorical_matrix(), sparse_matrix_64()),
(categorical_matrix(), categorical_matrix()),
],
)
@pytest.mark.parametrize("rows", [None, [2], np.arange(2)])
@pytest.mark.parametrize("L_cols", [None, [1], np.arange(1)])
@pytest.mark.parametrize("R_cols", [None, [1], np.arange(1)])
def test_cross_sandwich(
mat_i: Union[tm.DenseMatrix, tm.SparseMatrix, tm.CategoricalMatrix],
mat_j: Union[tm.DenseMatrix, tm.SparseMatrix, tm.CategoricalMatrix],
rows: Optional[np.ndarray],
L_cols: Optional[np.ndarray],
R_cols: Optional[np.ndarray],
):
assert mat_i.shape[0] == mat_j.shape[0]
d = np.random.random(mat_i.shape[0])
mat_i_, _ = process_mat_vec_subsets(mat_i, None, rows, L_cols, None)
mat_j_, d_ = process_mat_vec_subsets(mat_j, d, rows, R_cols, rows)
expected = mat_i_.T @ np.diag(d_) @ mat_j_
res = mat_i._cross_sandwich(mat_j, d, rows, L_cols, R_cols)
np.testing.assert_almost_equal(res, expected)
@pytest.mark.parametrize("mat", get_matrices())
@pytest.mark.parametrize(
"vec_type",
[lambda x: x, np.array, tm.DenseMatrix],
)
@pytest.mark.parametrize("rows", [None, [], [1], np.arange(2)])
@pytest.mark.parametrize("cols", [None, [], [0], np.arange(1)])
def test_self_sandwich(
mat: Union[tm.MatrixBase, tm.StandardizedMatrix], vec_type, rows, cols
):
vec_as_list = [3, 0.1, 1]
vec = vec_type(vec_as_list)
res = mat.sandwich(vec, rows, cols)
mat_subset, vec_subset = process_mat_vec_subsets(mat, vec_as_list, rows, cols, rows)
expected = mat_subset.T @ np.diag(vec_subset) @ mat_subset
if sps.issparse(res):
res = res.A
np.testing.assert_allclose(res, expected)
@pytest.mark.parametrize("rows", [None, [], [0], np.arange(2)])
@pytest.mark.parametrize("cols", [None, [], [0], np.arange(1)])
def test_split_sandwich(rows: Optional[np.ndarray], cols: Optional[np.ndarray]):
mat = complex_split_matrix()
d = np.random.random(mat.shape[0])
result = mat.sandwich(d, rows=rows, cols=cols)
mat_as_dense = mat.A
d_rows = d
if rows is not None:
mat_as_dense = mat_as_dense[rows, :]
d_rows = d[rows]
if cols is not None:
mat_as_dense = mat_as_dense[:, cols]
expected = mat_as_dense.T @ np.diag(d_rows) @ mat_as_dense
np.testing.assert_almost_equal(result, expected)
@pytest.mark.parametrize(
"mat",
[
dense_matrix_F(),
dense_matrix_C(),
dense_matrix_not_writeable(),
sparse_matrix(),
sparse_matrix_64(),
],
)
def test_transpose(mat):
res = mat.T.A
expected = mat.A.T
assert res.shape == (mat.shape[1], mat.shape[0])
np.testing.assert_allclose(res, expected)
@pytest.mark.parametrize("mat", get_matrices())
@pytest.mark.parametrize(
"vec_type",
[lambda x: x, np.array, tm.DenseMatrix],
)
def test_rmatmul(mat: Union[tm.MatrixBase, tm.StandardizedMatrix], vec_type):
vec_as_list = [3.0, -0.1, 0]
vec = vec_type(vec_as_list)
res = mat.__rmatmul__(vec)
res2 = vec @ mat
expected = vec_as_list @ mat.A
np.testing.assert_allclose(res, expected)
np.testing.assert_allclose(res2, expected)
assert isinstance(res, np.ndarray)
@pytest.mark.parametrize("mat", get_matrices())
def test_matvec_raises(mat: Union[tm.MatrixBase, tm.StandardizedMatrix]):
with pytest.raises(ValueError):
mat.matvec(np.ones(11))
@pytest.mark.parametrize("mat", get_matrices())
@pytest.mark.parametrize("dtype", [np.float64, np.float32])
def test_astype(mat: Union[tm.MatrixBase, tm.StandardizedMatrix], dtype):
new_mat = mat.astype(dtype)
assert np.issubdtype(new_mat.dtype, dtype)
vec = np.zeros(mat.shape[1], dtype=dtype)
res = new_mat.matvec(vec)
assert res.dtype == new_mat.dtype
@pytest.mark.parametrize("mat", get_all_matrix_base_subclass_mats())
def test_get_col_means(mat: tm.MatrixBase):
weights = np.random.random(mat.shape[0])
# TODO: make weights sum to 1 within functions
weights /= weights.sum()
means = mat._get_col_means(weights)
expected = mat.A.T.dot(weights)
np.testing.assert_allclose(means, expected)
@pytest.mark.parametrize("mat", get_all_matrix_base_subclass_mats())
def test_get_col_means_unweighted(mat: tm.MatrixBase):
weights = np.ones(mat.shape[0])
# TODO: make weights sum to 1 within functions
weights /= weights.sum()
means = mat._get_col_means(weights)
expected = mat.A.mean(0)
np.testing.assert_allclose(means, expected)
@pytest.mark.parametrize("mat", get_all_matrix_base_subclass_mats())
def test_get_col_stds(mat: tm.MatrixBase):
weights = np.random.random(mat.shape[0])
# TODO: make weights sum to 1
weights /= weights.sum()
means = mat._get_col_means(weights)
expected = np.sqrt((mat.A ** 2).T.dot(weights) - means ** 2)
stds = mat._get_col_stds(weights, means)
np.testing.assert_allclose(stds, expected)
@pytest.mark.parametrize("mat", get_unscaled_matrices())
def test_get_col_stds_unweighted(mat: tm.MatrixBase):
weights = np.ones(mat.shape[0])
# TODO: make weights sum to 1
weights /= weights.sum()
means = mat._get_col_means(weights)
expected = mat.A.std(0)
stds = mat._get_col_stds(weights, means)
np.testing.assert_allclose(stds, expected)
@pytest.mark.parametrize("mat", get_unscaled_matrices())
@pytest.mark.parametrize("center_predictors", [False, True])
@pytest.mark.parametrize("scale_predictors", [False, True])
def test_standardize(
mat: tm.MatrixBase, center_predictors: bool, scale_predictors: bool
):
asarray = mat.A.copy()
weights = np.random.rand(mat.shape[0])
weights /= weights.sum()
true_means = asarray.T.dot(weights)
true_sds = np.sqrt((asarray ** 2).T.dot(weights) - true_means ** 2)
standardized, means, stds = mat.standardize(
weights, center_predictors, scale_predictors
)
assert isinstance(standardized, tm.StandardizedMatrix)
assert isinstance(standardized.mat, type(mat))
if center_predictors:
np.testing.assert_allclose(
standardized.transpose_matvec(weights), 0, atol=1e-11
)
np.testing.assert_allclose(means, asarray.T.dot(weights))
else:
np.testing.assert_almost_equal(means, 0)
if scale_predictors:
np.testing.assert_allclose(stds, true_sds)
else:
assert stds is None
expected_sds = true_sds if scale_predictors else np.ones_like(true_sds)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
one_over_sds = np.nan_to_num(1 / expected_sds)
expected_mat = asarray * one_over_sds
if center_predictors:
expected_mat -= true_means * one_over_sds
np.testing.assert_allclose(standardized.A, expected_mat)
unstandardized = standardized.unstandardize()
assert isinstance(unstandardized, type(mat))
np.testing.assert_allclose(unstandardized.A, asarray)
@pytest.mark.parametrize("mat", get_matrices())
def test_indexing_int_row(mat: Union[tm.MatrixBase, tm.StandardizedMatrix]):
res = mat[0, :]
if not isinstance(res, np.ndarray):
res = res.A
expected = mat.A[0, :]
np.testing.assert_allclose(np.squeeze(res), expected)
@pytest.mark.parametrize("mat", get_matrices())
def test_indexing_range_row(mat: Union[tm.MatrixBase, tm.StandardizedMatrix]):
res = mat[0:2, :]
if not isinstance(res, np.ndarray):
res = res.A
expected = mat.A[0:2, :]
np.testing.assert_allclose(np.squeeze(res), expected)
def test_pandas_to_matrix():
n_rows = 50
dense_column = np.linspace(-10, 10, num=n_rows, dtype=np.float64)
dense_column_with_lots_of_zeros = dense_column.copy()
dense_column_with_lots_of_zeros[:44] = 0.0
sparse_column = np.zeros(n_rows, dtype=np.float64)
sparse_column[0] = 1.0
cat_column_lowdim = np.tile(["a", "b"], n_rows // 2)
cat_column_highdim = np.arange(n_rows)
dense_ser = pd.Series(dense_column)
lowdense_ser = pd.Series(dense_column_with_lots_of_zeros)
sparse_ser = pd.Series(sparse_column, dtype=pd.SparseDtype("float", 0.0))
cat_ser_lowdim = pd.Categorical(cat_column_lowdim)
cat_ser_highdim = pd.Categorical(cat_column_highdim)
df = pd.DataFrame(
data={
"d": dense_ser,
"ds": lowdense_ser,
"s": sparse_ser,
"cl_obj": cat_ser_lowdim.astype(object),
"ch": cat_ser_highdim,
}
)
mat = tm.from_pandas(
df, dtype=np.float64, sparse_threshold=0.3, cat_threshold=4, object_as_cat=True
)
assert mat.shape == (n_rows, n_rows + 5)
assert len(mat.matrices) == 3
assert isinstance(mat, tm.SplitMatrix)
nb_col_by_type = {
tm.DenseMatrix: 3, # includes low-dimension categorical
tm.SparseMatrix: 2,  # the sparse column plus the sparsified dense column
tm.CategoricalMatrix: n_rows,
}
for submat in mat.matrices:
assert submat.shape[1] == nb_col_by_type[type(submat)]
# Prevent a regression where the column type of sparsified dense columns
# was being changed in place.
assert df["cl_obj"].dtype == object
assert df["ds"].dtype == np.float64
@pytest.mark.parametrize("mat", get_all_matrix_base_subclass_mats())
def test_split_matrix_creation(mat):
sm = tm.SplitMatrix(matrices=[mat, mat])
assert sm.shape[0] == mat.shape[0]
assert sm.shape[1] == 2 * mat.shape[1]
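# Illustrative sketch (not part of the original test module): a direct check of the
# identity the sandwich tests above rely on, X.T @ diag(d) @ X, on a small
# DenseMatrix. It assumes the same `tm` and `np` imports used throughout this file
# and the same positional `sandwich(d)` call shape exercised by the tests.
def sandwich_identity_sketch():
    X = np.random.rand(6, 3)
    d = np.random.rand(6)
    mat = tm.DenseMatrix(X)
    np.testing.assert_allclose(mat.sandwich(d), X.T @ np.diag(d) @ X)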
| 2.234375
| 2
|
src/azure-cli/azure/cli/command_modules/lab/_params.py
|
YuanyuanNi/azure-cli
| 3,287
|
12783203
|
<filename>src/azure-cli/azure/cli/command_modules/lab/_params.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.command_modules.lab.validators import validate_artifacts, validate_template_id
from azure.cli.core.util import get_json_object
def load_arguments(self, _):
with self.argument_context('lab custom-image create') as c:
c.argument('name', options_list=['--name', '-n'])
with self.argument_context('lab vm create') as c:
c.argument('name', options_list=['--name', '-n'])
# Authentication related arguments
for arg_name in ['admin_username', 'admin_password', 'authentication_type', 'ssh_key', 'generate_ssh_keys',
'saved_secret']:
c.argument(arg_name, arg_group='Authentication')
c.argument('generate_ssh_keys', action='store_true')
# Add Artifacts from json object
c.argument('artifacts', type=get_json_object)
# Image related arguments
c.ignore('os_type', 'gallery_image_reference', 'custom_image_id')
# Network related arguments
for arg_name in ['ip_configuration', 'subnet', 'vnet_name']:
c.argument(arg_name, arg_group='Network')
c.ignore('lab_subnet_name', 'lab_virtual_network_id', 'disallow_public_ip_address', 'network_interface')
# Creating a VM in a different location than the lab is an officially unsupported scenario
c.ignore('location')
c.argument('allow_claim', action='store_true')
with self.argument_context('lab vm list') as c:
for arg_name in ['filters', 'all', 'claimable', 'environment']:
c.argument(arg_name, arg_group='Filter')
for arg_name in ['all', 'claimable']:
c.argument(arg_name, action='store_true')
with self.argument_context('lab vm claim') as c:
c.argument('name', options_list=['--name', '-n'], id_part='child_name_1')
c.argument('lab_name', id_part='name')
with self.argument_context('lab vm apply-artifacts') as c:
c.argument('artifacts', type=get_json_object, validator=validate_artifacts)
c.argument('name', options_list=['--name', '-n'])
with self.argument_context('lab formula') as c:
c.argument('name', options_list=['--name', '-n'])
with self.argument_context('lab secret') as c:
from azure.mgmt.devtestlabs.models import Secret
c.argument('name', options_list=['--name', '-n'])
c.argument('secret', options_list=['--value'], type=lambda x: Secret(value=x))
c.ignore('user_name')
with self.argument_context('lab formula export-artifacts') as c:
# Exporting artifacts does not need expand filter
c.ignore('expand')
with self.argument_context('lab environment') as c:
c.argument('name', options_list=['--name', '-n'])
c.ignore('user_name')
with self.argument_context('lab environment create') as c:
c.argument('arm_template', validator=validate_template_id)
c.argument('parameters', type=get_json_object)
with self.argument_context('lab arm-template') as c:
c.argument('name', options_list=['--name', '-n'])
with self.argument_context('lab arm-template show') as c:
c.argument('export_parameters', action='store_true')
| 2.03125
| 2
|
softcolor/soft_color_operators.py
|
PBibiloni/softcolor
| 0
|
12783204
|
<gh_stars>0
import math
import numpy as np
def soft_color_erosion(multivariate_image, structuring_element, fuzzy_implication_function):
return _base_soft_color_operator(multivariate_image=multivariate_image,
structuring_element=structuring_element,
aggregation_function=fuzzy_implication_function,
flag_minimize_first_channel=True,
flag_minimize_euclidean_distance=True,
flag_minimize_lexicographical_order=True)
def soft_color_dilation(multivariate_image, structuring_element, fuzzy_conjunction):
return _base_soft_color_operator(multivariate_image=multivariate_image,
structuring_element=structuring_element,
aggregation_function=fuzzy_conjunction,
flag_minimize_first_channel=False,
flag_minimize_euclidean_distance=True,
flag_minimize_lexicographical_order=False)
def _base_soft_color_operator(multivariate_image, structuring_element,
aggregation_function,
flag_minimize_first_channel,
flag_minimize_euclidean_distance,
flag_minimize_lexicographical_order):
result_image = np.empty_like(multivariate_image)
padded_image = _pad_image_wrt_structuring_element(multivariate_image=multivariate_image,
structuring_element=structuring_element)
step = _compute_optimum_step(structuring_element)
for limit_i in range(0, multivariate_image.shape[0], step):
for limit_j in range(0, multivariate_image.shape[1], step):
_base_soft_color_operator_limited_range(
padded_image=padded_image,
structuring_element=structuring_element,
se_distances_wrt_center=_euclidean_distance_wrt_center(structuring_element.shape),
aggregation_function=aggregation_function,
output=result_image,
range_i=[limit_i, limit_i+step],
range_j=[limit_j, limit_j+step],
flag_minimize_first_channel=flag_minimize_first_channel,
flag_minimize_euclidean_distance=flag_minimize_euclidean_distance,
flag_minimize_lexicographical_order=flag_minimize_lexicographical_order
)
return result_image
def _base_soft_color_operator_limited_range(padded_image,
structuring_element, se_distances_wrt_center,
aggregation_function,
output, range_i, range_j,
flag_minimize_first_channel,
flag_minimize_euclidean_distance,
flag_minimize_lexicographical_order):
num_channels = padded_image.shape[2]
sz_se, se_center_idcs, se_before_center, se_after_center = _sizes_wrt_center(structuring_element.shape)
pad_i, pad_j = _pad_size_wrt_structuring_element(structuring_element=structuring_element)
se_after_center_included = [e+1 for e in se_after_center]
range_i[1] = min(range_i[1], output.shape[0])
range_j[1] = min(range_j[1], output.shape[1])
num_i = range_i[1] - range_i[0]
num_j = range_j[1] - range_j[0]
# Precompute AggregationFunction(ImageWithOffset, SE_uniqueValues)
se_uniques, se_unique_to_idx, se_idx_to_unique = np.unique(structuring_element,
return_index=True, return_inverse=True)
precomputed_unique_se = np.empty(shape=(num_i+pad_i[0]+pad_i[1],
num_j+pad_j[0]+pad_j[1],
se_uniques.size),
dtype=output.dtype)
for idx_unique in range(se_uniques.size):
idx_se_flat = se_unique_to_idx[idx_unique]
idx_i_se, idx_j_se = np.unravel_index(idx_se_flat, shape=sz_se)  # 'dims' was removed from np.unravel_index in modern NumPy
if np.isnan(structuring_element[idx_i_se, idx_j_se]):
precomputed_unique_se[:, :, idx_unique] = np.nan
else:
cropped_first_channel = padded_image[
range_i[0]:range_i[1]+se_before_center[0]+se_after_center[0],
range_j[0]:range_j[1]+se_before_center[1]+se_after_center[1],  # the j axis must use its own padding sizes
0].copy()
mask_nans = np.isnan(cropped_first_channel)
cropped_first_channel[~mask_nans] = aggregation_function(
np.full(shape=(np.count_nonzero(~mask_nans), ),
fill_value=structuring_element[idx_i_se, idx_j_se],
dtype=structuring_element.dtype),
cropped_first_channel[~mask_nans],
)
precomputed_unique_se[:, :, idx_unique] = cropped_first_channel
values = np.empty(shape=(num_i, num_j, sz_se[0] * sz_se[1]), dtype=output.dtype)
for idx_i_se in range(sz_se[0]):
for idx_j_se in range(sz_se[1]):
idx_se_flat = np.ravel_multi_index((idx_i_se, idx_j_se), dims=sz_se)
idx_unique = se_idx_to_unique[idx_se_flat]
values[:, :, idx_se_flat] = precomputed_unique_se[
idx_i_se:num_i + idx_i_se,
idx_j_se:num_j + idx_j_se,
idx_unique]
values_allnan_mask = np.all(np.isnan(values), axis=2)
if np.any(values_allnan_mask):
values_flat = values.reshape((-1, 1, values.shape[2]))
mask_flat = values_allnan_mask.reshape((-1, ))
idcs_flat = np.zeros(shape=values_flat.shape[:2], dtype='uint64')
if flag_minimize_first_channel:
idcs_flat[~mask_flat, :] = np.nanargmin(values_flat[~mask_flat, :, :], axis=2)
else:
idcs_flat[~mask_flat, :] = np.nanargmax(values_flat[~mask_flat, :, :], axis=2)
selected_se_idx = idcs_flat.reshape(num_i, num_j)
else:
if flag_minimize_first_channel:
selected_se_idx = np.nanargmin(values, axis=2)
else:
selected_se_idx = np.nanargmax(values, axis=2)
grid_val_j, grid_val_i = np.meshgrid(np.arange(values.shape[1]), np.arange(values.shape[0]))
aggregated_first_channel = values[grid_val_i, grid_val_j, selected_se_idx]
idcs_tied_3d = np.equal(values[:, :, :], aggregated_first_channel[:, :, np.newaxis])
mask_tie = np.sum(idcs_tied_3d, axis=2) != 1
idx_tie_i, idx_tie_j = np.where(mask_tie & ~values_allnan_mask)
for res_i, res_j in zip(idx_tie_i, idx_tie_j):
pad_ini_i = res_i+range_i[0]+se_before_center[0]-se_before_center[0]
pad_end_i = res_i+range_i[0]+se_after_center[0]+se_after_center_included[0]
pad_ini_j = res_j+range_j[0]+se_before_center[1]-se_before_center[1]
pad_end_j = res_j+range_j[0]+se_after_center[1]+se_after_center_included[1]
idcs_se_tied = np.where(idcs_tied_3d[res_i, res_j, :])[0]
if flag_minimize_euclidean_distance != flag_minimize_lexicographical_order:
sign_d = 1
else:
sign_d = -1
compound_data = np.concatenate((sign_d * se_distances_wrt_center[:, :, np.newaxis],
padded_image[pad_ini_i:pad_end_i, pad_ini_j:pad_end_j, 1:]),
axis=2)
compound_data = compound_data.reshape((-1, num_channels)) # num_channels - 1 (first_channel) + 1 (d_se)
compound_data = compound_data[idcs_se_tied, :]
if flag_minimize_lexicographical_order:
best_idx = _lexicographical_argmin(compound_data)
else:
best_idx = _lexicographical_argmax(compound_data)
best_idx = idcs_se_tied[best_idx]
selected_se_idx[res_i, res_j] = best_idx
relative_delta_i, relative_delta_j = np.unravel_index(selected_se_idx, shape=sz_se)
grid_out_i = grid_val_i + range_i[0]
grid_out_j = grid_val_j + range_j[0]
grid_pad_i = grid_out_i + relative_delta_i
grid_pad_j = grid_out_j + relative_delta_j
aggregated_first_channel[values_allnan_mask] = np.nan
output[grid_out_i, grid_out_j, 0] = aggregated_first_channel
for idx_channel in range(1, num_channels):
channel = padded_image[grid_pad_i, grid_pad_j, idx_channel]
channel[values_allnan_mask] = np.nan
output[grid_out_i, grid_out_j, idx_channel] = channel
return output
def _euclidean_distance_wrt_center(spatial_shape):
# Return array containing its distance to the center.
center_idcs = [e // 2 for e in spatial_shape]
i = np.tile(np.arange(-center_idcs[0], spatial_shape[0]-center_idcs[0])[:, np.newaxis], (1, spatial_shape[1]))
j = np.tile(np.arange(-center_idcs[1], spatial_shape[1]-center_idcs[1])[np.newaxis, :], (spatial_shape[0], 1))
coordinates = np.concatenate((i[:, :, np.newaxis], j[:, :, np.newaxis]), axis=2)
return np.linalg.norm(coordinates, axis=2)
def _sizes_wrt_center(spatial_shape):
# Measure lengths wrt the center of the image (i.e. before/after center).
img_sz = spatial_shape[:2]
center_idcs = [e//2 for e in img_sz]
sz_before_center_excluded = center_idcs
after_center_included = [e1 - e2 for e1, e2 in zip(img_sz, center_idcs)]
sz_after_center_excluded = [e-1 for e in after_center_included]
return img_sz, center_idcs, sz_before_center_excluded, sz_after_center_excluded
def _pad_size_wrt_structuring_element(structuring_element):
_, _, se_before_center, se_after_center = _sizes_wrt_center(structuring_element.shape)
return (se_before_center[0], se_after_center[0]), (se_before_center[1], se_after_center[1])
def _pad_image_wrt_structuring_element(multivariate_image, structuring_element):
pad_i, pad_j = _pad_size_wrt_structuring_element(structuring_element=structuring_element)
padded_image = np.pad(multivariate_image,
(pad_i, pad_j, (0, 0)),
'constant', constant_values=(np.nan, np.nan))
return padded_image
def _lexicographical_argmin(data):
# Data must be two-dimensional; the argmin is taken along the first axis
# Returns a numeric index
if data.shape[1] == 1:
return np.argmin(data[:, 0])
min_value = np.nanmin(data[:, 0])
idcs_min = np.where(data[:, 0] == min_value)[0]
if idcs_min.size == 1:
return idcs_min[0]
return idcs_min[_lexicographical_argmin(data[idcs_min, 1:])]
def _lexicographical_argmax(data):
# Data must be two-dimensional; the argmax is taken along the first axis
# Returns a numeric index
if data.shape[1] == 1:
return np.argmax(data[:, 0])
max_value = np.nanmax(data[:, 0])
idcs_max = np.where(data[:, 0] == max_value)[0]
if idcs_max.size == 1:
return idcs_max[0]
return idcs_max[_lexicographical_argmax(data[idcs_max, 1:])]  # break ties on the remaining columns, still maximizing
def _compute_optimum_step(structuring_element):
# Returns appropriate value for batch size B x B (we create potentially large arrays B x B x SE.size).
max_array_size = 3e8
step = math.sqrt(max_array_size / structuring_element.size)
step = math.ceil(step)
step = max(step, structuring_element.size) # To avoid redundant computations due to padded sub-images
return step
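if __name__ == "__main__":
    # Illustrative sketch only (not part of the original module): a soft colour
    # dilation of a small random multichannel image with a flat 3x3 structuring
    # element, using the product t-norm (np.multiply) as the fuzzy conjunction.
    # The (rows, cols, channels) layout matches what the operators above expect.
    demo_image = np.random.rand(8, 8, 3)
    demo_se = np.ones((3, 3))
    dilated = soft_color_dilation(demo_image, demo_se, fuzzy_conjunction=np.multiply)
    print(dilated.shape)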
| 2.28125
| 2
|
pong/pong_game.py
|
burnpiro/pong-deep-q-learning
| 1
|
12783205
|
<reponame>burnpiro/pong-deep-q-learning
from __future__ import annotations
from typing import Union, Tuple, List, Type
import random
from itertools import count
import gym
from gym.spaces import Discrete, Box
import numpy as np
import atari_py
from pong.gym_agents import *
from gym.envs.atari.atari_env import AtariEnv
ACTION = int
POSSIBLE_PLAYERS = Type[Union[GreedyAgent, AggressiveAgent, RandomAgent, None]]
class PongGame(AtariEnv):
_game_count = count(0)
def __init__(self, second_player: POSSIBLE_PLAYERS = None):
super().__init__(frameskip=1)
# self.ale.setInt('frame_skip', 2)
# self.ale.setFloat('repeat_action_probability', 0.5)
# self.seed()
self._game_id = next(self._game_count)
self._second_player_class: POSSIBLE_PLAYERS = second_player
self._is_multiplayer = second_player is not None
self._action_set = self.ale.getMinimalActionSet()
self._action_set2 = [x + 18 for x in self._action_set]
self.current_player = 0 # 0 (P1) or 1 (P2)
self.done = False
self.player_1_action = None
self._player2_bot: POSSIBLE_PLAYERS = second_player(self.action_space,
player=2) if self._is_multiplayer is True else None
self.ball_near_player = False
self.player1_bounces = 0
def step(self, a1: ACTION, a2: Union[ACTION, None] = None):
action1 = self._action_set[a1]
a2 = a1 if a2 is None else a2  # 'or' would wrongly discard a valid action index of 0
action2 = self._action_set2[a2]
reward = self.ale.act2(action1, action2)
ob = self._get_obs()
return ob, reward
def bounce_count(self):
return self.player1_bounces
# Do not make this static because MCTS requires it
def possible_actions(self, player=None) -> List[ACTION]:
if player is not None:
ob = self._get_obs()
if check_if_should_take_action(ob, player=player):
return [DOWN, UP]
return [FIRE]
return [FIRE, DOWN, UP]
def act(self, action: ACTION) -> int:
if self.current_player == 0:
self.player_1_action = action
self.current_player = 1
return False
ob, reward = self.step(self.player_1_action, action)
if ob[RAM_BALL_X_POS] > 155:
self.ball_near_player = True
if self.ball_near_player and ob[RAM_BALL_X_POS] < 155 and not reward:
self.ball_near_player = False
self.player1_bounces += 1
# reward can only be -1, 0 or 1 (-1 and 1 mean a point was scored by one of the sides)
if reward != 0:
self.current_player = 0
self.done = True
return reward
else:
self.current_player = 0
return 0
def act_random(self) -> int:
return self.act(random.choice(self.possible_actions(player=self.current_player)))
def reset(self):
super().reset()
self.player1_bounces = 0
self.ale.press_select()
self.ale.press_select()
self.ale.press_select()
self.ale.soft_reset()
self.step(FIRE, FIRE)
while self._get_ram()[RAM_BALL_Y_POS] == 0:
self.step(FIRE, FIRE)
def copy(self) -> PongGame:
_new_game = PongGame(self._second_player_class)
_new_game.restore_full_state(self.clone_full_state())
return _new_game
def get_state(self):
return (self.clone_full_state(), self.player_1_action)
def set_state(self, state, done, current_player):
self.done = done
self.current_player = current_player
self.restore_full_state(state[0])
self.player_1_action = state[1]
def get_winner(self) -> int:
ob = self._get_obs()
return 0 if ob[P_RIGHT_SCORE] > 0 else 1
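if __name__ == "__main__":
    # Illustrative sketch only: it assumes the project's patched atari_py build,
    # since PongGame relies on ale.act2() and ale.press_select(), which stock
    # atari_py does not expose. Random actions are played until a point is scored.
    game = PongGame(second_player=RandomAgent)
    game.reset()
    while not game.done:
        game.act_random()
    print("winner:", game.get_winner(), "P1 bounces:", game.bounce_count())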
| 2.84375
| 3
|
bidwire/scrapers/knox_tn_agendas_scraper.py
|
RagtagOpen/bidwire
| 5
|
12783206
|
import logging
import scrapelib
from lxml import html
from urllib import parse
from sqlalchemy.exc import IntegrityError
from document import Document
from scrapers.base_scraper import BaseScraper
from utils import ensure_absolute_url
log = logging.getLogger(__name__)
class KnoxCoTNAgendaScraper(BaseScraper):
SITE_ROOT_URL = 'https://destinyhosted.com/'
MEETING_SCHEDULE_URL = 'https://destinyhosted.com/agenda_publish.cfm?id=56691&mt=ALL'
def __init__(self) -> None:
self.scraper = scrapelib.Scraper()
def scrape(self, session):
page = self.scraper.get(self.MEETING_SCHEDULE_URL)
documents = self._get_docs_from_schedule(page.content)
log.debug("Found %d documents", len(documents))
new_docs = []
for doc in documents:
try:
with session.begin_nested():
session.add(doc)
except IntegrityError:
log.debug('Already have document %s', doc)
else:
new_docs.append(doc)
log.info("New Documents: %s", new_docs)
session.commit()
def _get_docs_from_schedule(self, page_str):
"""
Parse the contents of the meeting schedule page and extract document
links and title.
Parameters:
A string containing the page HTML
Returns:
list of Document objects (not yet persisted to database)
"""
doctree = html.fromstring(page_str)
tables = doctree.findall('body/div/form/table/tbody')
# There should be 3 tables on the page. The second one is the one we want
if len(tables) < 2:
log.error("Knox Agendas: required table not found in page")
raise ValueError('required table not found in page')
table = tables[1]
# We don't care about the table header; it only contains the labels Agendas and Meetings
rows = table.findall('tr')
documents = []
for row in rows:
# The first cell is a link to the Agenda doc and the anchor text is the date of the meeting
# The second cell is the name of the meeting
agenda, meeting = row.findall('td')[:2]
meeting_name = meeting.text.strip()
agenda_anchor = agenda.find('a')
if agenda_anchor is None:
log.error("Knox Agendas: no document link in the meetings table")
raise ValueError('no document link in the meetings table')
agenda_date = agenda_anchor.text.strip()
doc_url = agenda_anchor.get('href')
if not doc_url:
log.error("Knox Agendas: no href in the anchor tag for %s: %s", agenda_date, meeting_name)
raise ValueError('no href in document anchor')
doc_url = ensure_absolute_url(self.SITE_ROOT_URL, doc_url)
# The anchor title is useless, so use the meeting name in the doc name
doc_name = "{}: {}".format(agenda_date, meeting_name)
documents.append(
Document(
url=doc_url,
title=doc_name,
site=Document.Site.KNOX_CO_TN_AGENDAS.name,
)
)
return documents
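if __name__ == '__main__':
    # Illustrative check only (not part of the original scraper): a hand-written
    # HTML snippet that mirrors the three-table layout parsed above is fed straight
    # into _get_docs_from_schedule, without touching the network or the database.
    sample_page = (
        '<html><body><div><form>'
        '<table><tbody><tr><td>header</td></tr></tbody></table>'
        '<table><tbody>'
        '<tr><td><a href="agenda_publish.cfm?id=1">01/01/2024</a></td>'
        '<td>County Commission</td></tr>'
        '</tbody></table>'
        '<table><tbody><tr><td>footer</td></tr></tbody></table>'
        '</form></div></body></html>'
    )
    docs = KnoxCoTNAgendaScraper()._get_docs_from_schedule(sample_page)
    print(docs[0].title, docs[0].url)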
| 2.875
| 3
|
lesson2.2_step8.py
|
Vesta080283/stepik-auto-tests-course
| 0
|
12783207
|
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
import os
try:
link = "http://suninjuly.github.io/file_input.html"
browser = webdriver.Chrome()
browser.get(link)
# Your code that fills in the required fields
input1 = browser.find_element(
By.XPATH, "//input[@name='firstname'][@required]")
input1.send_keys("Ivan")
input2 = browser.find_element(
By.XPATH, "//input[@name='lastname'][@required]")
input2.send_keys("Petrov")
input3 = browser.find_element(
By.XPATH, "//input[@name='email'][@required]")
input3.send_keys("<EMAIL>")
input4 = browser.find_element(
By.XPATH, "//input[@type='file']")
# get the path to the directory of the currently running script file
current_dir = os.path.abspath(os.path.dirname(__file__))
# append the file name to this path
file_path = os.path.join(current_dir, 'testfile.txt')
input4.send_keys(file_path)
# Submit the completed form
button = browser.find_element(By.XPATH, "//button[@type='submit']")
button.click()
# Check that the registration succeeded
# wait for the page to load
time.sleep(2)
# # find the element containing the text
# welcome_text_elt = browser.find_element_by_tag_name("h1")
# # store the text of the welcome_text_elt element in the welcome_text variable
# welcome_text = welcome_text_elt.text
# # use assert to check that the expected text matches the text on the page
# assert "Congratulations! You have successfully registered!" == welcome_text
finally:
# pause so the results of the script run can be inspected visually
time.sleep(10)
# close the browser after all the steps
browser.quit()
| 2.890625
| 3
|
python/problem31.py
|
chjdev/euler
| 0
|
12783208
|
<reponame>chjdev/euler
# Coin sums
#
# Problem 31
#
# In England the currency is made up of pound, £, and pence, p, and there are eight coins in general circulation:
#
# 1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p).
# It is possible to make £2 in the following way:
#
# 1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
# How many different ways can £2 be made using any number of coins?
COINS = (1, 2, 5, 10, 20, 50, 100, 200)
def ways_2_split(number):
table = [1] + [0] * number
for coin in COINS:
for num in range(coin, len(table)):
table[num] += table[num - coin]
return table[-1]
print(ways_2_split(200))
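# Sanity check added for illustration (not part of the original solution): with the
# same table-based recurrence, 5p can be formed in exactly 4 ways from {1p, 2p, 5p}
# (5, 2+2+1, 2+1+1+1, 1+1+1+1+1), and the larger coins cannot contribute to a 5p total.
assert ways_2_split(5) == 4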
| 3.390625
| 3
|
tools/gui-editor/wrapper/item.py
|
jordsti/stigame
| 8
|
12783209
|
<filename>tools/gui-editor/wrapper/item.py
__author__ = 'JordSti'
import surface
import object_wrap
import style
class item(object_wrap.object_wrap):
def __init__(self, obj=None):
object_wrap.object_wrap.__init__(self, obj)
def set_dimension(self, width, height):
self.lib.Item_setDimension(self.obj, width, height)
def set_point(self, x, y):
self.lib.Item_setPoint(self.obj, x, y)
def set_rectangle(self, x, y , width, height):
self.lib.Item_setRectangle(self.obj, x, y, width, height)
def get_x(self):
return self.lib.Item_getX(self.obj)
def get_y(self):
return self.lib.Item_getY(self.obj)
def get_width(self):
return self.lib.Item_getWidth(self.obj)
def get_height(self):
return self.lib.Item_getHeight(self.obj)
def set_minimum_size(self, width, height):
self.lib.Item_setMinimumSize(self.obj, width, height)
def set_maximum_size(self, width, height):
self.lib.Item_setMaximumSize(self.obj, width, height)
def set_fixed_size(self, width, height):
self.lib.Item_setFixedSize(self.obj, width, height)
def render(self):
sdl_sur = self.lib.Item_render(self.obj)
sur = surface.surface(sdl_sur)
return sur
def get_style(self):
#todo style object wrapper, even mutable style
return style.style(self.lib.Item_getStyle(self.obj))
| 2.328125
| 2
|
app/admin/views.py
|
lemocla/business-analysis-project
| 10
|
12783210
|
<gh_stars>1-10
from flask_admin import AdminIndexView, expose
from werkzeug.security import generate_password_hash
from app.admin.decorators import admin_access
from app.admin.forms import UserForm, OrganisationForm
from app.admin.flask_admin.views import CustomModelView
# Customized admin views
class DashboardView(AdminIndexView):
@admin_access
def _handle_view(self, name, **kwargs):
super(DashboardView, self)._handle_view(name, **kwargs)
def is_visible(self):
# This view won't appear in the menu structure
return False
@expose('/')
def index(self):
return self.render('admin/home.html')
class UserView(CustomModelView):
column_list = ('username', 'email',)
column_sortable_list = ('username', 'email',)
form = UserForm
@admin_access
def _handle_view(self, name, **kwargs):
super(UserView, self)._handle_view(name, **kwargs)
# Making the password not editable on edit
def on_form_prefill(self, form, id):
form.password.render_kw = {'readonly': True}
def on_model_change(self, form, model, is_created):
# If creating a new user, hash password
if is_created:
model['password'] = generate_password_hash(form.password.data)
else:
old_password = form.password.object_data
# If password has been changed, hash password
if not old_password == model['password']:
model['password'] = generate_password_hash(form.password.data)
class OrganisationView(CustomModelView):
column_list = ['organisation_name', 'latitude', 'longitude', 'nace_1',
'nace_1_label', 'nace_2', 'nace_2_label', 'nace_3',
'nace_3_label', 'web_address']
column_sortable_list = ('organisation_name', 'latitude', 'longitude',
'nace_1', 'nace_1_label', 'nace_2',
'nace_2_label', 'nace_3', 'nace_3_label',
'web_address')
form = OrganisationForm
@admin_access
def _handle_view(self, name, **kwargs):
super(OrganisationView, self)._handle_view(name, **kwargs)
| 2.25
| 2
|
RCWDiscordBot.py
|
aditya369007/RCWprogressUpdate
| 2
|
12783211
|
import discord
import os
import json
import time
import shutil
# creating a dict to keep track of member details
MemberDict = {
# name:
# unique id
# number of reports
}
timeStr = time.strftime("%H%M%S")
MemberDict['people'] = []
token = open("token.txt", "r").read()
# start the bot client
client = discord.Client()
@client.event
async def on_ready(): # method expected by client. This runs once when connected
print(f'We have logged in as {client.user}') # notification of login.
#initiate guild once in this and then populate list for all the guild members that are not bots
# RCW_guild = client.get_guild(346399441475076097)
# for members in RCW_guild.members:
# print(members.name)
#assign unique member ids to a set variable name
@client.event
async def on_message(message): # event that happens per any message.
# each message has a bunch of attributes. Here are a few.
# check out more by print(dir(message)) for example.
print(f"{message.channel}: {message.author}: {message.content}")
RCW_guild = client.get_guild(346399441475076097)
if "!helpRCWbot" in message.content:
await message.channel.send("You have accessed the help menu")
elif "!UpdateMembers" == message.content:
for members in RCW_guild.members:
if members.bot == 0:
MemberDict['people'].append({'name:': members.name, 'uniqueID:': members.id, 'reportCount:': '0'})
with open("data.txt", mode="w") as outfile:
outfile.seek(0)
json.dump(MemberDict, outfile)
outfile.truncate()
outfile.close()
elif "!report" in message.content:
await message.channel.send("As no RCW member was mentioned, ShadowKnight was reported")
elif "!m" == message.content:
# await message.channel.send(f"```{(RCW_guild.members[0].name)}```")
for members in RCW_guild.members:
if members.bot == 0:
print(f"{members.id} : {members.name}")
client.run(token) # recall my token was saved!
| 2.90625
| 3
|
modules/src/dictionary.py
|
rampreeth/JARVIS-on-Messenger
| 1,465
|
12783212
|
<reponame>rampreeth/JARVIS-on-Messenger
import os
import requests
import requests_cache
import config
from templates.text import TextTemplate
WORDS_API_KEY = os.environ.get('WORDS_API_KEY', config.WORDS_API_KEY)
def process(input, entities):
output = {}
try:
word = entities['word'][0]['value']
with requests_cache.enabled('dictionary_cache', backend='sqlite', expire_after=86400):
r = requests.get('https://wordsapiv1.p.mashape.com/words/' + word + '/definitions', headers={
'X-Mashape-Key': WORDS_API_KEY
})
data = r.json()
output['input'] = input
output['output'] = TextTemplate(
'Definition of ' + word + ':\n' + data['definitions'][0]['definition']).get_message()
output['success'] = True
except:
error_message = 'I couldn\'t find that definition.'
error_message += '\nPlease ask me something else, like:'
error_message += '\n - define comfort'
error_message += '\n - cloud definition'
error_message += '\n - what does an accolade mean?'
output['error_msg'] = TextTemplate(error_message).get_message()
output['success'] = False
return output
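if __name__ == '__main__':
    # Illustrative sketch only: the entity structure below is inferred from the
    # lookup performed in process() above. Without a valid WORDS_API_KEY the call
    # simply falls into the error branch and returns the fallback help message.
    demo_entities = {'word': [{'value': 'serendipity'}]}
    print(process('define serendipity', demo_entities))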
| 2.4375
| 2
|
CURSO PYTHON UDEMY/Curso Udemy/Mundo 4 (POO)/103. Atributos de Classe.py
|
nihilboy1455/CURSO-PYTHON-UDEMY
| 0
|
12783213
|
<reponame>nihilboy1455/CURSO-PYTHON-UDEMY
class A:
v = 123
a1 = A()
print(a1.v)
print(A.v)
'''
A class attribute can be displayed both through an instance
and through the class itself
'''
A.v = 5
print(A.v)
print(a1.v)
'''
If I change the value of the class attribute through the class, the next
times it is accessed it will have the new value
'''
a1.v = 74
print(a1.v)
print(A.v)
'''
However, if I change the value of the attribute through the instance, it is only changed locally
'''
| 4.28125
| 4
|
openvisualizer/simengine/timeline.py
|
ftheoleyre/openvisualizer
| 0
|
12783214
|
<filename>openvisualizer/simengine/timeline.py
#!/usr/bin/python
# Copyright (c) 2010-2013, Regents of the University of California.
# All rights reserved.
#
# Released under the BSD 3-Clause license as published at the link below.
# https://openwsn.atlassian.net/wiki/display/OW/License
import logging
import threading
class TimeLineStats(object):
def __init__(self):
self.numEvents = 0
def increment_events(self):
self.numEvents += 1
def get_num_events(self):
return self.numEvents
class TimeLineEvent(object):
def __init__(self, mote_id, at_time, cb, desc):
self.at_time = at_time
self.mote_id = mote_id
self.desc = desc
self.cb = cb
def __str__(self):
return '{0} {1}: {2}'.format(self.at_time, self.mote_id, self.desc)
class TimeLine(threading.Thread):
""" The timeline of the engine. """
def __init__(self):
# store params
from openvisualizer.simengine import simengine
self.engine = simengine.SimEngine()
# local variables
self.current_time = 0 # current time
self.timeline = [] # list of upcoming events
self.first_event_passed = False
self.first_event = threading.Lock()
self.first_event.acquire()
self.first_event_lock = threading.Lock()
self.stats = TimeLineStats()
# logging
self.log = logging.getLogger('Timeline')
self.log.setLevel(logging.DEBUG)
self.log.addHandler(logging.NullHandler())
# initialize parent class
super(TimeLine, self).__init__()
# set thread name
self.setName('TimeLine')
# thread daemon mode
self.setDaemon(True)
def run(self):
# log
if self.log.isEnabledFor(logging.DEBUG):
self.log.debug('starting')
# log
if self.log.isEnabledFor(logging.DEBUG):
self.log.debug('waiting for first event')
# wait for the first event to be scheduled
self.first_event.acquire()
self.engine.indicate_first_event_passed()
# log
if self.log.isEnabledFor(logging.DEBUG):
self.log.debug('first event scheduled')
# apply the delay
self.engine.pause_or_delay()
while True:
# detect the end of the simulation
if len(self.timeline) == 0:
output = ''
output += 'end of simulation reached\n'
output += ' - current_time=' + str(self.get_current_time()) + '\n'
self.log.warning(output)
raise StopIteration(output)
# pop the event at the head of the timeline
event = self.timeline.pop(0)
# make sure that this event is later in time than the previous
if not self.current_time <= event.at_time:
self.log.critical("Current time {} exceeds event time: {}".format(self.current_time, event))
assert False
# record the current time
self.current_time = event.at_time
# log
if self.log.isEnabledFor(logging.DEBUG):
self.log.debug('\n\nnow {0:.6f}, executing {1}@{2}'.format(event.at_time, event.desc, event.mote_id))
# if (event.at_time > 170):
# print('{0:.6f}: executing {1}@{2}'.format(event.at_time, event.desc, event.mote_id))
# call the event's callback
self.engine.get_mote_handler_by_id(event.mote_id).handle_event(event.cb)
# if (event.at_time > 170):
# print("end of the call")
# update statistics
self.stats.increment_events()
# apply the delay
self.engine.pause_or_delay()
# ======================== public ==========================================
def get_current_time(self):
return self.current_time
def schedule_event(self, at_time, mote_id, cb, desc):
"""
Add an event into the timeline
:param at_time: The time at which this event should be called.
:param mote_id: Mote identifier
:param cb: The function to call when this event happens.
:param desc: A unique description (a string) of this event.
"""
# log
if self.log.isEnabledFor(logging.DEBUG):
self.log.debug('scheduling {0}@{1} at {2:.6f}'.format(desc, mote_id, at_time))
# make sure that I'm scheduling an event in the future
try:
assert (self.current_time <= at_time)
except AssertionError:
self.engine.pause()
output = ""
output += "current_time: {}\n".format(str(self.current_time))
output += "at_time: {}\n".format(str(at_time))
output += "mote_id: {}\n".format(mote_id)
output += "desc: {}\n".format(str(desc))
self.log.critical(output)
raise
# create a new event
new_event = TimeLineEvent(mote_id, at_time, cb, desc)
# remove any event already in the queue with same description
for i in range(len(self.timeline)):
if (self.timeline[i].mote_id == mote_id and
self.timeline[i].desc == desc):
self.timeline.pop(i)
break
# look for where to put this event
i = 0
while i < len(self.timeline):
if new_event.at_time > self.timeline[i].at_time:
i += 1
else:
break
# insert the new event
self.timeline.insert(i, new_event)
# start the timeline, if applicable
with self.first_event_lock:
if not self.first_event_passed:
self.first_event_passed = True
self.first_event.release()
def cancel_event(self, mote_id, desc):
"""
Cancels all events identified by their description
:param mote_id: Mote identifier
:param desc: A unique description (a string) of this event.
:returns: The number of events canceled.
"""
# log
if self.log.isEnabledFor(logging.DEBUG):
self.log.debug('cancelEvent {0}@{1}'.format(desc, mote_id))
# initialize return variable
num_events_canceled = 0
# remove any event already the queue with same description
i = 0
while i < len(self.timeline):
if self.timeline[i].mote_id == mote_id and self.timeline[i].desc == desc:
self.timeline.pop(i)
num_events_canceled += 1
else:
i += 1
# return the number of events canceled
return num_events_canceled
def get_events(self):
return [[ev.at_time, ev.mote_id, ev.desc] for ev in self.timeline]
def get_stats(self):
return self.stats
# ======================== private =========================================
def _print_timeline(self):
output = ''
for event in self.timeline:
output += '\n' + str(event)
return output
# ======================== helpers =========================================
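if __name__ == '__main__':
    # Minimal illustration (not part of the original module) of the ordering
    # invariant that schedule_event() maintains: upcoming events are kept sorted
    # by at_time, so the head of the timeline is always the next event to fire.
    demo_events = [
        TimeLineEvent('mote_1', 0.50, None, 'tx start'),
        TimeLineEvent('mote_0', 0.20, None, 'rx start'),
        TimeLineEvent('mote_2', 0.35, None, 'timer fires'),
    ]
    demo_events.sort(key=lambda ev: ev.at_time)
    for ev in demo_events:
        print(ev)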
| 2.5
| 2
|
backend/Contraband/query/models.py
|
ShcherbaDev/WikiContrib
| 0
|
12783215
|
<reponame>ShcherbaDev/WikiContrib
from django.db import models
from contraband.settings import BASE_URL
from django.utils import timezone
from contraband.settings import DEBUG
class Query(models.Model):
"""
:Summary: Store details of Query.
"""
hash_code = models.CharField(unique=True, max_length=64)
file = models.BooleanField(default=False)
csv_file = models.FileField(upload_to='uploads/', null=True, blank=True)
created_on = models.DateTimeField(default=timezone.now)
@property
def csv_file_uri(self):
base_url = BASE_URL[:-1]
if not DEBUG:
base_url += "/src"
if self.file is not False and self.csv_file != "":
return base_url + self.csv_file.url
return ""
def __str__(self):
return self.hash_code
class QueryUser(models.Model):
"""
:Summary: Store username's of Users in a specific Query.
"""
query = models.ForeignKey(Query, on_delete=models.CASCADE)
fullname = models.CharField(max_length=100, default="")
gerrit_username = models.CharField(max_length=40, default="")
github_username = models.CharField(max_length=40, default="")
phabricator_username = models.CharField(max_length=40, default="")
def __str__(self):
return self.query.__str__() + "--" + self.fullname
class QueryFilter(models.Model):
"""
:Summary: Store filters of a Query.
"""
query = models.OneToOneField(Query, on_delete=models.CASCADE)
start_time = models.DateField(null=True, blank=True)
end_time = models.DateField(null=True, blank=True)
status = models.CharField(max_length=100, null=True, blank=True)
def __str__(self):
return self.query.__str__()
| 2.1875
| 2
|
clients/models.py
|
JaviManobanda/client_manager_CLI
| 0
|
12783216
|
import uuid
class ClientModel:
"""Client models
Args:
name (str): client name
company (str): company the client belongs to
email (str): contact email address
position (str): job position within the company
uid (uuid.UUID, optional): unique identifier; generated automatically when omitted. Defaults to None.
"""
def __init__(self, name, company, email, position, uid=None):
self.name = name
self.company = company
self.email = email
self.position = position
self.uid = uid or uuid.uuid4()
def to_dict(self):
return vars(self)  # converts the object into a dictionary
@staticmethod  # defines a static method
def schema():
return ['name', 'company', 'email', 'position', 'uid']
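if __name__ == '__main__':
    # Quick illustrative check (made-up values): to_dict() exposes the same fields
    # listed by schema(), with uid generated automatically when it is omitted.
    demo_client = ClientModel('Ada Lovelace', 'ACME', 'ada@acme.io', 'CTO')
    print(ClientModel.schema())
    print(demo_client.to_dict())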
| 3.0625
| 3
|
Maths/All Divisors of a number/Python/main.py
|
CoderMonProjects/DSA-GFG
| 1
|
12783217
|
#1
# Every factorisation num = x*y with x <= y
# therefore has x*x <= num, so iterating x up to sqrt(num) finds every divisor pair.
def printDivisors(n):
i = 1
res = []
while(i*i <=n):
if (n % i == 0):
if i:
res.append(i)
if int(n/i) != i:
if int(n/i):
res.append(int(n/i))
i += 1
return sorted(res)
print(printDivisors(21321522312))
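# Illustrative check (not in the original snippet): each divisor i found below
# sqrt(n) brings its co-divisor n//i with it, so the full sorted list is produced
# from only about sqrt(n) iterations.
assert printDivisors(36) == [1, 2, 3, 4, 6, 9, 12, 18, 36]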
| 3.671875
| 4
|
manipulator_learning/learning/data/img_depth_dataset.py
|
utiasSTARS/manipulator_learning
| 2
|
12783218
|
<reponame>utiasSTARS/manipulator_learning<gh_stars>1-10
import numpy as np
import os
from PIL import Image
from multiprocessing import Pool
from itertools import repeat
import shutil
class Dataset:
def __init__(self, data_dir, np_filename='data.npz', img_type='.png', imgs_per_folder=1000,
state_dim=None, act_dim=None, reward_dim=1):
# valid_indices are valid to sample if looking to get a random sample. this way,
# we can always just grab the obs and action from the current index, and the obs from the
# next index to get a full transition, without having to store extra copies of next observations.
# observations at an "invalid index" are only to be used as "next observations", and the action
# at these indices should be considered garbage!!!
#
# if state_dim and act_dim are not None, just means that column header data is saved as well
self.total_ts = 0
self.total_ts_including_last_obs = 0
self.data_dir = data_dir
self.data_file = self.data_dir + '/' + np_filename
self.data = dict(
state_data=None, traj_lens=[], traj_lens_including_last_obs=[], valid_indices=[]
)
if state_dim is not None and act_dim is not None:
self.ind = dict(
s=0, a=state_dim, r=state_dim + act_dim, m=state_dim + act_dim + reward_dim,
d=state_dim + act_dim + reward_dim + 1)
self.data['column_headers'] = np.array(self.ind)
else:
self.ind = None
self.data['column_headers'] = np.array(None)
self.img_dir = data_dir + '/img'
self.depth_dir = data_dir + '/depth'
self.img_type = img_type
self.img_fol_str_len = 5 # up to 1e8 images if 1000 imgs per folder
self.img_str_len = len(str(imgs_per_folder - 1))
self.imgs_per_folder = imgs_per_folder
if os.path.exists(self.data_file):
file_dict = np.load(self.data_file, allow_pickle=True)
self.data = {key: file_dict[key] for key in file_dict.files}
if 'column_headers' not in self.data:
self.data['column_headers'] = np.array(None)
for k in self.data:
if k != 'column_headers':
self.data[k] = list(self.data[k])
self.data['state_data'] = np.array(self.data['state_data'])
self.data['valid_indices'] = list(self.data['valid_indices'])
self.ind = self.data['column_headers'].item()
self.total_ts = sum(self.data['traj_lens'])
self.total_ts_including_last_obs = sum(self.data['traj_lens_including_last_obs'])
print('TOTAL TS: ', self.total_ts)
self.ram_data = None
self.gpu_data = None # calling class can overwrite with gpu loaded data from torch or tf
self._loaded_ram_indices = np.array([])
def __len__(self):
return self.total_ts
def append_traj_data_lists(self, data_list, img_list, depth_list, final_obs_included=False,
new_indices=None):
# if appending single piece of new data, needs to still be a len 1 list
# if new_indices is not None, it is assumed to be a list-like set of integers
# starting from 0 (e.g. if there are a number of invalid indices in various places)
assert len(data_list) == len(img_list) == len(depth_list)
# append new data to internal file
if self.data['state_data'] is not None:
assert self.data['state_data'].shape[1:] == data_list[0].shape
self.data['state_data'] = np.concatenate([self.data['state_data'], np.array(data_list)])
else: # corresponds to first save
os.makedirs(self.data_dir)
os.makedirs(self.img_dir)
os.makedirs(self.depth_dir)
self.data['state_data'] = np.array(data_list)
if new_indices is not None:
number_new_data = len(new_indices)
else:
if final_obs_included:
number_new_data = len(data_list) - 1
else:
number_new_data = len(data_list)
self.data['traj_lens'].append(number_new_data)
self.data['traj_lens_including_last_obs'].append(len(data_list))
# save new imgs to disk
for i in range(len(img_list)):
im = Image.fromarray(img_list[i])
im.save(self.get_img_file(self.total_ts_including_last_obs + i, load=False))
np.save(self.get_depth_file(self.total_ts_including_last_obs + i, load=False), depth_list[i])
if new_indices is not None:
ds_new_indices = np.array(new_indices) + self.total_ts_including_last_obs
else:
ds_new_indices = range(self.total_ts_including_last_obs, self.total_ts_including_last_obs + number_new_data)
self.data['valid_indices'].extend(ds_new_indices)
self.total_ts += number_new_data
self.total_ts_including_last_obs += len(data_list)
# save new data to disk
swap_name = self.data_file.split('.npz')[0] + '_swp.npz' # attempt to prevent catastrophic data loss
np.savez_compressed(swap_name, **self.data)
shutil.copy(swap_name, self.data_file)
# np.savez_compressed(self.data_file, **self.data)
return list(ds_new_indices)
def remove_last_traj(self):
if len(self.data['traj_lens']) == 0:
print('No trajs to remove')
return
# update internal data file
last_i_to_keep = self.total_ts_including_last_obs - self.data['traj_lens_including_last_obs'][-1]
self.data['state_data'] = self.data['state_data'][:last_i_to_keep]
last_traj_len = self.data['traj_lens'][-1]
last_traj_len_including_last_obs = self.data['traj_lens_including_last_obs'][-1]
self.data['traj_lens'].pop()
self.data['traj_lens_including_last_obs'].pop()
self.data['valid_indices'] = self.data['valid_indices'][:-last_traj_len]
for i in range(last_i_to_keep, self.total_ts_including_last_obs):
os.remove(self.get_img_file(i))
os.remove(self.get_depth_file(i))
self.total_ts -= last_traj_len
self.total_ts_including_last_obs -= last_traj_len_including_last_obs
# overwrite data file on disk
swap_name = self.data_file.split('.npz')[0] + '_swp.npz' # attempt to prevent catastrophic data loss
np.savez_compressed(swap_name, **self.data)
shutil.copy(swap_name, self.data_file)
# np.savez_compressed(self.data_file, **self.data)
def remove_trajs(self, traj_indices, new_dataset_dir):
""" remove trajs from a set of indices -- indices should be a list (even if it's just one)
new_dataset_dir must be provided to avoid accidentally deleting old data. """
vi_sorted = np.sort(self.data['valid_indices'])
streaks = np.split(vi_sorted, np.where(np.diff(vi_sorted) != 1)[0] + 1)
for index in sorted(traj_indices, reverse=True):
del streaks[index]
new_valid_indices = np.concatenate(streaks)
print("Generating new dataset at %s with indices at %s removed" % (new_dataset_dir, traj_indices))
self.new_dataset_from_indices(new_dataset_dir, new_valid_indices)
def get_img_file(self, i, load=True):
# if load, then get filename for loading a file, otherwise assumed to be using for making a new file
if load:
assert i < self.total_ts_including_last_obs
folder, ind = self._get_img_folder_index(i)
fol_str = self.img_dir + '/' + str(folder).zfill(self.img_fol_str_len)
if not load:
os.makedirs(fol_str, exist_ok=True)
return fol_str + '/' + str(ind).zfill(self.img_str_len) + self.img_type
def get_depth_file(self, i, load=True):
# if load, then get filename for loading a file, otherwise assumed to be using for making a new file
if load:
assert i < self.total_ts_including_last_obs
folder, ind = self._get_img_folder_index(i)
fol_str = self.depth_dir + '/' + str(folder).zfill(self.img_fol_str_len)
if not load:
os.makedirs(fol_str, exist_ok=True)
return fol_str + '/' + str(ind).zfill(self.img_str_len) + '.npy'
def _get_img_folder_index(self, i):
return i // self.imgs_per_folder, i % self.imgs_per_folder
def load_to_ram_worker(self, index, normalize_img=False, add_depth_dim=False):
""" load a single index """
img = np.array(Image.open(self.get_img_file(index)))
if normalize_img:
img = (img * .003921569).astype('float32') # 1 / 255
depth = np.load(self.get_depth_file(index))
if add_depth_dim:
depth = np.expand_dims(depth, axis=-1).astype('float32')
return img, depth
def load_to_ram_multiind_worker(self, index_range, normalize_img=False, add_depth_dim=False):
""" load multiple indices """
img_depths = []
for i in index_range:
img, depth = self.load_to_ram_worker(i, normalize_img, add_depth_dim)
img_depths.append([img, depth])
return img_depths
def load_to_ram(self, num_workers=4, normalize_img=False, add_depth_dim=False,
load_unloaded=True, selected_valid_indices=None):
""" Load dataset to ram for faster training. Normalizing the image here can save
training time later at the extra expense of storing floats instead of uint8s in memory.
If load_unloaded, load any indices that are not yet loaded. NOTE: this assumes
that the data was loaded contiguously from 0 to a prev value of total_ts_including_last_obs.
Any other way of loading the ram will break this.
selected_valid_indices should be a set of valid indices to load, and the function will only load
indices not previously loaded (and will also ensure the final obs of each traj are loaded as well)."""
if self.ram_data is None or load_unloaded:
if selected_valid_indices is not None:
# TODO create a mapping between the called indices at the actual used indices
# i.e. so all of the wanted indices are loaded contiguously from 0 to len(selected_valid_indices)
# and then create a dict that contains maps from selected indices to actual indices
sorted = np.sort(selected_valid_indices)
streaks = np.split(sorted, np.where(np.diff(sorted) != 1)[0]+1)
with_last = [np.append(e, e[-1]+1) for e in streaks]
wanted_indices = np.hstack(with_last)
indices_to_load = np.setdiff1d(wanted_indices, self._loaded_ram_indices)
else:
if load_unloaded and self.ram_data is not None:
indices_to_load = range(len(self.ram_data['img']), self.total_ts_including_last_obs)
else:
indices_to_load = range(self.total_ts_including_last_obs)
if len(indices_to_load) < 5000:
data = self.load_to_ram_multiind_worker(indices_to_load, normalize_img, add_depth_dim)
else:
with Pool(processes=num_workers) as pool:
# data = pool.map(ImgReplayBufferDisk.load_to_ram_worker, range(self.dataset.total_ts_including_last_obs))
# data = pool.map(self.load_to_ram_worker, range(self.total_ts_including_last_obs))
data = pool.starmap(self.load_to_ram_worker, zip(indices_to_load,
repeat(normalize_img), repeat(add_depth_dim)))
if load_unloaded and self.ram_data is not None:
if len(data) > 0:
self.ram_data['img'] = np.concatenate((self.ram_data['img'], np.array([item[0] for item in data])))
self.ram_data['depth'] = np.concatenate((self.ram_data['depth'], np.array([item[1] for item in data])))
self.ram_data['state'] = self.data['state_data'].astype('float32')
else:
self.ram_data = dict(
img=np.array([item[0] for item in data]),
depth=np.array([item[1] for item in data]),
state=self.data['state_data'].astype('float32'))
print('RAM used by dataset: %.1f MB' % ((self.ram_data['img'].nbytes + self.ram_data['depth'].nbytes +
self.ram_data['state'].nbytes) / 1e6))
else:
print('RAM data already loaded. Flush ram data before calling load to ram again, or use load_unloaded.')
def flush_ram(self):
self.ram_data = None
self.gpu_data = None
self._loaded_ram_indices = np.array([])
def new_dataset_from_indices(self, new_dir, indices):
""" Generate a new dataset obj from a given set of indices"""
if self.ind is not None:
state_dim = self.ind['a']
act_dim = self.ind['r'] - self.ind['a']
else:
state_dim = None
act_dim = None
new_dataset = Dataset(new_dir, img_type=self.img_type, imgs_per_folder=self.imgs_per_folder,
state_dim=state_dim, act_dim=act_dim)
i_sorted = np.sort(indices)
streaks = np.split(i_sorted, np.where(np.diff(i_sorted) != 1)[0] + 1)
with_last = [np.append(e, e[-1] + 1) for e in streaks]
for i_s, s in enumerate(with_last):
data_list = list(self.data['state_data'][s])
img_list = []; depth_list = []
for i in s:
img_list.append(np.array(Image.open(self.get_img_file(i))))
depth_list.append(np.load(self.get_depth_file(i)))
new_dataset.append_traj_data_lists(data_list, img_list, depth_list, final_obs_included=True)
print('Copying traj %d of %d to new dataset at %s' % (i_s + 1, len(with_last), new_dir))
return new_dataset
def get_data(self, inds, normalize_img=True, add_depth_dim=True):
""" Get some data given some indices """
if self.ram_data is None:
img_depths = self.load_to_ram_multiind_worker(inds, normalize_img, add_depth_dim)
return (
np.array([item[0] for item in img_depths]),
np.array([item[1] for item in img_depths]),
self.data['state_data'].astype('float32')[np.array(inds)])
else:
return (
self.ram_data['img'][inds],
self.ram_data['depth'][inds],
self.ram_data['state'][inds])
def get_traj_indices_as_list(self, traj_inds=None):
""" Get data indices correponding to continuous trajectories as a list of arrays. If traj_inds is
None, return all indices. Otherwise, return the indices corresponding to the trajectory indices
(e.g. if you only want the i-th trajectory)"""
inds_list = []
if traj_inds is None:
traj_inds = range(len(self.data['traj_lens']))
for i in traj_inds:
start_i = sum(self.data['traj_lens'][:i])
start_i_valid = self.data['valid_indices'][start_i]
final_i = start_i_valid + self.data['traj_lens'][i]
inds_list.append(list(range(start_i_valid, final_i)))
return inds_list
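if __name__ == '__main__':
    # Illustrative sketch only: the directory, image size and state layout below
    # are made up. With state_dim=4, act_dim=2 and reward_dim=1, the column map
    # built in __init__ (keys s, a, r, m, d) implies at least 9 state columns;
    # one 3-step trajectory is appended and a single transition is read back
    # straight from disk (no preloading to RAM).
    import tempfile
    demo_dir = os.path.join(tempfile.mkdtemp(), 'demo_dataset')
    ds = Dataset(demo_dir, state_dim=4, act_dim=2)
    demo_states = [np.random.rand(9).astype('float32') for _ in range(3)]
    demo_imgs = [(np.random.rand(16, 16, 3) * 255).astype('uint8') for _ in range(3)]
    demo_depths = [np.random.rand(16, 16).astype('float32') for _ in range(3)]
    ds.append_traj_data_lists(demo_states, demo_imgs, demo_depths, final_obs_included=True)
    imgs, depths, states = ds.get_data([0])
    print(len(ds), imgs.shape, depths.shape, states.shape)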
| 2.625
| 3
|
domain_to_prefix.py
|
ljm625/cisco_sdwan_policy_python
| 11
|
12783219
|
import argparse
from pprint import pprint
try:
import yaml
import sublist3r
import dns.resolver
except Exception as e:
print("Error loading libraries, please run following commands first:")
print("pip install pyyaml dnspython")
print("git clone https://github.com/aboul3la/Sublist3r")
print("cd Sublist3r")
print("python setup.py install")
exit(1)
from cisco_sdwan_policy.List.DataPrefix import DataPrefix
from cisco_sdwan_policy.List.Prefix import Prefix
from cisco_sdwan_policy import PolicyLoader
def config_reader(config_file):
'''
Read config from yaml file
:return: config in dict format
'''
with open(config_file) as file:
        config = yaml.load(file.read(), Loader=yaml.SafeLoader)
# print(result)
return config
def parse_domain(domain,nameserver):
domain_list=[]
ip_list=set()
if "*" in domain and domain[0:2]!="*.":
raise Exception("Invalid domain: {}".format(domain))
elif "*" in domain:
sub_domains = sublist3r.main(domain[2:], 40, None, ports=None, silent=False, verbose=False,
enable_bruteforce=False, engines=None)
print(sub_domains)
domain_list.extend(sub_domains)
else:
domain_list.append(domain)
# Use DNSPYTHON to get info.
resolver = dns.resolver.Resolver()
resolver.lifetime = resolver.timeout = 20.0
for domain_name in domain_list:
print("Resolving: {}".format(domain_name))
try:
resolver.nameservers=[nameserver]
response =resolver.query(domain_name)
for answer in response.response.answer:
for ip in answer.items:
if ip.rdtype == 1:
ip_list.add(ip.address+"/32")
except:
pass
# try:
# response = dns.resolver.query(domain_name, "CNAME")
# for answer in response.response.answer:
# for ip in answer.items:
# if ip.rdtype == 1:
# ip_list.add(ip.address+"/32")
# except:
# pass
return ip_list
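# Hedged usage sketch: resolve one non-wildcard domain into /32 prefixes.
# The domain and nameserver below are illustrative assumptions, not values
# taken from any real config file.
def _example_parse_domain():
    prefixes = parse_domain("example.com", "8.8.8.8")
    print(sorted(prefixes))  # e.g. ['93.184.216.34/32']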
if __name__ == '__main__':
# First read all the configurations from config file.
    parser = argparse.ArgumentParser(description='App List Generator.')
parser.add_argument('config', metavar='config_file_path', type=str,
help='config yaml path')
args = parser.parse_args()
config_file=args.config
try:
config = config_reader(config_file)
print("Config file {} loaded".format(args.config))
app_ip_info ={}
assert type(config["sdwan_server"])==dict
assert type(config["apps"])==dict
assert type(config["dns_server"])==str
except Exception as e:
print("ERROR : Invalid config file.")
print(e)
exit(1)
for appname,domain_list in config["apps"].items():
app_ips=set()
for domain in domain_list:
ip_list = parse_domain(domain,config["dns_server"])
app_ips = app_ips | ip_list
app_ip_info[appname]=list(app_ips)
pprint(app_ip_info)
print("Start creating Prefix Lists")
pl = PolicyLoader.init(config["sdwan_server"])
pl.load()
existing_list=[i.name for i in pl.list_policies]
for appname,ip_list in app_ip_info.items():
if "{}_prefix".format(appname) not in existing_list:
Prefix("{}_prefix".format(appname),prefix_list=ip_list).save()
print("Created Prefix List: {}_prefix".format(appname))
else:
for i in pl.list_policies:
if i.name=="{}_prefix".format(appname):
i.set_entries(ip_list)
i.save()
print("Updated Prefix List: {}".format(i.name))
if "{}_dataprefix".format(appname) not in existing_list:
DataPrefix("{}_dataprefix".format(appname),prefix_list=ip_list).save()
print("Created Data Prefix List: {}_dataprefix".format(appname))
else:
for i in pl.list_policies:
if i.name=="{}_dataprefix".format(appname):
i.set_entries(ip_list)
i.save()
print("Updated Data Prefix List: {}".format(i.name))
| 2.609375
| 3
|
archive/get_team_names.py
|
jletienne/yff
| 0
|
12783220
|
<reponame>jletienne/yff
def get_team_names(league_id='390.l.XXXXXX'):
teams = {}
num_teams = get_num_teams(league_id)
for i in range(1, num_teams+1):
        url = 'https://fantasysports.yahooapis.com/fantasy/v2/team/{0}.t.{1}'.format(league_id, str(i))
response = oauth.session.get(url, params={'format': 'json'})
r = response.json()
team = r['fantasy_content']['team'][0]
team_key = team[0]['team_key']
team_id = team[1]['team_id']
team_name = team[2]['name']
teams[str(team_id)] = {'team_name': team_name, 'team_key': team_key}
return teams
| 3
| 3
|
src/tf_polygon/minimal_distance.py
|
dwferrer/tf-poly
| 0
|
12783221
|
import tensorflow as tf
from tf_polygon.primitives import get_edges, point_in_polygon, point_line_segment_distance
def minimal_distance(poly_a, poly_b):
x_a = tf.convert_to_tensor(poly_a)
x_b = tf.convert_to_tensor(poly_b)
e_a = get_edges(poly_a)
e_b = get_edges(poly_b)
a_in_b = point_in_polygon(e_b[..., None, :, :, :], x_a)
a_in_b = tf.reduce_any(a_in_b, axis=-1)
b_in_a = point_in_polygon(e_a[..., None, :, :, :], x_b)
b_in_a = tf.reduce_any(b_in_a, axis=-1)
intersection = tf.logical_or(a_in_b, b_in_a)
    # the minimal distance must occur between an edge of a and a vertex of b, or vice versa
d_a_b = point_line_segment_distance(e_a[..., :, None, :, :], x_b[..., None, :])
d_a_b = tf.reduce_min(d_a_b, axis=[-2, -1])
d_b_a = point_line_segment_distance(e_b[..., :, None, :, :], x_a[..., None, :])
d_b_a = tf.reduce_min(d_b_a, axis=[-2, -1])
d = tf.minimum(d_a_b, d_b_a)
d = tf.where(intersection, tf.constant(0., d.dtype), d)
return d
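# Hedged usage sketch: distance between two axis-aligned unit squares, the
# second shifted by 3 along x (expected distance of about 2.0). The vertex
# layout [n_vertices, 2] is assumed from get_edges()/point_in_polygon() above.
def _example_minimal_distance():
    square_a = tf.constant([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
    square_b = square_a + tf.constant([3., 0.])
    return minimal_distance(square_a, square_b)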
| 2.828125
| 3
|
rtm.py
|
Ultronixon/rtmpy
| 11
|
12783222
|
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
from __future__ import division
import finite_difference as fd
import numpy as np
def rtm1d(v,seis,dt,dz):
nt = len(seis)
nx = len(v)
a = fd.alpha(v,dt,dz)
ul, u, up = np.zeros((3,nx))
data = np.zeros((nt,nx))
g = np.zeros(u.shape)
g[0] = 1
ul += g*seis[nt-1]
u += g*seis[nt-2]
    for i in range(nt-3,-1,-1):
src = g*seis[i]
ul[0]=2*u[0]-up[0]+a[0]**2*(u[1]-2*u[0]) + src[0]
ul[1:nx-1]=2*u[1:nx-1]-up[1:nx-1]+a[1:nx-1]**2*(u[2:nx]-2*u[1:nx-1]+ \
u[0:nx-2]) + src[1:nx-1]
ul = fd.abc1D(u, ul, a, src)
up = np.copy(u)
u = np.copy(ul)
data[i] = np.copy(u)
return data
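# Hedged usage sketch for rtm1d: a constant-velocity model and a single-spike
# recorded trace; every number below is illustrative only.
def _example_rtm1d():
    nx, nt, dz, dt = 200, 500, 5.0, 0.001
    v = 2000.0 * np.ones(nx)       # constant velocity model (m/s)
    seis = np.zeros(nt)
    seis[nt // 2] = 1.0            # single spike as the recorded trace
    return rtm1d(v, seis, dt, dz)  # back-propagated wavefield, shape (nt, nx)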
def rtm2D(v,shotgt,dt,dx,dz):
# rtm 2D with different algorithm
nz,nx = v.shape
nt = shotgt[:,0].size
ul, u, up = np.zeros((3,nz,nx))
up[0,:], u[0,:], ul[0,:] = shotgt[nt-3:nt,:]
a = fd.alpha(v,dt,dx)**2
a2 = 2-4*a
data = np.zeros((nt, nz, nx))
e = (np.exp(-((0.015*(20-np.arange(1,21)))**2) ))**10
c = 2
    for i in range(nt-2,-1,-1):
c+=1
b = min(c,nz)
        for iz in range(b):
ul[iz,0:20] = e*ul[iz,0:20]
u[iz,0:20] = e*u[iz,0:20]
ul[iz,nx-20:] = e[::-1]*ul[iz,nx-20:]
u[iz,nx-20:] = e[::-1]*u[iz,nx-20:]
if b >= (nz-20):
            for iz in range(nz-20,nz):
ul[iz] = e[nz-iz-1]*ul[iz]
u[iz] = e[nz-iz-1]*u[iz]
if b == nz:
d = nz-2
else:
d = b
up[0:b,1:nx-1] = up[0:b,1:nx-1]-ul[0:b,1:nx-1]
u[1:d,1:nx-1] = a2[1:d,1:nx-1]*ul[1:d,1:nx-1]+u[1:d,1:nx-1]+a[1:d,2:nx]*ul[1:d,2:nx]\
+a[1:d,0:nx-2]*ul[1:d,0:nx-2]+a[2:d+1,1:nx-1]*ul[2:d+1,1:nx-1]+\
+a[0:d-1,1:nx-1]*ul[0:d-1,1:nx-1]
u[0,1:nx-1] = a2[0,1:nx-1]*ul[0,1:nx-1]+u[0,1:nx-1]+a[0,2:nx]*ul[0,2:nx]\
+a[0,0:nx-2]*ul[0,0:nx-2]+a[1,1:nx-1]*ul[1,1:nx-1]
if b == nz:
u[nz-1,1:nx-1] = a2[nz-1,1:nx-1]*ul[nz-1,1:nx-1]+u[nz-1,1:nx-1]\
+a[nz-1,2:nx]*ul[nz-1,2:nx]+a[nz-1,0:nx-2]*ul[nz-1,0:nx-2]\
+a[nz-2,1:nx-1]*ul[nz-1,1:nx-1]
u[nz-1,0] = a2[nz-1,0]*ul[nz-1,0]+u[nz-1,0]+a[nz-1,1]*ul[nz-1,1]\
+a[nz-2,0]*ul[nz-2,0]
u[1:d,0] = a2[1:d,0]*ul[1:d,0]+u[1:d,0]+a[1:d,1]*ul[1:d,1]+a[2:d+1,0]\
*ul[2:d+1,0]+a[0:d-1,0]*ul[0:d-1,0]
u[1:d,nx-1] = a2[1:d,nx-1]*ul[1:d,nx-1]+u[1:d,nx-1]+a[1:d,nx-2]*ul[1:d,nx-2]\
+a[2:d+1,nx-1]*ul[2:d+1,nx-1]+a[0:d-1,nx-1]*ul[0:d-1,nx-1]
u[0,0] = a2[0,0]*ul[0,0]+u[0,0]+a[0,1]*ul[0,1]+a[1,0]*ul[1,0]
u[0,nx-1] = a2[0,nx-1]*ul[0,nx-1]+u[0,nx-1]+a[0,nx-1]*ul[0,nx-1]+a[1,nx-1]*ul[1,nx-1]
ul = np.copy(u)
u = np.copy(up)
if i > 1:
up[1:nz-1] = 0;
up[0] = shotgt[i-3,:]
data[i] = ul
return data
| 2.25
| 2
|
vehiculo/migrations/0001_initial.py
|
gusanare1/site1
| 0
|
12783223
|
<gh_stars>0
# Generated by Django 2.0.5 on 2018-05-18 17:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Carro',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('anio', models.IntegerField(default=2000)),
('precio', models.FloatField(default=0.0)),
('esta_inspeccionado', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Ciudad',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Color',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Marca',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Modelo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=30)),
('idMarca', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vehiculo.Marca')),
],
),
migrations.CreateModel(
name='Persona',
fields=[
('cedula', models.CharField(max_length=10, primary_key=True, serialize=False)),
('nombre', models.CharField(max_length=30)),
('apellido', models.CharField(max_length=30)),
('correo', models.EmailField(blank=True, max_length=70, unique=True)),
],
),
migrations.CreateModel(
name='Provincia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=30)),
],
),
migrations.AddField(
model_name='ciudad',
name='idProvincia',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vehiculo.Provincia'),
),
migrations.AddField(
model_name='carro',
name='idColor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vehiculo.Color'),
),
migrations.AddField(
model_name='carro',
name='idModelo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vehiculo.Modelo'),
),
]
| 1.859375
| 2
|
code archieve/temperature_preprocessing_extract_phase_amplitude.py
|
yuanyuansjtu/Angstrom-method
| 2
|
12783224
|
from scipy.io import loadmat
import tables
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os, os.path
import time
import scipy.signal
from scipy import signal
from lmfit import minimize, Parameters
import scipy.optimize as optimization
import operator
class temperature_preprocessing_extract_phase_amplitude():
def __init__(self,exp_setup,line_info,time_stamp):
self.exp_setup = exp_setup
# exp_setup = {'px':25/10**6,'f_heating':1,'gap':20}
self.line_info = line_info
# line_info = {'N_line_groups':N_line_groups,'N_horizontal_lines':N_horizontal_lines,'N_files':N_files}
self.time_stamp = time_stamp
def butter_highpass(self,cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = signal.butter(order, normal_cutoff, btype='high', analog=False)
return b, a
def butter_highpass_filter(self,data, cutoff, fs, order=4):
b, a = self.butter_highpass(cutoff, fs, order=order)
y = signal.filtfilt(b, a, data)
return y
def filter_signal(self,df_rec,f0):
cutoff = f0*0.5
fs = (df_rec.shape[0])/(max(df_rec['reltime'])-min(df_rec['reltime']))
# Plot the frequency response for a few different orders.
time = df_rec['reltime']
N = df_rec.shape[1]-1
df_filtered = pd.DataFrame(data = {'reltime':np.array(df_rec['reltime'])})
for i in range(N):
temp = (self.butter_highpass_filter(df_rec[i],cutoff,fs))
df_filtered[i] = np.array(temp)
return df_filtered
def sin_func(self,x,amplitude,phase,bias,f_heating):
return amplitude*np.sin(2*np.pi*f_heating*x + phase)+bias
def residual(self,params, x, data, eps_data):
amplitude = params['amplitude']
phase = params['phase']
bias = params['bias']
freq = params['frequency']
model = amplitude*np.sin(2*np.pi*freq*x + phase)+bias
return (data-model) / eps_data
def extract_phase_amplitude_sinusoidal_function(self,index,df_temperature):
px = self.exp_setup['px']
f_heating = self.exp_setup['f_heating']
gap = self.exp_setup['gap']
fitting_params_initial = {'amplitude':0.2,'phase':0.1,'bias':0.1}
n_col = df_temperature.shape[1]
tmin = df_temperature['reltime'][0]
time = df_temperature['reltime']-tmin
# A1 = df_temperature.iloc[:,index[0]+3]
# A2 = df_temperature.iloc[:,index[1]+3]
A1 = df_temperature[index[0]]
A2 = df_temperature[index[1]]
A1-= A1.mean()
A2-= A2.mean()
x0 = np.array([1,0,0]) # amplitude,phase,bias
sigma = np.ones(len(time))
params1 = Parameters()
params1.add('amplitude', value=fitting_params_initial['amplitude'])
params1.add('phase', value=fitting_params_initial['phase'])
params1.add('bias', value=fitting_params_initial['bias'])
params1.add('frequency', value=f_heating,vary=False)
res1 = minimize(self.residual, params1, args=(time, A1, sigma))
params2 = Parameters()
params2.add('amplitude', value=fitting_params_initial['amplitude'])
params2.add('phase', value=fitting_params_initial['phase'])
params2.add('bias', value=fitting_params_initial['bias'])
params2.add('frequency', value=f_heating,vary=False)
res2 = minimize(self.residual, params2, args=(time, A2, sigma))
amp1 = np.abs(res1.params['amplitude'].value)
amp2 = np.abs(res2.params['amplitude'].value)
p1 = res1.params['phase'].value
p2 = res2.params['phase'].value
amp_ratio = min(np.abs(amp1/amp2),np.abs(amp2/amp1))
phase_diff = np.abs(p1-p2)
if phase_diff>2*np.pi:
phase_diff = phase_diff - 2*np.pi
if phase_diff>np.pi/2:
phase_diff = np.pi - phase_diff
T_total = np.max(time)-np.min(time)
df = 1/T_total
L = abs(index[0]-index[1])*px*gap
w = 2*np.pi*f_heating
return L, phase_diff,amp_ratio
def extract_phase_amplitude_Fourier_transform(self,index,df_temperature):
px = self.exp_setup['px']
f_heating = self.exp_setup['f_heating']
gap = self.exp_setup['gap']
n_col = df_temperature.shape[1]
tmin = df_temperature['reltime'][0]
time = df_temperature['reltime']-tmin
fft_X1 = np.fft.fft(df_temperature.iloc[:,index[0]+3])
fft_X2 = np.fft.fft(df_temperature.iloc[:,index[1]+3])
T_total = np.max(time)-np.min(time)
df = 1/T_total
N_0 = int(f_heating/df)
magnitude_X1 = np.abs(fft_X1)
magnitude_X2 = np.abs(fft_X2)
phase_X1 = np.angle(fft_X1)
phase_X2 = np.angle(fft_X2)
N1, Amp1 = max(enumerate(magnitude_X1[N_0-5:N_0+5]), key=operator.itemgetter(1))
N2, Amp2 = max(enumerate(magnitude_X2[N_0-5:N_0+5]), key=operator.itemgetter(1))
Nf = N_0+N1-5
amp_ratio = magnitude_X1[Nf]/magnitude_X2[Nf]
phase_diff = phase_X1[Nf]-phase_X2[Nf]
if phase_diff<0:
phase_diff = phase_diff+np.pi*2
L = abs(index[0]-index[1])*px*gap
return L, phase_diff,amp_ratio
def fit_amp_phase_one_batch(self,df_temperature,method):
px = self.exp_setup['px']
f_heating = self.exp_setup['f_heating']
gap = self.exp_setup['gap']
N_lines = df_temperature.shape[1]-1
x_list = np.zeros(N_lines-1)
phase_diff_list = np.zeros(N_lines-1)
amp_ratio_list = np.zeros(N_lines-1)
for i in range(N_lines):
if i>0:
index = [0,i]
if method == 'fft':
x_list[i-1],phase_diff_list[i-1], amp_ratio_list[i-1] = self.extract_phase_amplitude_Fourier_transform(index,df_temperature)
else:
x_list[i-1],phase_diff_list[i-1], amp_ratio_list[i-1] = self.extract_phase_amplitude_sinusoidal_function(index,df_temperature)
return x_list,phase_diff_list,amp_ratio_list
def extract_temperature_from_IR(self,X0,Y0,rec_name,N_avg):
# this function takes the average of N pixels in Y0 direction, typically N = 100
gap = self.exp_setup['gap']
N_line_groups = self.line_info['N_line_groups']
N_horizontal_lines = self.line_info['N_horizontal_lines']
N_files = self.line_info['N_files']
T = np.zeros((N_line_groups,N_horizontal_lines,N_files))
for k in range(N_files):
temp = pd.read_csv(self.line_info['data_path']+rec_name+str(k)+'.csv')
for j in range(N_line_groups):
for i in range(N_horizontal_lines):
T[j,i,k] = temp.iloc[Y0-int(N_avg/2):Y0+int(N_avg/2),X0-j-gap*i].mean() # for T, first dim is line group, 2nd dimension is # of lines, 3rd dim is number of files
return T
def batch_process_horizontal_lines(self,T,method):
#T averaged temperature for N_lines and N_line_groups and N_frames
x_list_all = []
phase_diff_list_all = []
amp_ratio_list_all = []
N_horizontal_lines = self.line_info['N_horizontal_lines']
N_line_groups = self.line_info['N_line_groups']
px = self.exp_setup['px']
f_heating = self.exp_setup['f_heating']
gap = self.exp_setup['gap']
time_stamp = self.time_stamp
for j in range(N_line_groups):
            horizontal_temp = T[j,:,:].T
            df = pd.DataFrame(horizontal_temp)
df['reltime'] = time_stamp['reltime']
df_filtered = self.filter_signal(df,f_heating)
x_list,phase_diff_list,amp_ratio_list = self.fit_amp_phase_one_batch(df_filtered,method)
x_list_all = x_list_all+list(x_list)
phase_diff_list_all = phase_diff_list_all+list(phase_diff_list)
amp_ratio_list_all = amp_ratio_list_all+list(amp_ratio_list)
df_result_IR = pd.DataFrame(data = {'x':x_list_all,'amp_ratio':amp_ratio_list_all,'phase_diff':phase_diff_list_all})
return df_result_IR
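# Hedged usage sketch: wiring the three stages together. The dictionary keys
# mirror the comments in __init__; the concrete values, the data_path folder
# and the rec_name prefix passed to extract_temperature_from_IR() are assumptions.
def _example_pipeline():
    exp_setup = {'px': 25/10**6, 'f_heating': 1, 'gap': 20}
    line_info = {'N_line_groups': 4, 'N_horizontal_lines': 10, 'N_files': 100,
                 'data_path': './IR_frames/'}
    time_stamp = pd.DataFrame({'reltime': np.linspace(0, 10, 100)})
    proc = temperature_preprocessing_extract_phase_amplitude(exp_setup, line_info, time_stamp)
    T = proc.extract_temperature_from_IR(X0=300, Y0=200, rec_name='rec_', N_avg=100)
    return proc.batch_process_horizontal_lines(T, method='fft')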
| 2.53125
| 3
|
python/ReportParse.py
|
jay4842/sim-engine
| 0
|
12783225
|
import argparse
import sys
import os
import glob
import simParse
import entityParse
parser = argparse.ArgumentParser(description='Process a report')
#parser.add_argument('--input_file', dest='input_file', default='/logs/EntityLog/', help='input file path')
#parser.add_argument('--report_type', dest='report_type', default='entity_report', help='input file type')
parser.add_argument('--input_file', dest='input_file', default='/logs/simReports/sim_report.txt', help='input file path')
parser.add_argument('--report_type', dest='report_type', default='sim_report', help='input file type')
args = parser.parse_args()
def parse_file(input_path, type_of_report, database='sim_test'):
print("parsing: " + input_path)
print("Type : " + type_of_report)
lines = []
''' a sim_report is a detailed report of population information for each turn '''
    if type_of_report == 'sim_report' and '.txt' in input_path:
print('creating sim report parsed file')
with open(input_path, 'r') as file:
for line in file:
lines.append(line)
#
file.close()
simParse.parse_sim_report(lines, database)
    elif type_of_report == 'entity_report' and input_path[-1] == '/':
files = glob.glob('{}*.txt'.format(input_path))
files.sort()
print('found {} files'.format(len(files)))
for filename in files:
lines = []
with open(filename, 'r') as file:
for line in file:
lines.append(line)
#
file.close()
entityParse.parse_entity_report(lines, filename)
'''
MAIN
'''
if __name__ == "__main__":
path = sys.path[0]
path = path.split('/')
path = "/".join(path[0:len(path)-1])
#parse_file(path + args.input_file, args.report_type)
parse_file(path + '/logs/simReports/sim_report.txt', 'sim_report')
parse_file(path + '/logs/EntityLog/', 'entity_report')
#string = '{3340'
#print(string.split('{')[1])
| 3.296875
| 3
|
Model/CNN-handsign/train.py
|
LihaoWang1991/hand-sign-classification-on-Azure
| 0
|
12783226
|
<filename>Model/CNN-handsign/train.py<gh_stars>0
import math
import numpy as np
import h5py
import scipy
from scipy import ndimage
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from cnn_utils import *
import argparse
import os
from azureml.core import Run
import pickle
np.random.seed(1)
# CNN model functions
def create_placeholders(n_H0, n_W0, n_C0, n_y):
"""
Creates the placeholders for the tensorflow session.
Arguments:
n_H0 -- scalar, height of an input image
n_W0 -- scalar, width of an input image
n_C0 -- scalar, number of channels of the input
n_y -- scalar, number of classes
Returns:
X -- placeholder for the data input, of shape [None, n_H0, n_W0, n_C0] and dtype "float"
Y -- placeholder for the input labels, of shape [None, n_y] and dtype "float"
"""
X = tf.placeholder("float", [None, n_H0, n_W0, n_C0])
Y = tf.placeholder("float", [None, n_y])
return X, Y
def initialize_parameters():
"""
Initializes weight parameters to build a neural network with tensorflow. The shapes are:
W1 : [4, 4, 3, 8]
W2 : [2, 2, 8, 16]
Returns:
parameters -- a dictionary of tensors containing W1, W2
"""
tf.set_random_seed(1)
W1 = tf.get_variable("W1", [4, 4, 3, 8], initializer = tf.contrib.layers.xavier_initializer(seed = 0))
W2 = tf.get_variable("W2", [2, 2, 8, 16], initializer = tf.contrib.layers.xavier_initializer(seed = 0))
parameters = {"W1": W1,
"W2": W2}
return parameters
def forward_propagation(X, parameters):
"""
Implements the forward propagation for the model:
CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED
Arguments:
X -- input dataset placeholder, of shape (input size, number of examples)
parameters -- python dictionary containing your parameters "W1", "W2"
the shapes are given in initialize_parameters
Returns:
Z3 -- the output of the last LINEAR unit
"""
# Retrieve the parameters from the dictionary "parameters"
W1 = parameters['W1']
W2 = parameters['W2']
# CONV2D: stride of 1, padding 'SAME'
Z1 = tf.nn.conv2d(X, W1, strides = [1,1,1,1], padding = 'SAME')
# RELU
A1 = tf.nn.relu(Z1)
    # MAXPOOL: window 8x8, stride 8, padding 'SAME'
P1 = tf.nn.max_pool(A1, ksize = [1,8,8,1], strides = [1,8,8,1], padding = 'SAME')
# CONV2D: filters W2, stride 1, padding 'SAME'
Z2 = tf.nn.conv2d(P1, W2, strides = [1,1,1,1], padding = 'SAME')
# RELU
A2 = tf.nn.relu(Z2)
# MAXPOOL: window 4x4, stride 4, padding 'SAME'
P2 = tf.nn.max_pool(A2, ksize = [1,4,4,1], strides = [1,4,4,1], padding = 'SAME')
# FLATTEN
P2 = tf.contrib.layers.flatten(P2)
    # FULLY-CONNECTED without non-linear activation function (do not call softmax).
# 6 neurons in output layer. Hint: one of the arguments should be "activation_fn=None"
Z3 = tf.contrib.layers.fully_connected(P2, 6, activation_fn = None)
return Z3
def compute_cost(Z3, Y):
"""
Computes the cost
Arguments:
Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples)
Y -- "true" labels vector placeholder, same shape as Z3
Returns:
cost - Tensor of the cost function
"""
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = Z3, labels = Y))
return cost
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.009,
num_epochs = 300, minibatch_size = 64, print_cost = True):
"""
Implements a three-layer ConvNet in Tensorflow:
CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED
Arguments:
X_train -- training set, of shape (None, 64, 64, 3)
Y_train -- test set, of shape (None, n_y = 6)
X_test -- training set, of shape (None, 64, 64, 3)
Y_test -- test set, of shape (None, n_y = 6)
learning_rate -- learning rate of the optimization
num_epochs -- number of epochs of the optimization loop
minibatch_size -- size of a minibatch
print_cost -- True to print the cost every 100 epochs
Returns:
train_accuracy -- real number, accuracy on the train set (X_train)
test_accuracy -- real number, testing accuracy on the test set (X_test)
parameters -- parameters learnt by the model. They can then be used to predict.
"""
ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables
tf.set_random_seed(1) # to keep results consistent (tensorflow seed)
seed = 3 # to keep results consistent (numpy seed)
(m, n_H0, n_W0, n_C0) = X_train.shape
n_y = Y_train.shape[1]
costs = [] # To keep track of the cost
# Create Placeholders of the correct shape
X, Y = create_placeholders(n_H0, n_W0, n_C0, n_y)
# Initialize parameters
parameters = initialize_parameters()
# Forward propagation: Build the forward propagation in the tensorflow graph
Z3 = forward_propagation(X, parameters)
# Cost function: Add cost function to tensorflow graph
cost = compute_cost(Z3, Y)
# Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer that minimizes the cost.
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
# Initialize all the variables globally
init = tf.global_variables_initializer()
# Start the session to compute the tensorflow graph
with tf.Session() as sess:
# Run the initialization
sess.run(init)
# Do the training loop
for epoch in range(num_epochs):
minibatch_cost = 0.
num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
seed = seed + 1
minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# IMPORTANT: The line that runs the graph on a minibatch.
                # Run the session to execute the optimizer and the cost; the feed_dict should contain a minibatch for (X, Y).
_ , temp_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
minibatch_cost += temp_cost / num_minibatches
# Print the cost every epoch
if print_cost == True and epoch % 5 == 0:
print ("Cost after epoch %i: %f" % (epoch, minibatch_cost))
if print_cost == True and epoch % 1 == 0:
costs.append(minibatch_cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
# Calculate the correct predictions
predict_op = tf.argmax(Z3, 1)
correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))
# Calculate accuracy on the test set
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print(accuracy)
train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
print("Train Accuracy:", train_accuracy)
print("Test Accuracy:", test_accuracy)
final_param = {}
final_param['W1'] = parameters['W1'].eval()
final_param['W2'] = parameters['W2'].eval()
return train_accuracy, test_accuracy, final_param
# let user feed in 1 parameter, the location of the data files (from datastore)
parser = argparse.ArgumentParser()
parser.add_argument('--data-folder', type=str, dest='data_folder', help='data folder mounting point')
args = parser.parse_args()
data_folder = args.data_folder + '/signs'
print('Data folder:', data_folder)
# load train and test set into numpy arrays
# note we scale the pixel intensity values to 0-1 (by dividing it with 255.0) so the model can converge faster.
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset(data_folder)
X_train = X_train_orig/255.
X_test = X_test_orig/255.
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T
# get hold of the current run
run = Run.get_context()
# Train the model
train_accuracy, test_accuracy, final_param = model(X_train, Y_train, X_test, Y_test)
# Result logging
run.log('train_accuracy', np.float(train_accuracy))
run.log('test_accuracy', np.float(test_accuracy))
os.makedirs('outputs', exist_ok=True)
# note file saved in the outputs folder is automatically uploaded into experiment record
f = open('outputs/hand-sign-classification.pkl','wb')
pickle.dump(final_param,f)
f.close()
| 2.859375
| 3
|
search_engines_cli.py
|
csecht/Search-Engines-Scraper
| 2
|
12783227
|
#!/usr/bin/env python3
import argparse
try:
from search_engines.engines import search_engines_dict
from search_engines.multiple_search_engines import MultipleSearchEngines, AllSearchEngines
from search_engines import config
except ImportError as err:
MSG = '\nPlease install `search_engines` to resolve this error.'
raise ImportError(f'{MSG}\n') from err
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-q',
help='query', required=True)
parser.add_argument('-e',
help='search engine(s) - ' + ', '.join(search_engines_dict) + ', or "all"',
default='duckduckgo')
parser.add_argument('-o',
help='output file [html, csv, json]',
default='print')
parser.add_argument('-n',
help='filename for output file',
default=str(config.OUTPUT_DIR / 'output'))
parser.add_argument('-p',
help='number of pages',
default=config.SEARCH_ENGINE_RESULTS_PAGES,
type=int)
parser.add_argument('-f',
help='filter results [url, title, text, host]',
default=None)
parser.add_argument('-i',
help='ignore duplicates, useful when multiple search engines are used',
action='store_true')
parser.add_argument('-proxy',
help='use proxy (protocol://ip:port)',
default=config.PROXY)
args = parser.parse_args()
proxy = args.proxy
timeout = config.TIMEOUT + (10 * bool(proxy))
agent = config.FAKE_USER_AGENT
engines = [
e.strip() for e in args.e.lower().split(',')
if e.strip() in search_engines_dict or e.strip() == 'all'
]
if not engines:
print('Please choose a search engine: ' + ', '.join(search_engines_dict))
else:
if 'all' in engines:
engine = AllSearchEngines(agent, proxy, timeout)
elif len(engines) > 1:
engine = MultipleSearchEngines(engines, agent, proxy, timeout)
else:
engine = search_engines_dict[engines[0]](agent, proxy, timeout)
engine.ignore_duplicate_urls = args.i
if args.f:
engine.set_search_operator(args.f)
engine.search(args.q, args.p)
engine.output(args.o, args.n)
if __name__ == '__main__':
main()
| 2.453125
| 2
|
pyutils/torch_train.py
|
JeremieMelo/pyutility
| 4
|
12783228
|
<filename>pyutils/torch_train.py
"""
Description:
Author: <NAME> (<EMAIL>)
Date: 2021-06-06 03:15:06
LastEditors: <NAME> (<EMAIL>)
LastEditTime: 2021-06-06 03:15:06
"""
import csv
import os
import random
import time
import traceback
from collections import OrderedDict
import numpy as np
import torch
from scipy import interpolate
from torchsummary import summary
from .general import ensure_dir
__all__ = [
"set_torch_deterministic",
"set_torch_stochastic",
"get_random_state",
"summary_model",
"save_model",
"BestKModelSaver",
"load_model",
"count_parameters",
"check_converge",
"ThresholdScheduler",
"ThresholdScheduler_tf",
"ValueRegister",
"ValueTracer",
"EMA",
"export_traces_to_csv",
"set_learning_rate",
"get_learning_rate",
"apply_weight_decay",
"disable_bn",
"enable_bn",
]
def set_torch_deterministic(random_state: int = 0) -> None:
random_state = int(random_state) % (2 ** 32)
torch.manual_seed(random_state)
np.random.seed(random_state)
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.cuda.manual_seed_all(random_state)
random.seed(random_state)
def set_torch_stochastic():
seed = int(time.time() * 1000) % (2 ** 32)
torch.manual_seed(seed)
np.random.seed(seed)
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = False
torch.cuda.manual_seed_all(seed)
def get_random_state():
return np.random.get_state()[1][0]
def summary_model(model, input):
summary(model, input)
def save_model(model, path="./checkpoint/model.pt", print_msg=True):
"""Save PyTorch model in path
Args:
model (PyTorch model): PyTorch model
path (str, optional): Full path of PyTorch model. Defaults to "./checkpoint/model.pt".
print_msg (bool, optional): Control of message print. Defaults to True.
"""
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.mkdir(dir)
try:
torch.save(model.state_dict(), path)
if print_msg:
print(f"[I] Model saved to {path}")
except Exception as e:
if print_msg:
print(f"[E] Model failed to be saved to {path}")
        traceback.print_exc()
class BestKModelSaver(object):
def __init__(self, k=1):
super().__init__()
self.k = k
self.model_cache = OrderedDict()
def __insert_model_record(self, acc, dir, checkpoint_name, epoch=None):
acc = round(acc * 100) / 100
if len(self.model_cache) < self.k:
new_checkpoint_name = (
f"{checkpoint_name}_acc-{acc:.2f}{'' if epoch is None else '_epoch-'+str(epoch)}"
)
path = os.path.join(dir, new_checkpoint_name + ".pt")
self.model_cache[path] = (acc, epoch)
return path, None
else:
min_acc, min_epoch = sorted(list(self.model_cache.values()), key=lambda x: x[0])[0]
if acc >= min_acc + 0.01:
del_checkpoint_name = (
f"{checkpoint_name}_acc-{min_acc:.2f}{'' if epoch is None else '_epoch-'+str(min_epoch)}"
)
del_path = os.path.join(dir, del_checkpoint_name + ".pt")
try:
del self.model_cache[del_path]
except:
print("[W] Cannot remove checkpoint: {} from cache".format(del_path), flush=True)
new_checkpoint_name = (
f"{checkpoint_name}_acc-{acc:.2f}{'' if epoch is None else '_epoch-'+str(epoch)}"
)
path = os.path.join(dir, new_checkpoint_name + ".pt")
self.model_cache[path] = (acc, epoch)
return path, del_path
# elif(acc == min_acc):
# new_checkpoint_name = f"{checkpoint_name}_acc-{acc:.2f}{'' if epoch is None else '_epoch-'+str(epoch)}"
# path = os.path.join(dir, new_checkpoint_name+".pt")
# self.model_cache[path] = (acc, epoch)
# return path, None
else:
return None, None
def save_model(
self,
model,
acc,
epoch=None,
path="./checkpoint/model.pt",
other_params=None,
save_model=False,
print_msg=True,
):
"""Save PyTorch model in path
Args:
model (PyTorch model): PyTorch model
acc (scalar): accuracy
epoch (scalar, optional): epoch. Defaults to None
path (str, optional): Full path of PyTorch model. Defaults to "./checkpoint/model.pt".
other_params (dict, optional): Other saved params. Defaults to None
save_model (bool, optional): whether save source code of nn.Module. Defaults to False
print_msg (bool, optional): Control of message print. Defaults to True.
"""
dir = os.path.dirname(path)
ensure_dir(dir)
checkpoint_name = os.path.splitext(os.path.basename(path))[0]
if isinstance(acc, torch.Tensor):
acc = acc.data.item()
new_path, del_path = self.__insert_model_record(acc, dir, checkpoint_name, epoch)
if del_path is not None:
try:
os.remove(del_path)
print(f"[I] Model {del_path} is removed", flush=True)
except Exception as e:
if print_msg:
print(f"[E] Model {del_path} failed to be removed", flush=True)
                traceback.print_exc()
if new_path is None:
if print_msg:
print(
f"[I] Not best {self.k}: {list(reversed(sorted(list(self.model_cache.values()))))}, skip this model ({acc:.2f}): {path}",
flush=True,
)
else:
try:
# torch.save(model.state_dict(), new_path)
if other_params is not None:
saved_dict = other_params
else:
saved_dict = {}
if save_model:
saved_dict.update({"model": model, "state_dict": model.state_dict()})
torch.save(saved_dict, new_path)
else:
saved_dict.update({"model": None, "state_dict": model.state_dict()})
torch.save(saved_dict, new_path)
if print_msg:
print(
f"[I] Model saved to {new_path}. Current best {self.k}: {list(reversed(sorted(list(self.model_cache.values()))))}",
flush=True,
)
except Exception as e:
if print_msg:
print(f"[E] Model failed to be saved to {new_path}", flush=True)
                traceback.print_exc()
def load_model(model, path="./checkpoint/model.pt", ignore_size_mismatch: bool = False, print_msg=True):
"""Load PyTorch model in path
Args:
model (PyTorch model): PyTorch model
path (str, optional): Full path of PyTorch model. Defaults to "./checkpoint/model.pt".
ignore_size_mismatch (bool, optional): Whether ignore tensor size mismatch. Defaults to False.
print_msg (bool, optional): Control of message print. Defaults to True.
"""
try:
raw_data = torch.load(path, map_location=lambda storage, location: storage)
if isinstance(raw_data, OrderedDict) and "state_dict" not in raw_data:
### state_dict: OrderedDict
state_dict = raw_data
else:
### {"state_dict": ..., "model": ...}
state_dict = raw_data["state_dict"]
load_keys = set(state_dict.keys())
model_keys = set(model.state_dict().keys())
common_dict = load_keys & model_keys
diff_dict = load_keys ^ model_keys
extra_keys = load_keys - model_keys
lack_keys = model_keys - load_keys
cur_state_dict = model.state_dict()
if ignore_size_mismatch:
size_mismatch_dict = set(
key for key in common_dict if model.state_dict()[key].size() != state_dict[key].size()
)
print(f"[W] {size_mismatch_dict} are ignored due to size mismatch", flush=True)
common_dict = common_dict - size_mismatch_dict
cur_state_dict.update({key: state_dict[key] for key in common_dict})
if len(diff_dict) > 0:
print(
f"[W] Warning! Model is not the same as the checkpoint. not found keys {lack_keys}. extra unused keys {extra_keys}"
)
model.load_state_dict(cur_state_dict)
if print_msg:
print(f"[I] Model loaded from {path}")
except Exception as e:
        traceback.print_exc()
if print_msg:
print(f"[E] Model failed to be loaded from {path}")
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def check_converge(trace, epsilon=0.002):
if len(trace) <= 1:
return False
if np.abs(trace[-1] - trace[-2]) / (np.abs(trace[-1]) + 1e-8) < epsilon:
return True
return False
class ThresholdScheduler(object):
"""Intepolation between begin point and end point. step must be within two endpoints"""
def __init__(self, step_beg, step_end, thres_beg, thres_end, mode="tanh"):
assert mode in {"linear", "tanh"}, "Threshold scheduler only supports linear and tanh modes"
self.mode = mode
self.step_beg = step_beg
self.step_end = step_end
self.thres_beg = thres_beg
self.thres_end = thres_end
self.func = self.createFunc()
def normalize(self, step, factor=2):
return (step - self.step_beg) / (self.step_end - self.step_beg) * factor
def createFunc(self):
if self.mode == "linear":
return lambda x: (self.thres_end - self.thres_beg) * x + self.thres_beg
elif self.mode == "tanh":
x = self.normalize(np.arange(self.step_beg, self.step_end + 1).astype(np.float32))
y = np.tanh(x) * (self.thres_end - self.thres_beg) + self.thres_beg
return interpolate.interp1d(x, y)
def __call__(self, x):
return self.func(self.normalize(x)).tolist()
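# Hedged usage sketch: ramp a threshold from 0.0 towards 0.5 over steps 0..100
# with the tanh profile; all numbers below are illustrative only.
def _example_threshold_scheduler():
    sched = ThresholdScheduler(step_beg=0, step_end=100, thres_beg=0.0, thres_end=0.5, mode="tanh")
    return [sched(s) for s in (0, 50, 100)]  # approx. [0.0, 0.38, 0.48]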
class ThresholdScheduler_tf(object):
"""smooth increasing threshold with tensorflow model pruning scheduler"""
def __init__(self, step_beg, step_end, thres_beg, thres_end):
import tensorflow as tf
import tensorflow_model_optimization as tfmot
gpus = tf.config.list_physical_devices('GPU')
if gpus:
            # Enable memory growth so TensorFlow does not grab all GPU memory up front
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
# Virtual devices must be set before GPUs have been initialized
print(e)
self.step_beg = step_beg
self.step_end = step_end
self.thres_beg = thres_beg
self.thres_end = thres_end
if thres_beg < thres_end:
self.thres_min = thres_beg
self.thres_range = thres_end - thres_beg
self.descend = False
else:
self.thres_min = thres_end
self.thres_range = thres_beg - thres_end
self.descend = True
self.pruning_schedule = tfmot.sparsity.keras.PolynomialDecay(
initial_sparsity=0, final_sparsity=0.9999999, begin_step=self.step_beg, end_step=self.step_end
)
def __call__(self, x):
if x < self.step_beg:
return self.thres_beg
elif x > self.step_end:
return self.thres_end
res_norm = self.pruning_schedule(x)[1].numpy()
if self.descend == False:
res = res_norm * self.thres_range + self.thres_beg
else:
res = self.thres_beg - res_norm * self.thres_range
if np.abs(res - self.thres_end) <= 1e-6:
res = self.thres_end
return res
class ValueRegister(object):
def __init__(self, operator, name="", show=True):
self.op = operator
self.cache = None
self.show = show
self.name = name if len(name) > 0 else "value"
def register_value(self, x):
self.cache = self.op(x, self.cache) if self.cache is not None else x
if self.show:
print(f"Recorded {self.name} is {self.cache}")
class ValueTracer(object):
def __init__(self, show=True):
self.cache = {}
self.show = show
def add_value(self, name, value, step):
if name not in self.cache:
self.cache[name] = {}
self.cache[name][step] = value
if self.show:
print(f"Recorded {name}: step = {step}, value = {value}")
def get_trace_by_name(self, name):
return self.cache.get(name, {})
def get_all_traces(self):
return self.cache
def __len__(self):
return len(self.cache)
def get_num_trace(self):
return len(self.cache)
def get_len_trace_by_name(self, name):
return len(self.cache.get(name, {}))
def dump_trace_to_file(self, name, file):
if name not in self.cache:
print(f"[W] Trace name '{name}' not found in tracer")
return
torch.save(self.cache[name], file)
print(f"[I] Trace {name} saved to {file}")
def dump_all_traces_to_file(self, file):
torch.save(self.cache, file)
print(f"[I] All traces saved to {file}")
def load_all_traces_from_file(self, file):
self.cache = torch.load(file)
return self.cache
class EMA(object):
def __init__(self, mu):
super().__init__()
self.mu = mu
self.shadow = {}
def register(self, name, val):
self.shadow[name] = val.clone().data
def __call__(self, name, x, mask=None):
if name not in self.shadow:
self.register(name, x)
return x.data
old_average = self.shadow[name]
new_average = (1 - self.mu) * x + self.mu * old_average
if mask is not None:
new_average[mask].copy_(old_average[mask])
self.shadow[name] = new_average.clone()
return new_average.data
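# Hedged usage sketch: exponential moving average of a parameter tensor; the
# decay value and tensor shape are illustrative only.
def _example_ema():
    ema = EMA(mu=0.9)
    w = torch.ones(3)
    ema("w", w)             # first call registers the raw value
    return ema("w", 2 * w)  # 0.1 * (2 * w) + 0.9 * w == 1.1 * w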
def export_traces_to_csv(trace_file, csv_file, fieldnames=None):
traces = torch.load(trace_file)
with open(csv_file, "w", newline="") as csvfile:
if fieldnames is None:
fieldnames = list(traces.keys())
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
max_len = max([len(traces[field]) for field in fieldnames])
for idx in range(max_len):
row = {}
for field in fieldnames:
value = traces[field][idx] if idx < len(traces[field]) else ""
row[field] = value.data.item() if isinstance(value, torch.Tensor) else value
writer.writerow(row)
def set_learning_rate(lr, optimizer):
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def get_learning_rate(optimizer):
return optimizer.param_groups[0]["lr"]
def apply_weight_decay(W, decay_rate, learning_rate, mask=None):
# in mask, 1 represents fixed variables, 0 represents trainable variables
if mask is not None:
W[~mask] -= W[~mask] * decay_rate * learning_rate
else:
W -= W * decay_rate * learning_rate
def disable_bn(model: torch.nn.Module) -> None:
for m in model.modules():
if isinstance(
m,
(
torch.nn.BatchNorm1d,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
torch.nn.SyncBatchNorm,
),
):
m.eval()
def enable_bn(model: torch.nn.Module) -> None:
for m in model.modules():
if isinstance(
m,
(
torch.nn.BatchNorm1d,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
torch.nn.SyncBatchNorm,
),
):
m.train()
| 2.125
| 2
|
sourcecode/21/21.8.1/threading_2.py
|
ydong08/PythonCode
| 0
|
12783229
|
#!/usr/bin/python
#encoding=utf-8
import threading
import time
class Counter:  # counter class
def __init__(self):
self.value = 0
def increment(self):
        self.value = self.value + 1  # add 1 to value
        value = self.value  # and return this value
return value
counter = Counter()
class ThreadDemo(threading.Thread):
    # the __init__ constructor is omitted here
def run(self):
time.sleep(1)
value = counter.increment()
        print(time.time() - self.create_time, "\t", self.index, "\tvalue: ", value)
for index in range(100):  # create 100 threads
    thread = ThreadDemo(index, time.time())
    thread.start()  # start the thread
| 3.9375
| 4
|
spiders/croatia.py
|
kristoff-it/customs-news-crawler
| 1
|
12783230
|
<reponame>kristoff-it/customs-news-crawler<gh_stars>1-10
# -*- coding: utf-8 -*-
import scrapy
class Item(scrapy.Item):
country = scrapy.Field()
title = scrapy.Field()
date = scrapy.Field()
body = scrapy.Field()
link = scrapy.Field()
class CroatiaSpider(scrapy.Spider):
name = "croatia"
allowed_domains = ["carina.gov.hr"]
start_urls = (
'https://carina.gov.hr/vijesti/8?trazi=1&tip=&tip2=&tema=&datumod=&datumdo=&pojam=&page=1',
)
def parse(self, response):
for href in response.css('.news_item > a::attr(href)'):
url = response.urljoin(href.extract())
yield scrapy.Request(url, callback=self.parse_item)
try:
next_page = response.urljoin(response.css('.news_page_nav').xpath(".//a[text()[contains(.,'Sljedeća »')]]").xpath(".//@href")[0].extract())
yield scrapy.Request(next_page, callback=self.parse)
except:
pass
def parse_item(self, response):
item = Item()
item['title'] = response.css('#content > div > h1').xpath('.//text()').extract()[0].strip()
item['date'] = response.css('.time_info').xpath('.//text()').extract()[0].strip()
item['body'] = " ".join(response.css('.page_content').xpath('.//text()').extract()).strip()
item['link'] = response.url
item['country'] = 'croatia'
yield item
| 2.671875
| 3
|
src/data/1017.py
|
NULLCT/LOMC
| 0
|
12783231
|
n, q = map(int, input().split())
g = [[] for i in range(n)]
for i in range(n - 1):
ai, bi = map(int, input().split())
ai -= 1
bi -= 1
g[ai].append(bi)
g[bi].append(ai)
# N: number of vertices
# G[v]: child vertices of vertex v (the parent vertex is not included)
#
# - construct
#   prv[u] = v: v is the ancestor one level above vertex u
# - lca
#   kprv[k][u] = v: v is the ancestor 2^k levels above vertex u
#   depth[u]: depth of vertex u (the root vertex has depth 0)
N = n
LV = (N - 1).bit_length()
def construct(prv):
kprv = [prv]
S = prv
for k in range(LV):
T = [0] * N
for i in range(N):
if S[i] is None:
continue
T[i] = S[S[i]]
kprv.append(T)
S = T
return kprv
def lca(u, v, kprv, depth):
dd = depth[v] - depth[u]
if dd < 0:
u, v = v, u
dd = -dd
# assert depth[u] <= depth[v]
for k in range(LV + 1):
if dd & 1:
v = kprv[k][v]
dd >>= 1
# assert depth[u] == depth[v]
if u == v:
return u
for k in range(LV - 1, -1, -1):
pu = kprv[k][u]
pv = kprv[k][v]
if pu != pv:
u = pu
v = pv
# assert kprv[0][u] == kprv[0][v]
return kprv[0][u]
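# Hedged usage sketch (comments only, since construct()/lca() read the global
# N and LV derived from the actual input): for a 4-vertex tree rooted at 0
# with edges 0-1, 0-2, 1-3 one would have
#   prv   = [0, 0, 0, 1]
#   depth = [0, 1, 1, 2]
#   kprv  = construct(prv)
#   lca(3, 2, kprv, depth)   # -> 0, the root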
# BFS
infty = 10**10
depth = [infty for i in range(n)]
prev = [infty for i in range(n)]
prev[0] = 0
depth[0] = 0
from collections import deque
dq = deque()
dq.append(0)
while len(dq):
u = dq.popleft()
for v in g[u]:
if depth[v] == infty:
depth[v] = depth[u] + 1
prev[v] = u
dq.append(v)
kprv = construct(prev)
for i in range(q):
ci, di = map(int, input().split())
ci -= 1
di -= 1
lc = lca(ci, di, kprv, depth)
dist = depth[ci] + depth[di] - depth[lc] * 2
if dist % 2 == 0:
print("Town")
else:
print("Road")
| 2.203125
| 2
|
stanCode_Projects/weather_master/weather_master.py
|
wilson51678/sc-projects
| 0
|
12783232
|
<reponame>wilson51678/sc-projects<filename>stanCode_Projects/weather_master/weather_master.py
"""
File: weather_master.py
Name: <NAME>
-----------------------
This program should implement a console program
that asks the user for weather data and computes the
average, highest, lowest, and number of cold days among the inputs.
Output format should match what is shown in the sample
run in the Assignment 2 Handout.
"""
# The value to stop program
EXIT = -100
def main():
"""
This program should implement a console program
    that asks the user for weather data and computes the
    average, highest, lowest, and number of cold days among the inputs.
"""
print("stanCode \"weather master 4.0")
new_data = int(input('Next temperature: (or '+str(EXIT)+' to quit)?'))
# no temperature enter
if new_data == EXIT:
print('No temperature were entered')
# first temperature enter
else:
maximum = new_data
minimum = new_data
total = new_data
days = 0
# determine whether first temperature is cold day
if new_data < 16:
days = 1
times = 1
while True:
# second temperature enter
new_data = int(input('Next temperature: (or ' + str(EXIT) + ' to quit)?'))
# no temperature enter
if new_data == EXIT:
break
# new temperature would become maximum
if new_data > maximum:
maximum = new_data
# new temperature would counts as cold day
if new_data < 16:
days += 1
# new temperature would become minimum
if new_data < minimum:
minimum = new_data
# total and times are made for counting average of all temperatures
total = new_data + total
times += 1
average = total / times
print('Highest temperature = '+str(maximum))
        print('Lowest temperature = '+str(minimum))
        print('Average = '+str(average))
print(str(days)+' cold day(s)')
###### DO NOT EDIT CODE BELOW THIS LINE ######
if __name__ == "__main__":
main()
| 4
| 4
|
tests/optionparser_tests.py
|
Anavros/malt
| 0
|
12783233
|
import pytest
from malt.parser.optionparser import parse, parse_all
from malt.exceptions import EmptyOptionString
def test_signature():
"""
Parsing takes an option string and creates a Signature. Basic operation test.
"""
option = "pow i:number i:power=2"
result = parse(option)
assert result.head == 'pow'
assert result.body[0].position == 0
assert result.body[0].key == 'number'
assert result.body[0].value == None
assert result.body[0].cast == 'i'
assert result.body[1].position == 1
assert result.body[1].key == 'power'
assert result.body[1].value == '2'
assert result.body[1].cast == 'i'
def test_parse_on_no_args():
"""
Parsing a command with no arguments should not raise any errors.
"""
result = parse("command")
assert result.head == "command"
assert result.body == []
def test_failure_empty_input():
"""
Raise EmptyOptionString when given empty input.
"""
with pytest.raises(EmptyOptionString):
parse('')
| 2.859375
| 3
|
BusinessIndexData.py
|
pesikj/InformationParserOfNonprofitOrganization
| 0
|
12783234
|
import requests
import urllib.request, json, re
def load_organizational_data(identification_number):
address = 'https://or.justice.cz/ias/ui/rejstrik-$firma?ico={0}'.format(identification_number)
with urllib.request.urlopen(address) as url:
page = url.read().decode('utf-8')
detail_page_pattern = r'subjektId=\d*'
result = re.search(detail_page_pattern, page)
zadost_string = result.group(0)
print(zadost_string)
load_organizational_data(22758518)
| 2.78125
| 3
|
hexrd/utils/decorators.py
|
johnkit/hexrd
| 0
|
12783235
|
<reponame>johnkit/hexrd<filename>hexrd/utils/decorators.py
# encoding: utf-8
"""Decorators that don't go anywhere else.
This module contains decorators that don't really go with another module
in :mod:`hexrd.utils`. Before putting something here please see if it should
go into another topical module in :mod:`hexrd.utils`.
"""
import collections.abc
def undoc(func):
"""Mark a function or class as undocumented.
This is found by inspecting the AST, so for now it must be used directly
as @undoc, not as e.g. @decorators.undoc
"""
return func
class memoized(object):
"""Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args, **kw):
        if not isinstance(args, collections.abc.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args, **kw)
key = (args, frozenset(kw.items()))
if key in self.cache:
return self.cache[key]
else:
value = self.func(*args, **kw)
self.cache[key] = value
return value
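# Hedged usage sketch: memoizing a pure function. The function below is purely
# illustrative and not part of hexrd.
@memoized
def _example_square(x):
    return x * x
# _example_square(4) computes 16 once; repeating the call returns the cached value.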
| 2.578125
| 3
|
main/views.py
|
zzaegun/RMS
| 0
|
12783236
|
<filename>main/views.py<gh_stars>0
from django.shortcuts import render
from .models import *
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from django.utils import timezone
from datetime import datetime
def get_latest_order_no():
#comparing phase
if T_ORDER_INFO.objects.all().count() != 0:
return T_ORDER_INFO.objects.latest('ORDER_ID').ORDER_ID #earliest
else:
return 0
# Create your views here.
def test(request): #login and test
return render(request, 'tpage.html', {})
def login(request): #login and test
return render(request, 'login.html', {})
def cReq(request):
items = T_PROD_TYPE.objects.all()
#comparing phase
last_order = get_latest_order_no()
template = 'b1.html'
return render(request, template, {'items': items, 'last_order': last_order, 'new_order': last_order+1})
def cList(request):
lists = T_ORDER_INFO.objects.all()
return render(request, 'b2.html', {'lists': lists})
def cModify(request):
items = T_PROD_TYPE.objects.all()
item = T_ORDER_INFO.objects.all().get(pk=request.POST['item'])
return render(request, 'b3.html', {'items': items, 'val': item})
def aPur(request):
lists = T_ORDER_INFO.objects.all()
return render(request, 'c1.html', {'lists': lists})
def aPurMan1(request):
last_available = True
fin_items = T_ORDER_INFO.objects.all().filter(ORDER_FIN = 1)
if fin_items.count() != 0:
last_item = fin_items.latest('ORDER_ID')
else:
last_item = 0
last_available = False
item = T_ORDER_INFO.objects.all().get(pk=request.POST['item'])
proc = item.ITEM_ID.PROCESS_ID.SUBPROC_ORDER
subproc = T_SUB_PROCESS.objects.all()
proc_array = proc.split(',')
return render(request, 'c2_1.html', {'last_available': last_available, 'sprc_list': subproc, 'last_val': last_item, 'now_val': item, 'subprc': proc_array})
def aPurMan2(request):
item = T_ORDER_INFO.objects.all().get(pk=request.POST['item'])
proc = item.ITEM_ID.PROCESS_ID.SUBPROC_ORDER
subproc = T_SUB_PROCESS.objects.all()
proc_array = [int(i) for i in proc.split(',')]
return render(request, 'c2_2.html', {'now_val': item, 'subprc': proc_array, 'sprc_list': subproc})
def aPurMan3(request):
item = T_ORDER_INFO.objects.all().get(pk=request.POST['item'])
proc = item.ITEM_ID.PROCESS_ID.SUBPROC_ORDER
subproc = T_SUB_PROCESS.objects.all()
proc_array = [int(i) for i in proc.split(',')]
machine = T_MACHINE_STATUS.objects.all()
#get selected machine number
selected = []
for i in proc_array:
st = machine.get(pk=i).MACHINE_SCHED
lt = machine.get(pk=i).MACHINE_LT
et = st + timezone.timedelta(minutes = lt)
selected.append({'subprc': i, 'machine': request.POST['sel_sp_'+str(i)], 'start_time':st, 'end_time':et})
return render(request, 'c2_3.html', {'now_val': item, 'subprc': proc_array, 'sprc_list': subproc, 'machine_info': selected})
def admin1(request):
schedules = T_PROD_SCHEDULE.objects.all()
for schedule in schedules:
name, start_time, end_time, chain, end_flag = calculate(schedule.SCHEDULE_ID)
#machine = schedule.MACHINE_USE.split(',')
schedule.MACHINE = []
for i, v in enumerate(start_time):
schedule.MACHINE.append({'name': name[i],'start_time':start_time[i],'end_time':end_time[i], 'chain':chain[i], 'end_flag':end_flag[i]})
return render(request, 'd1.html', {'schedules': schedules})
def admin2(request):
return render(request, 'd2.html', {})
def calculate(num):
schedule = T_PROD_SCHEDULE.objects.get(pk=num)
use = schedule.MACHINE_USE
name = []
start_time = []
end_time = []
chain = []
end_flag = []
tmp = schedule.PROD_START
for i in use.split(','):
if i.find('-') != -1:
max_prod_time = 0
for j in i.split('-'):
start_time.append(tmp)
m = T_MACHINE_STATUS.objects.get(pk=j)
name.append(m.MACHINE_NAME)
tmp2 = tmp + timezone.timedelta(minutes = m.MACHINE_LT)
end_time.append(tmp2)
chain.append(True)
#check max production time
if max_prod_time < m.MACHINE_LT:
max_prod_time = m.MACHINE_LT
if i.index(j) == len(i)-1:
end_flag.append(True)
tmp = tmp+timezone.timedelta(minutes = max_prod_time)
else:
end_flag.append(False)
else:
start_time.append(tmp)
m = T_MACHINE_STATUS.objects.get(pk=i)
name.append(m.MACHINE_NAME)
tmp = tmp + timezone.timedelta(minutes = m.MACHINE_LT)
end_time.append(tmp)
chain.append(False)
end_flag.append(False)
print(chain)
return name, start_time, end_time, chain, end_flag
def notwk(request):
machines = T_MACHINE_STATUS.objects.all()
schedules = T_PROD_SCHEDULE.objects.all()
for machine in machines:
result = []
for schedule in schedules:
#print(machine.MACHINE_ID, schedule.MACHINE_USE.split(','))
if str(machine.MACHINE_ID) in schedule.MACHINE_USE.split(','):
result.append(schedule.SCHEDULE_ID)
machine.MACHINE_RELATED = result
return render(request, 'd3.html', {'machines': machines, 'schedules': schedules})
############## LOGICAL
def cReq_newRequest(request):
"""
ORDER_ID = models.IntegerField(db_index=True, primary_key=True)
ORDER_DATE = models.IntegerField(default = 0)
ITEM_ID = models.ForeignKey(T_PROD_TYPE, on_delete=models.CASCADE)
ORDER_QTY = models.IntegerField(default = 0)
ORDER_FIN = models.DateTimeField(default = True)
FIN_TIME = models.DateTimeField(null=True)
"""
item = request.POST['item']
num = request.POST['num']
order_id = get_latest_order_no() + 1
order_date = timezone.now()
prod = T_PROD_TYPE.objects.get(pk=item)
prod.t_order_info_set.create(
ORDER_ID = order_id,
ORDER_DATE = order_date,
ITEM_ID = item,
ORDER_QTY = num,
ORDER_FIN = False,
)
    print(item, num, order_date)  # order quantity, ordered product number
return HttpResponseRedirect(reverse('main:cList'))
def cModify_Request(request):
order = request.POST['ord']
item = request.POST['item']
num = request.POST['num']
mod = T_ORDER_INFO.objects.get(pk=order)
new_prod_type = T_PROD_TYPE.objects.get(pk=item)
    # confirmed orders follow the sequence below
if mod.ORDER_FIN == True:
prod_modify = T_PROD_SCHEDULE.objects.get(ORDER_ID=order)
        # if the item differs from the existing one -> change the machine layout
if int(item) != int(mod.ITEM_ID.ITEM_ID):
print("기존 아이템과 달라지는 경우")
subproc = new_prod_type.PROCESS_ID.SUBPROC_ORDER
prod_modify.PROCESS_ID = T_PROD_TYPE.objects.get(pk=item).PROCESS_ID
machine = []
for s in subproc.split(','):
if s.find('-') != -1:
subm = []
for s_ in s.split('-'):
m_no = T_MACHINE_STATUS.objects.filter(MACHINE_ENABLED=1).filter(SUBPROC_ID=s)
for m_sep in m_no:
                            if prod_modify.MACHINE_USE.find(str(m_sep.MACHINE_ID)) != -1:
m_str = m_sep.MACHINE_ID
subm.append(str(m_str))
subm_str = '-'.join(subm)
machine.append(subm_str)
else:
m_no = T_MACHINE_STATUS.objects.filter(MACHINE_ENABLED=1).filter(SUBPROC_ID=s)
for m_sep in m_no:
if prod_modify.MACHINE_USE.find(str(m_sep.MACHINE_ID)) != -1:
m_str = m_sep.MACHINE_ID
machine.append(str(m_str))
print(machine)
prod_modify.MACHINE_USE = ','.join(machine)
        # if the quantity changes -> adjust the production volume
elif int(num) > int(mod.ORDER_QTY):
print("수량이 달라지는 경우")
subproc = new_prod_type.PROCESS_ID.SUBPROC_ORDER
machine = []
capa = {}
for s in subproc.split(','):
if s.find('-') != -1:
continue
else:
m_no = T_MACHINE_STATUS.objects.filter(MACHINE_ENABLED=1).filter(SUBPROC_ID=s)
val = 0
for m_sep in m_no:
val += m_sep.MACHINE_CAPA
print("주문비교: ", val, num)
#주문량이 한개보다는 크고 두개보다는 작을 때
if val < num:
print("개발중")
#원래는 주문량이 아예 초과하면 오류를 return해야 함
prod_modify.save()
mod.ORDER_QTY = num
mod.ITEM_ID = new_prod_type
mod.save()
return HttpResponseRedirect(reverse('main:cList'))
def notwk_setUnabled(request):
item = request.POST['item']
m = T_MACHINE_STATUS.objects.get(pk=item)
m.MACHINE_ENABLED = 0
m.save()
return HttpResponseRedirect(reverse('main:notwk'))
def notwk_setEnabled(request):
item = request.POST['item']
m = T_MACHINE_STATUS.objects.get(pk=item)
m.MACHINE_ENABLED = 1
m.save()
return HttpResponseRedirect(reverse('main:notwk'))
def notwk_selectAlternate(request):
#adding..
print(request.POST['old'], request.POST['item'])
return HttpResponseRedirect(reverse('main:notwk'))
def aPurMan3_addSchedule(request):
machine = request.POST['machine']
order_no = request.POST['order_no']
start_time = request.POST['start_time'].replace(u'오전','am').replace(u'오후','pm')
end_time = request.POST['end_time'].replace(u'오전','am').replace(u'오후','pm')
print(start_time, end_time)
order = T_ORDER_INFO.objects.get(pk=order_no)
T_PROD_SCHEDULE.objects.create(
PROD_START = start_time,
PROD_END = end_time,
MACHINE_USE = ''.join([str(i) for i in machine]),
MACHINE_QTY = order.ORDER_QTY,
ORDER_ID = order,
PROCESS_ID = order.ITEM_ID.PROCESS_ID,
)
order.ORDER_FIN = 1
order.save()
return HttpResponseRedirect(reverse('main:admin1'))
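# --- Illustrative sketch (not part of the original views) ---------------------
# The views above keep machine assignments as comma-separated strings in which a
# hyphen groups machines that serve the same sub-process step (e.g. "1,2-3,4").
# The helpers below are a hypothetical, standalone sketch of that string handling,
# added here only to make the encoding used by cModify_Request explicit.
def parse_machine_use(machine_use):
    """Return machine-id groups, e.g. "1,2-3,4" -> [["1"], ["2", "3"], ["4"]]."""
    groups = []
    for part in machine_use.split(','):
        # A hyphen inside a part means several machines share one sub-process step.
        groups.append(part.split('-') if '-' in part else [part])
    return groups
def encode_machine_use(groups):
    """Inverse of parse_machine_use: [["1"], ["2", "3"], ["4"]] -> "1,2-3,4"."""
    return ','.join('-'.join(group) for group in groups)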
| 2.140625
| 2
|
AutomationKit_InfrastructureServices@SVU/SampleTomcatBuild.py
|
dipAch/Infrastructure-Automation-Kit
| 0
|
12783237
|
<reponame>dipAch/Infrastructure-Automation-Kit
#!/usr/bin/env python3
# Use-Case: Sample `TOMCAT` & `JDK / JRE` Downloader and Installation script.
# Author: <NAME>, <@tweeting_dipa>
"""
This is a sample `TOMCAT` Installation Use-Case script.
Do not use this file. Just take this as a reference on how to
use the Automation-Kit to install the softwares that you need
and the ones that this utility supports (as of now).
Use the `AutomateBuild.py` file (same directory as this file),
to house the Build Script suitable for your Environment / Project's needs.
"""
################################## MODULE IMPORT SECTION ##################################
# Generic `TOMCAT` configurations module.
import helpers.BuildConfig.Tomcat.TomcatConfig
# Import the Supervisor Script to Co-ordinate and control the Build Process flow.
# Importing both, to test out the Generic Supervisor as well as the @SVU specific Supervisor.
import helpers.BuildSupervisor, helpers.SVUCustomBuildSupervisor
###########################################################################################
###################################### START `TOMCAT` INSTALLATION PROCESS ###################################
# Initiate `TOMCAT` and its dependency (i.e., `JDK / JRE`) Download and Install.
# Wait for the Magic to Happen!!!
# NOTE: instantiate the supervisor so that the environment configuration is bound to
# an actual TomcatAutomate instance, instead of calling `__init__` on the class itself.
tomcat_automate = helpers.BuildSupervisor.TomcatAutomate(helpers.BuildConfig.Tomcat.TomcatConfig.ENVIRONMENT)
tomcat_automate.initiate_build_workflow()
| 1.828125
| 2
|
neurovault/apps/statmaps/migrations/0024_auto_20150219_2047.py
|
aphroditepv/NeuroVault
| 68
|
12783238
|
<filename>neurovault/apps/statmaps/migrations/0024_auto_20150219_2047.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('statmaps', '0023_auto_20150218_1943'),
]
operations = [
migrations.CreateModel(
name='CognitiveAtlasTask',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('cog_atlas_id', models.CharField(max_length=200)),
],
options={
},
bases=(models.Model,),
),
migrations.AlterField(
model_name='statisticmap',
name='modality',
field=models.CharField(help_text=b'Brain imaging procedure that was used to acquire the data.', max_length=200, verbose_name=b'Modality & Acquisition Type', choices=[(b'fMRI-BOLD', b'fMRI-BOLD'), (b'fMRI-CBF', b'fMRI-CBF'), (b'fMRI-CBV', b'fMRI-CBV'), (b'Diffusion MRI', b'Diffusion MRI'), (b'Structural MRI', b'Structural MRI'), (b'PET FDG', b'PET FDG'), (b'PET [15O]-water', b'PET [15O]-water'), (b'PET other', b'PET other'), (b'MEG', b'MEG'), (b'EEG', b'EEG')]),
preserve_default=True,
)
]
| 1.710938
| 2
|
datasets/data_generator.py
|
RedaAlb/IntelligentTransportationInfrastructure_ITI
| 1
|
12783239
|
<reponame>RedaAlb/IntelligentTransportationInfrastructure_ITI<gh_stars>1-10
import os
import random
import shutil
from collections import Counter
import Automold as am
import cv2
import matplotlib.pyplot as plt
class DataGenerator:
""" Used to generate new samples using various methods.
"""
def __init__(self):
self.lp_chars_labels = ["0","1","2","3","4","5","6","7","8","9", "A","B","C","D","E","F","G","H",
"I","J", "K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"]
def get_lp_annotations(self, imgs_dir):
""" Get all LP character annotations (img path, char id, x, y, w, h).
Args:
imgs_dir (str): Path to the directory where all the images are located alongside their .txt annotation files.
Returns:
list: All annotations for all LPs in the given image directory.
"""
annos = []
filenames = os.listdir(imgs_dir)
filenames = [x for x in filenames if x.split(".")[-1] != "txt"] # Removing all .txt files from the list.
for filename in filenames:
img = cv2.imread(f"{imgs_dir}/{filename}")
img_h, img_w, _ = img.shape
            # Extracting the annotations for each character in the LP.
            lp_annotations = []  # Will hold all the annotations for all characters of the LP.
img_name = filename.split(".")[0]
anno_filename = img_name + ".txt"
with open(f"{imgs_dir}/{anno_filename}", "r") as file:
anno_lines = file.read().split("\n")
char_annotations = []
                # Each line is a character with data (char_id, x, y, w, h) separated by spaces.
for line in anno_lines:
if line == "": continue # Skip empty lines.
char_data = line.split(" ")
char_id = char_data[0]
                    # Converting the coordinates to the top-left corner of each character rather than the centre.
bb_w, bb_h = float(char_data[3]) * img_w, float(char_data[4]) * img_h
bb_x, bb_y = float(char_data[1]) * img_w - (bb_w/2), float(char_data[2]) * img_h - (bb_h/2)
                    # Storing each char anno as [char_id, x, y, w, h]; the image path is prepended to the LP's annotation list below.
char_anno = [char_id, int(bb_x), int(bb_y), int(bb_w), int(bb_h)]
char_annotations.append(char_anno)
lp_annotations.append(char_annotations)
lp_annotations.insert(0, f"{imgs_dir}/{filename}")
annos.append(lp_annotations)
return annos
def get_chars_occ_from_imgs(self, imgs_dir, low_ap_letters):
""" Get the occurances of all characters from images and their .txt annotation files.
Args:
imgs_dir (str): Path to the directory where all the images are located alongside their .txt annotation files.
low_ap_letters (list): The low performing (low average precision) letters.
Returns:
            Counter: Number of occurrences (occ) for each character, where the key is the character and value is the num of occ.
"""
samples_data = self.get_lp_annotations(imgs_dir)
n_samples = len(samples_data)
digits_count = []
letters_count = []
l_aps_count = [] # Number of low ap chars in each LP.
all_chars = ""
for sample in samples_data:
img_path = sample[0]
chars = sample[1]
            # Separating the LP characters into digits, letters and the low ap letters.
digits = []
letters = []
l_ap_letters = []
for char_data in chars:
class_id = int(char_data[0])
char = self.lp_chars_labels[class_id]
all_chars += char
if class_id < 10: # Digits
digits.append(char_data)
else:
if char in low_ap_letters:
l_ap_letters.append(char_data)
else:
letters.append(char_data)
l_ap_letters = l_ap_letters * 2
digits_count.append(len(digits))
letters_count.append(len(letters))
l_aps_count.append(len(l_ap_letters))
print(imgs_dir)
total_digits = sum(digits_count)
total_letters = sum(letters_count)
total_l_aps = sum(l_aps_count)
print("Average number of digits in an LP:", total_digits/n_samples, ", total:", total_digits)
print("Average number of letters in an LP:", total_letters/n_samples, ", total:", total_letters)
print("Average number of low AP letters in an LP:", total_l_aps/n_samples, ", total:", total_l_aps)
print("Total characters in dataset:", sum([total_digits, total_letters, total_l_aps]))
print("\n")
return Counter(all_chars)
def gen_permutations(self,
imgs_dir,
save_dir_name,
low_ap_letters,
low_ap_dupl=1,
lp_all_letters=False,
samples_to_display=-1,
exclude_a=False,
replace_1=True,
only_low_ap=False,
save_org=True):
""" Generate permutations.
Args:
imgs_dir (str): Path to the directory where all the images are located alongside their .txt annotation files.
save_dir_name (str): Saving directory for the newly generated data.
low_ap_letters (list): The low performing (low average precision) letters.
low_ap_dupl (int, optional): How many times a low AP character is allowed to duplicate in the same LP. Defaults to 1.
lp_all_letters (bool, optional): Whether to make the whole LP just letters, so replace all digits in the LP. Defaults to False.
samples_to_display (int, optional): How many samples to display of the generated data, use -1 for no display. Defaults to -1.
exclude_a (bool, optional): Exclude the character "a", since it appears significantly more than other chars. Defaults to False.
replace_1 (bool, optional): Whether to replace the digit "1" as it is very narrow and distorts most letters. Defaults to True.
only_low_ap (bool, optional): Make the LP only made up of low AP characters. Defaults to False.
            save_org (bool, optional): Whether to save the original LP patch as a separate sample. Defaults to True.
"""
samples_data = self.get_lp_annotations(imgs_dir)
dataset_dir = "/".join(imgs_dir.split("/")[1:])
full_save_dir = f"{save_dir_name}/{dataset_dir}"
try: os.makedirs(full_save_dir)
except FileExistsError: pass
if samples_to_display != -1:
_, ax = plt.subplots(samples_to_display, 2, figsize=(10, 2.5 * samples_to_display))
for i, sample in enumerate(samples_data):
img_path = sample[0]
img = cv2.imread(img_path)
if samples_to_display != -1:
ax[i][0].imshow(img)
ax[i][0].set_title("Original")
chars = sample[1]
a_digit_replaced = False
if save_org:
self.save_sample(img, img_path, chars, full_save_dir)
            # These will store the digits, all chars, and the low ap chars in the LP in separate lists.
digits = []
letters = []
l_ap_letters = []
            # Separating the LP characters into digits, letters and the low ap letters.
for char in chars:
class_id = int(char[0])
                if class_id < (10 + exclude_a):  # Digits (with exclude_a, class 10, the letter "A", is also treated as a digit to be replaced)
if replace_1:
digits.append(char)
else:
if class_id != 1:
digits.append(char)
else:
letter = self.lp_chars_labels[class_id]
if letter in low_ap_letters:
l_ap_letters.append(char)
elif not only_low_ap:
letters.append(char)
l_ap_letters = l_ap_letters * low_ap_dupl
backup_letters = letters[:]
backup_ap_letters = l_ap_letters[:]
for digit in digits:
letter = None
if len(l_ap_letters) != 0:
letter = l_ap_letters.pop()
elif len(letters) != 0:
letter = letters.pop()
if lp_all_letters: # Making the whole LP characters letters, keep replacing digits until no digits left.
# When letters is empty, it means both l_ap_letters and letters are empty.
# So resetting both to go again when all l_ap_letters and letters are exhausted.
if len(letters) == 0:
letters = backup_letters[:]
l_ap_letters = backup_ap_letters[:]
if letter is not None:
# Replacing the digit with the letter.
img = self.replace_digit(img, digit, letter)
a_digit_replaced = True
# Ensuring the label for the digit is changed to the letter.
digit_index = chars.index(digit)
chars[digit_index] = [letter[0], digit[1], digit[2], digit[3], digit[4]]
if a_digit_replaced:
self.save_sample(img, img_path, chars, full_save_dir, img_prefix="gen_")
if samples_to_display != -1:
ax[i][1].imshow(img)
ax[i][1].set_title("Auto generated")
if i+1 == samples_to_display:
break
def replace_digit(self, img, digit, letter):
""" Replace a digit of an LP patch with a letter.
Args:
img (numpy.ndarray): The LP patch, where the digit and character are in.
digit (list): The digit patch bounding box info in this format [x, y, w, h], based on the top left corner.
letter (list): The letter patch bounding box info in this format [x, y, w, h], based on the top left corner.
Returns:
numpy.ndarray: The same passed in img, but with the digit patch replaced by the letter patch.
"""
d_x, d_y = digit[1], digit[2]
d_w, d_h = digit[3], digit[4]
l_x, l_y = letter[1], letter[2]
l_w, l_h = letter[3], letter[4]
digit_patch = img[d_y:d_y+d_h, d_x:d_x+d_w]
d_h, d_w, _ = digit_patch.shape
letter_patch = img[l_y:l_y+l_h, l_x:l_x+l_w]
# Resizing the letter patch to match the digit patch.
letter_patch = cv2.resize(letter_patch, (d_w, d_h))
# Replacing the digit patch with the letter patch in the original image.
img[d_y:d_y+d_h, d_x:d_x+d_w] = letter_patch
return img
def save_sample(self, img, img_path, annos, save_dir, img_prefix=""):
""" Save a sample with its annotation file.
Args:
img (numpy.ndarray): The image to be saved.
img_path (str): The image path.
annos (list): The annotations for the image in this format [class id, x, y, w, h].
save_dir (str): Saving directory for the sample.
img_prefix (str, optional): A prefix before the saved image filename. Defaults to "".
"""
img_h, img_w, _ = img.shape
anno_lines = []
for anno in annos:
class_id = anno[0]
x, y = anno[1], anno[2]
w, h = anno[3], anno[4]
# Converting coordinates to centre of BB and relative to img width and height.
centre_x, centre_y = x + (w/2), y + (h/2)
rel_x, rel_y = centre_x / img_w, centre_y / img_h
rel_w, rel_h = w / img_w, h / img_h
anno_line = f"{class_id} {rel_x} {rel_y} {rel_w} {rel_h}"
anno_lines.append(anno_line)
filename = img_path.split("/")[-1].split(".")[0]
save_path = f"{save_dir}/{img_prefix}{filename}"
cv2.imwrite(f"{save_path}.jpg", img)
with open(f"{save_path}.txt", "w") as file:
file_content = "\n".join(anno_lines)
file.write(file_content)
def get_low_ap_paths(self, root_dir, path_to_imgs, low_ap_letters):
""" Get all the image paths that contain the low average precision (AP) letters.
Args:
root_dir (dir): The root directory of all the datasets.
path_to_imgs (list): The path to the images per dataset from the root_dir.
low_ap_letters (list): The low performing (low AP) letters.
Returns:
list: All the low AP image paths.
"""
low_ap_img_paths = []
for path in path_to_imgs:
annos = self.get_lp_annotations(f"{root_dir}/{path}")
dataset_paths = []
for anno in annos:
img_path = anno[0]
lp_chars = anno[1]
for lp_char in lp_chars:
char = self.lp_chars_labels[int(lp_char[0])]
if char in low_ap_letters:
dataset_paths.append(img_path)
break
print(path, "- number of low AP characters:", len(dataset_paths))
low_ap_img_paths.append(dataset_paths)
return low_ap_img_paths
def gen_rand_aug_imgs(self, root_dir, path_to_imgs, low_ap_letters, output_dir):
""" Generate random augmentation images by adding shadow, redish colour, or blur to the images.
Args:
root_dir (dir): The root directory of all the datasets.
path_to_imgs (list): The path to the images per dataset from the root_dir.
low_ap_letters (list): The low performing (low AP) letters.
output_dir (str): The saving directory for the newly generated data.
"""
low_ap_img_paths = self.get_low_ap_paths(root_dir, path_to_imgs, low_ap_letters)
for dataset_paths in low_ap_img_paths:
for path in dataset_paths:
img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
rand = random.randint(1, 100)
# Each augmentation having 33% chance of being applied.
if rand < 33:
proc_img = am.add_shadow(img, no_of_shadows=4, rectangular_roi=(-1,-1,-1,-1), shadow_dimension=4)
elif rand >= 33 and rand < 66:
proc_img = am.add_autumn(img)
else:
proc_img = am.m_add_blur(img, low_kernel=3, high_kernel=5, w_thresh=150)[0]
dataset_name = path.split("/")[1]
new_path = f"{output_dir}/{dataset_name}"
try: os.makedirs(new_path)
except FileExistsError: pass
img_filename = path.split("/")[-1]
img_name = img_filename.split(".")[0]
img_ext = img_filename.split(".")[-1]
new_img_path = f"{output_dir}/{dataset_name}/{img_name}_gen.{img_ext}"
plt.imsave(new_img_path, proc_img)
txt_filepath = "/".join(path.split("/")[:-1]) + "/" + img_name + ".txt"
new_txt_filepath = f"{output_dir}/{dataset_name}/{img_name}_gen.txt"
shutil.copy(txt_filepath, new_txt_filepath)
print("\nRandom data augmentation generation done.")
| 2.625
| 3
|
mowl/corpus/base.py
|
bio-ontology-research-group/OntoML
| 0
|
12783240
|
<reponame>bio-ontology-research-group/OntoML<gh_stars>0
from org.semanticweb.owlapi.manchestersyntax.renderer import ManchesterOWLSyntaxOWLObjectRendererImpl
from org.semanticweb.owlapi.model import OWLLiteral
from org.semanticweb.owlapi.search import EntitySearcher
from jpype.types import *
from org.mowl import MOWLShortFormProvider
import logging
def extract_axiom_corpus(ontology, out_file):
logging.info("Generating axioms corpus")
renderer = ManchesterOWLSyntaxOWLObjectRendererImpl()
shortFormProvider = MOWLShortFormProvider()
renderer.setShortFormProvider(shortFormProvider)
with open(out_file, 'w') as f:
for owl_class in ontology.getClassesInSignature():
axioms = ontology.getAxioms(owl_class)
for axiom in axioms:
rax = renderer.render(axiom)
rax = rax.replaceAll(JString("[\\r\\n|\\r|\\n()|<|>]"), JString(""))
f.write(f'{rax}\n')
def extract_annotation_corpus(ontology, out_file, mode = "append"):
if mode == "append":
mode = "a"
else:
mode = "w"
with open(out_file, mode) as f:
for owl_class in ontology.getClassesInSignature():
cls = str(owl_class)
annotations = EntitySearcher.getAnnotations(owl_class, ontology)
for annotation in annotations:
if isinstance(annotation.getValue(), OWLLiteral):
property = str(annotation.getProperty()).replace("\n", " ")
# could filter on property
value = str(annotation.getValue().getLiteral()).replace("\n", " ")
f.write(f'{cls} {property} {value}\n')
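# --- Illustrative usage sketch (not part of the original module) --------------
# Both helpers above expect an already-loaded OWLAPI ontology object (mOWL starts
# the JVM and loads ontologies elsewhere), so this sketch only shows the intended
# call order: render the class axioms first, then append the literal annotations
# to the same corpus file. The `ontology` argument and the output path are
# assumptions for illustration.
def build_corpus(ontology, corpus_path="corpus.txt"):
    extract_axiom_corpus(ontology, corpus_path)
    extract_annotation_corpus(ontology, corpus_path, mode="append")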
| 2.171875
| 2
|
AMBER/amber/utils/sampler.py
|
rtu715/NAS-Bench-360
| 10
|
12783241
|
<reponame>rtu715/NAS-Bench-360
# Author: <NAME>
# Created on June 5, 2020
"""
This module provides the `BioIntervalSource` class and its children.
These are essentially wrappers for sets of sequence intervals and
associated labels.
"""
#import keras
import tensorflow as tf
import numpy
from .sequences import EncodedHDF5Genome
import h5py
class BioIntervalSource(object):
"""A generic class for labeled examples of biological intervals.
The amount of padding added to the end of the intervals is able to
be changed during runtime. This allows these functions to be passed
to objects such as a model controller.
Parameters
----------
example_file : str
A path to a file that contains the examples in BED-like format.
Specifically, this file will have one example per line, with
the chromosome, start, end, and label for the example. Each
column is separated by tabs.
reference_sequence : Sequence
The reference sequence used to generate the input sequences
from the example coordinates.
seed : int, optional
Default is `1337`. The value used to seed random number generation.
n_examples : int, optional
Default is `None`. The number of examples. If left as `None`, will
use all of the examples in the file. If fewer than `n_examples`
        are found, an error will be thrown.
Attributes
----------
reference_sequence : Sequence
The reference sequence used to generate the input sequences
from the example coordinates.
examples : list
A list of the example coordinates.
labels : list
A list of the labels for the examples.
left_pad : int
The length of padding added to the left side of the interval.
right_pad : int
The length of padding added to the right side of the interval.
random_state : numpy.random.RandomState
A random number generator to use.
seed : int
The value used to seed the random number generator.
"""
def __init__(self, example_file, reference_sequence, n_examples=None, seed=1337, pad=400):
if type(reference_sequence) is str:
self.reference_sequence = EncodedHDF5Genome(input_path=reference_sequence, in_memory=False)
else:
self.reference_sequence = reference_sequence
self.left_pad = 0
self.right_pad = 0
# Setup RNG.
self.seed = seed
self.random_state = numpy.random.RandomState(seed=self.seed)
# Load examples.
self.labels = list()
self.examples = list()
with open(example_file, "r") as read_file:
for line in read_file:
line = line.strip()
if not line.startswith("#"):
if line:
line = line.split("\t")
chrom, start, end, strand = line[:4]
label = [int(x) for x in line[4:]]
self.labels.append(numpy.array(label))
self.examples.append((chrom, int(start), int(end), strand))
# TODO: Consider using separate random states for index shuffling and this part?
if n_examples is not None:
if len(self.examples) < n_examples:
s = ("Specified value of examples was {}".format(n_examples) +
", but only {} were found in \"{}\".".format(len(self.examples),
example_file))
raise RuntimeError(s)
elif len(self.examples) > n_examples:
idx = self.random_state.choice(len(self.examples),
n_examples,
replace=False)
idx.sort()
self.examples = numpy.array(self.examples, dtype='O')[idx].tolist()
self.labels = numpy.array(self.labels, dtype='O')[idx].tolist()
self.labels = [numpy.array(x) for x in self.labels]
else:
# Ensure random state not affected by using input with length of n_examples.
idx = self.random_state.choice(2, 1, replace=False)
del idx
else: # Ensure random state not affected by not using n_examples.
            idx = self.random_state.choice(2, 1, replace=False)
del idx
self.set_pad(pad)
def padding_is_valid(self, value):
"""Determine if the specified value is a valid value for padding
intervals.
Parameters
----------
value : int
Proposed amount of padding.
Returns
-------
bool
Whether the input value is valid.
"""
if value < 0:
return False
else:
return True
def _test_padding(self, value):
"""Tests if padding is valid or not. If invalid, raises an error.
Parameters
----------
value : int
Amount of padding to test.
Returns
-------
None
Raises
------
ValueError
This method throws an error if the proposed amount of padding is
not a valid amount.
"""
if not self.padding_is_valid(value):
s = "Invalid padding amount : {}".format(value)
raise ValueError(s)
def set_left_pad(self, value):
"""Sets the length of the padding added to the left
side of the input sequence.
Parameters
----------
value : int
The length of the padding to add to the left side of an example
interval.
"""
self._test_padding(value)
self.left_pad = value
def set_right_pad(self, value):
"""Sets the length of the padding added to the right side of an
example interval.
Parameters
----------
value : int
The length of the padding to add to the right side of an example
interval.
"""
self._test_padding(value)
self.right_pad = value
def set_pad(self, value):
"""Sets the length of padding added to both the left and right sides of
example intervals.
Parameters
----------
value : int
The length of the padding to add to the left and right sides of
input example intervals.
"""
self._test_padding(value)
self.left_pad = value
self.right_pad = value
def __len__(self):
"""Number of examples available.
Returns
-------
int
The number of examples available.
"""
return len(self.examples)
def _load_unshuffled(self, item):
"""Loads example `item` from the unshuffled list of examples.
Parameters
----------
item : int
The index of the example to load.
Returns
-------
tuple(numpy.ndarray, numpy.ndarray)
"""
chrom, start, end, strand = self.examples[item]
x = self.reference_sequence.get_sequence_from_coords(chrom, start - self.left_pad, end + self.right_pad, strand)
y = self.labels[item]
return x, y
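# Illustrative note (not part of the original module): the `example_file` parsed
# above is expected to be a headerless BED-like file with tab-separated columns,
# for example:
#   chr1  10000  11000  +  0  1  0
#   chr2  52000  53000  -  1  0  0
# i.e. chromosome, start, end, strand, then one or more integer labels per line;
# lines starting with "#" are skipped. The coordinates shown here are made up
# purely for illustration.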
class BioIntervalSequence(BioIntervalSource, tf.keras.utils.Sequence):
"""This data sequence type holds intervals in a genome and a
label associated with each interval. Unlike a generator, this
is based off of `keras.utils.Sequence`, which shifts things like
shuffling elsewhere. The amount of padding added to the end of
the intervals is able to be changed during runtime. This allows
these functions to be passed to objects such as a model
controller.
Parameters
----------
example_file : str
A path to a file that contains the examples in BED-like format.
Specifically, this file will have one example per line, with
the chromosome, start, end, and label for the example. Each
column is separated by tabs.
reference_sequence : Sequence
The reference sequence used to generate the input sequences
from the example coordinates.
n_examples : int, optional
Default is `None`. The number of examples. If left as `None`, will
use all of the examples in the file. If fewer than `n_examples` are
found, an error will be thrown.
seed : int, optional
Default is `1337`. The value used to seed random number generation.
Attributes
----------
reference_sequence : Sequence
The reference sequence used to generate the input sequences
from the example coordinates.
examples : list
A list of the example coordinates.
labels : list
A list of the labels for the examples.
left_pad : int
The length of padding added to the left side of the interval.
right_pad : int
The length of padding added to the right side of the interval.
random_state : numpy.random.RandomState
A random number generator to use.
seed : int
The value used to seed the random number generator.
"""
def __init__(self, example_file, reference_sequence, n_examples=None, seed=1337):
super(BioIntervalSequence, self).__init__(
example_file=example_file,
reference_sequence=reference_sequence,
n_examples=n_examples, seed=seed)
def __getitem__(self, item):
"""
Indexes into the set of examples and labels.
Parameters
----------
item : int
The index in the example/label pairs to fetch.
Returns
-------
tuple(numpy.ndarray, numpy.ndarray)
A tuple consisting of the example and the target label.
"""
return self._load_unshuffled(item)
class BioIntervalGenerator(BioIntervalSource):
"""This data generator type holds intervals in a genome and a
label associated with each interval. This essentially acts as
an iterator over the inputs examples. This approach is useful
and preferable to `BioIntervalSequence` when there are a very
large number of examples in the input. The amount of padding
added to the end of the intervals is able to be changed during
runtime. This allows these functions to be passed to objects
such as a model controller.
Parameters
----------
example_file : str
A path to a file that contains the examples in BED-like format.
Specifically, this file will have one example per line, with
the chromosome, start, end, and label for the example. Each
column is separated by tabs.
reference_sequence : Sequence
The reference sequence used to generate the input sequences
from the example coordinates.
n_examples : int, optional
Default is `None`. The number of examples. If left as `None`, will
use all of the examples in the file. If fewer than `n_examples` are
found, an error will be thrown.
seed : int, optional
Default is `1337`. The value used to seed random number generation.
Attributes
----------
reference_sequence : Sequence
The reference sequence used to generate the input sequences
from the example coordinates.
examples : list
A list of the example coordinates.
labels : list
A list of the labels for the examples.
left_pad : int
The length of padding added to the left side of the interval.
right_pad : int
The length of padding added to the right side of the interval.
random_state : numpy.random.RandomState
A random number generator to use.
seed : int
The value used to seed random number generation.
"""
def __init__(self, example_file, reference_sequence, n_examples=None, seed=1337):
super(BioIntervalGenerator, self).__init__(
example_file=example_file,
reference_sequence=reference_sequence,
n_examples=n_examples,
seed=seed)
raise NotImplementedError
class BatchedBioIntervalSequence(BioIntervalSource, tf.keras.utils.Sequence):
"""This data sequence type holds intervals in a genome and a
label associated with each interval. Unlike a generator, this
is based off of `keras.utils.Sequence`, which shifts things like
shuffling elsewhere. The amount of padding added to the end of
the intervals is able to be changed during runtime. This allows
these functions to be passed to objects such as a model
controller. Examples are divided into batches.
Parameters
----------
example_file : str
A path to a file that contains the examples in BED-like format.
Specifically, this file will have one example per line, with
the chromosome, start, end, and label for the example. Each
column is separated by tabs.
reference_sequence : Sequence or str
The reference sequence used to generate the input sequences
from the example coordinates; could be a Sequence instance or a
filepath to reference sequence.
batch_size : int
Specifies size of the mini-batches.
shuffle : bool
Specifies whether to shuffle the mini-batches.
n_examples : int, optional
Default is `None`. The number of examples. If left as `None`, will
use all of the examples in the file. If fewer than `n_examples` are
found, an error will be thrown.
seed : int, optional
Default is `1337`. The value used to seed random number generation.
Attributes
----------
reference_sequence : Sequence
The reference sequence used to generate the input sequences
from the example coordinates.
examples : list
A list of the example coordinates.
labels : list
A list of the labels for the examples.
left_pad : int
The length of padding added to the left side of the interval.
right_pad : int
The length of padding added to the right side of the interval.
batch_size : int
Specifies size of the mini-batches.
shuffle : bool
Specifies whether to shuffle the mini-batches.
random_state : numpy.random.RandomState
A random number generator to use.
seed : int
The value used to seed the random number generator.
"""
def __init__(self, example_file, reference_sequence,
batch_size, shuffle=True, n_examples=None, seed=1337, pad=0):
super(BatchedBioIntervalSequence, self).__init__(
example_file=example_file,
reference_sequence=reference_sequence,
n_examples=n_examples,
seed=seed,
pad=pad
)
self.batch_size = batch_size
self.shuffle = shuffle
self.index = numpy.arange(len(self.examples))
self.total_batches = len(self)
def __len__(self):
"""Number of examples available.
Returns
-------
int
The number of examples available.
"""
l = super(BatchedBioIntervalSequence, self).__len__()
return l // self.batch_size
def __getitem__(self, item):
"""
Indexes into the set of examples and labels.
Parameters
----------
item : int
The index in the example/label pairs to fetch.
Returns
-------
tuple(numpy.ndarray, numpy.ndarray)
A tuple consisting of the example and the target label.
"""
x = list()
y = list()
#for i in range(self.batch_size):
# cur_x, cur_y = self._load_unshuffled(self.index[item + i])
for i in range(item*self.batch_size, (item+1)*self.batch_size):
cur_x, cur_y = self._load_unshuffled(self.index[i])
x.append(cur_x)
y.append(cur_y)
x = numpy.stack(x)
y = numpy.stack(y)
return x, y
def on_epoch_end(self):
"""
If applicable, shuffle the examples at the end of an epoch.
"""
if self.shuffle:
self.index = self.random_state.choice(len(self.examples),
len(self.examples),
replace=False)
def close(self):
"""
Close the file connection of Sequence
"""
self.reference_sequence.close()
class BatchedBioIntervalSequenceGenerator(BatchedBioIntervalSequence):
"""This class modifies on top of BatchedBioIntervalSequence by performing the
generator loop infinitely
"""
def __init__(self, *args, **kwargs):
super(BatchedBioIntervalSequenceGenerator, self).__init__(*args, **kwargs)
self.step = 0
def __getitem__(self, item):
x = list()
y = list()
if self.step >= self.total_batches:
#print(self.step)
if self.shuffle: self._shuffle()
self.step = 0
for i in range(self.step*self.batch_size, (self.step+1)*self.batch_size):
cur_x, cur_y = self._load_unshuffled(self.index[i])
x.append(cur_x)
y.append(cur_y)
x = numpy.stack(x)
y = numpy.stack(y)
self.step += 1
return x, y
def _shuffle(self):
#print("Shuffled")
self.index = self.random_state.choice(len(self.examples),
len(self.examples),
replace=False)
def on_epoch_end(self):
pass
class Selector:
"""A helper class for making x/y selector easier for different hdf5 layouts
Parameters
----------
label : str
key label to get to array in hdf5 store
index : tuple
array index for specific data
Notes
-----
We will always assume that the first dimension is the sample_index dimension and thus will be preserved
for batch extraction
"""
def __init__(self, label, index=None):
self.label = label
self.index = index
def __call__(self, store, samp_idx=None):
if self.index is None:
return store[self.label] if samp_idx is None else store[self.label][samp_idx]
else:
return store[self.label][:, self.index] if samp_idx is None else store[self.label][samp_idx][:, self.index]
class BatchedHDF5Generator(tf.keras.utils.Sequence):
def __init__(self, hdf5_fp, batch_size, shuffle=True, in_memory=False, seed=None, x_selector=None, y_selector=None):
super(BatchedHDF5Generator, self).__init__()
self.x_selector = x_selector or Selector('x')
self.y_selector = y_selector or Selector('y')
self.in_memory = in_memory
        if self.in_memory is True:
            # Keep the arrays fully in memory, keyed by the selector labels so the
            # same Selector objects keep working on the in-memory store.
            with h5py.File(hdf5_fp, "r") as f:
                self.hdf5_store = {self.x_selector.label: f[self.x_selector.label][:],
                                   self.y_selector.label: f[self.y_selector.label][:]}
        else:
            self.hdf5_store = h5py.File(hdf5_fp, "r")
self.batch_size = batch_size
self.shuffle = shuffle
self.seed = seed
self.random_state = numpy.random.RandomState(seed=self.seed)
self.n_total_samp = (self.x_selector(self.hdf5_store).shape[0] // self.batch_size)*self.batch_size
#self.n_total_samp = self.hdf5_store['x'].shape[0]
self.n_total_batch = len(self)
# Method 2: only shuffle batch order, not batch composition, allowing chunk storage in hdf5
self.index = numpy.arange(self.n_total_samp).reshape((self.n_total_batch, -1))
# Method 1: column-wise shuffle
#self.index = numpy.arange(self.n_total_samp).reshape((-1, self.n_total_batch)).T
if self.shuffle is True:
self._shuffle()
self.step = 0
def __len__(self):
return int(numpy.ceil(self.n_total_samp / self.batch_size))
def __getitem__(self, index):
if self.step >= self.n_total_batch:
if self.shuffle: self._shuffle()
self.step = 0
samp_idx = self.index[self.step].tolist()
x_batch = self.x_selector(self.hdf5_store, samp_idx)
y_batch = self.y_selector(self.hdf5_store, samp_idx)
self.step += 1
return x_batch, y_batch
def _shuffle(self):
# for method 1
#_ = numpy.apply_along_axis(
# self.random_state.shuffle,
# 0,
# self.index)
_ = self.random_state.shuffle(self.index)
def close(self):
if self.in_memory is False:
self.hdf5_store.close()
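# --- Illustrative usage sketch (not part of the original module) --------------
# A minimal sketch of driving BatchedHDF5Generator with custom selectors; the
# HDF5 file name and its layout ("x" of shape (N, ...), "y" of shape (N, T)) are
# assumptions made up for illustration.
if __name__ == "__main__":
    x_sel = Selector("x")
    y_sel = Selector("y", index=[0, 1])  # keep only the first two label columns
    gen = BatchedHDF5Generator(
        "train_data.h5",  # hypothetical file
        batch_size=32,
        shuffle=True,
        seed=1337,
        x_selector=x_sel,
        y_selector=y_sel,
    )
    x_batch, y_batch = gen[0]  # the index argument is ignored; batches follow the internal step counter
    print(x_batch.shape, y_batch.shape)
    gen.close()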
| 3
| 3
|
tests/test_io.py
|
A-Breeze/premierconverter
| 0
|
12783242
|
<reponame>A-Breeze/premierconverter<filename>tests/test_io.py
"""Unit tests on input/output file functions"""
# pylint: disable=assignment-from-none
#########
# Setup #
#########
# Import external modules
import pytest
# Import project modules
import premierconverter as PCon
##########################
# in_filepath validation #
##########################
def test_io00_in_filepath_does_not_exist(tmp_dir_path):
"""Check an error is raised if there is no file at the in_filepath"""
# Given: Input file does not exist
in_filepath = tmp_dir_path / 'foo.csv'
# When: Attempt to validate the filepath
# Then: Error thrown
with pytest.raises(FileNotFoundError) as err:
PCon.validate_input_options(in_filepath)
assert err is not None # An error was thrown...
assert isinstance(err.value, FileNotFoundError) # ...of this specific type
assert ( # The error message is helpful...
'There is no file at the input location' in str(err.value)
)
assert str(in_filepath.absolute()) in str(err.value) # ...and contains the filepath
print("Correct: Helpful error message was thrown")
def test_io01_in_filepath_unrecognised_extension(tmp_dir_path):
"""Check an error is raised if there is no file at the in_filepath"""
# Given: Input file exists but does not have a recognised extension
in_filepath = tmp_dir_path / 'foo.foo'
in_filepath.write_text("Some text")
assert in_filepath.is_file()
# When: Attempt to validate the filepath
# Then: Warning is thrown (not an exception)
with pytest.warns(UserWarning) as wrns:
PCon.validate_input_options(in_filepath)
assert len(wrns) == 1 # Exactly 1 warning message was thrown
assert (
f"file extension '{in_filepath.suffix}' "
"is not one of the recognised file extensions"
in wrns[0].message.args[0]
)
print("Correct: Helpful warning was thrown")
@pytest.mark.parametrize("filename", ['foo.csv', 'fo o', 'foo.CsV', '01_f.tXt'])
def test_io02_in_filepath_valid(tmp_dir_path, filename):
"""Check the function correctly accepts a valid in_filepath"""
# Given: Input file exists and has a recognised extension
in_filepath = tmp_dir_path / filename
in_filepath.write_text("Some text")
assert in_filepath.is_file()
# When: Attempt to validate the filepath
# Then: No warnings or errors are thrown
rtn_val = 1
with pytest.warns(None) as wrns:
rtn_val = PCon.validate_input_options(in_filepath)
assert len(wrns) == 0 # No warnings are produced
assert rtn_val is None # Validation function completed
print("Correct: `in_filepath` was validated")
##########################
# out_filepath validation #
##########################
@pytest.mark.parametrize("filename", ['foo.csv', 'fo o', 'foo.CsV', '01_f.tXt'])
def test_io10_out_filepath_already_exists(tmp_dir_path, filename):
"""
Check an error is raised if there is already a file at out_filepath...
...but you can stop this error by explicitly passing force_overwrite.
"""
# Given: Output file already exists
out_filepath = tmp_dir_path / filename
out_filepath.write_text("Some text")
assert out_filepath.is_file()
# When: Attempt to validate the filepath (and no force_overwrite passed)
# Then: Error thrown
with pytest.raises(FileExistsError) as err:
PCon.validate_output_options(out_filepath)
assert err is not None # An error was thrown...
assert isinstance(err.value, FileExistsError) # ...of this specific type
    assert (  # The error message is helpful...
'File already exists at the output location' in str(err.value)
)
assert str(out_filepath.absolute()) in str(err.value) # ...and contains the filepath
assert 'If you want to overwrite it, re-run with `force_overwrite = True`' in str(err.value)
print("Correct: Helpful error message was thrown")
# When: Attempt to validate the filepath with force_overwrite
# Then: No warnings or errors are thrown
rtn_val = 1
with pytest.warns(None) as wrns:
rtn_val = PCon.validate_output_options(out_filepath, force_overwrite=True)
assert len(wrns) == 0 # No warnings are produced
assert rtn_val is None # Validation function completed
print("Correct: `out_filepath` was validated")
def test_io11_out_filepath_no_folder(tmp_dir_path):
"""Check an error is thrown if the folder of out_filepath does not exist"""
# Given: Output file location is in a folder that does not exist
# (so certainly the output file does not exist)
out_dir = tmp_dir_path / 'another folder'
out_filepath = out_dir / 'foo.csv'
# When: Attempt to validate the filepath
# Then: Error thrown
with pytest.raises(FileNotFoundError) as err:
PCon.validate_output_options(out_filepath)
assert err is not None # An error was thrown...
assert isinstance(err.value, FileNotFoundError) # ...of this specific type
    assert (  # The error message is helpful...
'The folder of the output file does not exist' in str(err.value)
)
assert str(out_filepath.parent.absolute()) in str(err.value) # ...and contains the filepath
print("Correct: Helpful error message was thrown")
@pytest.mark.parametrize("filename", ['foo.xlsx', 'fo .o', 'foo.gzip', '01_f.zip'])
def test_io12_out_filepath_unrecognised_extension(tmp_dir_path, filename):
"""Check an error is thrown if the folder of out_filepath does not exist"""
# Given: Output file deos not have a recognised extension
out_filepath = tmp_dir_path / filename
# When: Attempt to validate the filepath
# Then: Warning is thrown (not an exception)
with pytest.warns(UserWarning) as wrns:
PCon.validate_output_options(out_filepath)
assert len(wrns) == 1 # Exactly 1 warning message was thrown
assert (
f"file extension '{out_filepath.suffix}' "
"is not one of the recognised file extensions"
in wrns[0].message.args[0]
)
print("Correct: Helpful warning was thrown")
@pytest.mark.parametrize("filename", ['foo.csv', 'fo o', 'foo.CsV', '01_f.tXt'])
def test_io13_out_filepath_valid(tmp_dir_path, filename):
"""Check the function correctly accepts a valid in_filepath"""
# Given: Output file has a recognised extension and does not exist
out_filepath = tmp_dir_path / filename
# When: Attempt to validate the filepath
# Then: No warnings or errors are thrown
rtn_val = 1
with pytest.warns(None) as wrns:
rtn_val = PCon.validate_output_options(out_filepath)
assert len(wrns) == 0 # No warnings are produced
assert rtn_val is None # Validation function completed
print("Correct: `out_filepath` was validated")
| 2.46875
| 2
|
packages/syft/src/syft/proto/core/auth/signed_message_pb2.py
|
vishalbelsare/PySyft
| 8,428
|
12783243
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/core/auth/signed_message.proto
"""Generated protocol buffer code."""
# third party
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# third party
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
# syft absolute
from syft.proto.core.common import (
common_object_pb2 as proto_dot_core_dot_common_dot_common__object__pb2,
)
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n$proto/core/auth/signed_message.proto\x12\x0esyft.core.auth\x1a%proto/core/common/common_object.proto\x1a\x1bgoogle/protobuf/empty.proto"\x80\x01\n\rSignedMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x10\n\x08obj_type\x18\x02 \x01(\t\x12\x11\n\tsignature\x18\x03 \x01(\x0c\x12\x12\n\nverify_key\x18\x04 \x01(\x0c\x12\x0f\n\x07message\x18\x05 \x01(\x0c"\x1f\n\tVerifyKey\x12\x12\n\nverify_key\x18\x01 \x01(\x0c"0\n\tVerifyAll\x12#\n\x03\x61ll\x18\x01 \x01(\x0b\x32\x16.google.protobuf.Emptyb\x06proto3'
)
_SIGNEDMESSAGE = DESCRIPTOR.message_types_by_name["SignedMessage"]
_VERIFYKEY = DESCRIPTOR.message_types_by_name["VerifyKey"]
_VERIFYALL = DESCRIPTOR.message_types_by_name["VerifyAll"]
SignedMessage = _reflection.GeneratedProtocolMessageType(
"SignedMessage",
(_message.Message,),
{
"DESCRIPTOR": _SIGNEDMESSAGE,
"__module__": "proto.core.auth.signed_message_pb2"
# @@protoc_insertion_point(class_scope:syft.core.auth.SignedMessage)
},
)
_sym_db.RegisterMessage(SignedMessage)
VerifyKey = _reflection.GeneratedProtocolMessageType(
"VerifyKey",
(_message.Message,),
{
"DESCRIPTOR": _VERIFYKEY,
"__module__": "proto.core.auth.signed_message_pb2"
# @@protoc_insertion_point(class_scope:syft.core.auth.VerifyKey)
},
)
_sym_db.RegisterMessage(VerifyKey)
VerifyAll = _reflection.GeneratedProtocolMessageType(
"VerifyAll",
(_message.Message,),
{
"DESCRIPTOR": _VERIFYALL,
"__module__": "proto.core.auth.signed_message_pb2"
# @@protoc_insertion_point(class_scope:syft.core.auth.VerifyAll)
},
)
_sym_db.RegisterMessage(VerifyAll)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_SIGNEDMESSAGE._serialized_start = 125
_SIGNEDMESSAGE._serialized_end = 253
_VERIFYKEY._serialized_start = 255
_VERIFYKEY._serialized_end = 286
_VERIFYALL._serialized_start = 288
_VERIFYALL._serialized_end = 336
# @@protoc_insertion_point(module_scope)
| 1.109375
| 1
|
tests/aqmesh/test_scraper_aq.py
|
openghg/gather
| 0
|
12783244
|
from pathlib import Path
def test_scrape(aqmesh_scraper_setup, tmpdir):
data = Path(aqmesh_scraper_setup["co2"]["data"])
metadata = Path(aqmesh_scraper_setup["co2"]["metadata"])
assert data.exists()
assert metadata.exists()
assert data.name == "20210515_20211024_CO2_AQMesh_Scaled_Dataset_PPM.csv"
assert metadata.name == "20210515_20211024_CO2_pod_metadata.csv"
| 2.328125
| 2
|
pyDrivers/rgb.py
|
steelee/minnow_max_maker
| 4
|
12783245
|
<gh_stars>1-10
import mraa
import time
from led import led
class rgb(object):
def __init__(self, red_pin, green_pin, blue_pin, red_low=False, green_low=False, blue_low=False):
self._r = led(red_pin, red_low)
self._g = led(green_pin, green_low)
self._b = led(blue_pin, blue_low)
def off(self):
self._r.off()
self._b.off()
self._g.off()
def on(self):
self._r.on()
self._g.on()
self._b.on()
def red(self, s=0, blend=False):
if not blend:
self.off()
self._r.on(s)
def green(self, s=0, blend=False):
if not blend:
self.off()
self._g.on(s)
def blue(self, s=0, blend=False):
if not blend:
self.off()
self._b.on(s)
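# --- Illustrative usage sketch (not part of the original module) --------------
# A minimal sketch of driving the rgb helper; the pin numbers are assumptions and
# depend entirely on how the LED is wired to the board.
if __name__ == "__main__":
    leds = rgb(red_pin=21, green_pin=22, blue_pin=23)
    leds.red()               # solid red
    time.sleep(1)
    leds.green(blend=True)   # red + green together, since blend skips the off() call
    time.sleep(1)
    leds.off()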
| 3.03125
| 3
|
test/test_char_lstm.py
|
epwalsh/pytorch-crf
| 69
|
12783246
|
"""Tests for CharLSTM class."""
def test_forward(char_lstm, vocab_dataset):
"""Test `CharLSTM.forward()` method."""
dataset = vocab_dataset[1]
for src, tgt in dataset:
res = char_lstm(*src[:-2])
n_words, dim = res.size()
assert n_words == tgt.size()[0]
assert dim == char_lstm.output_size
| 3.109375
| 3
|
Models/regressionTemplateTF/model.py
|
UTS-AnimalLogicAcademy/nuke-ML-server
| 123
|
12783247
|
# Copyright (c) 2020 Foundry.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import print_function
import sys
import os
import time
import scipy.misc
import numpy as np
import cv2
import tensorflow as tf
tf.compat.v1.disable_eager_execution() # For TF 2.x compatibility
from models.baseModel import BaseModel
from models.common.model_builder import baseline_model
from models.common.util import print_, get_ckpt_list, linear_to_srgb, srgb_to_linear
import message_pb2
class Model(BaseModel):
"""Load your trained model and do inference in Nuke"""
def __init__(self):
super(Model, self).__init__()
self.name = 'Regression Template TF'
self.n_levels = 3
self.scale = 0.5
dir_path = os.path.dirname(os.path.realpath(__file__))
self.checkpoints_dir = os.path.join(dir_path, 'checkpoints')
self.patch_size = 50
self.output_param_number = 1
# Initialise checkpoint name to the latest checkpoint
ckpt_names = get_ckpt_list(self.checkpoints_dir)
if not ckpt_names: # empty list
self.checkpoint_name = ''
else:
latest_ckpt = tf.compat.v1.train.latest_checkpoint(self.checkpoints_dir)
if latest_ckpt is not None:
self.checkpoint_name = latest_ckpt.split('/')[-1]
else:
self.checkpoint_name = ckpt_names[-1]
self.prev_ckpt_name = self.checkpoint_name
# Silence TF log when creating tf.Session()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Define options
self.gamma_to_predict = 1.0
self.predict = False
self.options = ('checkpoint_name', 'gamma_to_predict',)
self.buttons = ('predict',)
# Define inputs/outputs
self.inputs = {'input': 3}
self.outputs = {'output': 3}
def load(self, model):
# Check if empty or invalid checkpoint name
if self.checkpoint_name=='':
ckpt_names = get_ckpt_list(self.checkpoints_dir)
if not ckpt_names:
raise ValueError("No checkpoints found in {}".format(self.checkpoints_dir))
else:
raise ValueError("Empty checkpoint name, try an available checkpoint in {} (ex: {})"
.format(self.checkpoints_dir, ckpt_names[-1]))
print_("Loading trained model checkpoint...\n", 'm')
# Load from given checkpoint file name
self.saver.restore(self.sess, os.path.join(self.checkpoints_dir, self.checkpoint_name))
print_("...Checkpoint {} loaded\n".format(self.checkpoint_name), 'm')
def inference(self, image_list):
"""Do an inference on the model with a set of inputs.
# Arguments:
image_list: The input image list
Return the result of the inference.
"""
image = image_list[0]
image = linear_to_srgb(image).copy()
if not hasattr(self, 'sess'):
# Initialise tensorflow graph
tf.compat.v1.reset_default_graph()
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth=True
self.sess=tf.compat.v1.Session(config=config)
# Input is stacked histograms of original and gamma-graded images.
input_shape = [1, 2, 100]
# Initialise input placeholder size
self.input = tf.compat.v1.placeholder(tf.float32, shape=input_shape)
self.model = baseline_model(
input_shape=input_shape[1:],
output_param_number=self.output_param_number)
self.infer_op = self.model(self.input)
# Load latest model checkpoint
self.saver = tf.compat.v1.train.Saver()
self.load(self.model)
self.prev_ckpt_name = self.checkpoint_name
# If checkpoint name has changed, load new checkpoint
if self.prev_ckpt_name != self.checkpoint_name or self.checkpoint_name == '':
self.load(self.model)
# If checkpoint correctly loaded, update previous checkpoint name
self.prev_ckpt_name = self.checkpoint_name
# Preprocess image same way we preprocessed it for training
# Here for gamma correction compute histograms
def histogram(x, value_range=[0.0, 1.0], nbins=100):
"""Return histogram of tensor x"""
h, w, c = x.shape
hist = tf.histogram_fixed_width(x, value_range, nbins=nbins)
hist = tf.divide(hist, h * w * c)
return hist
with tf.compat.v1.Session() as sess:
# Convert to grayscale
img_gray = tf.image.rgb_to_grayscale(image)
img_gray = tf.image.resize(img_gray, [self.patch_size, self.patch_size])
# Apply gamma correction
img_gray_grade = tf.math.pow(img_gray, self.gamma_to_predict)
img_grade = tf.math.pow(image, self.gamma_to_predict)
# Compute histograms
img_hist = histogram(img_gray)
img_grade_hist = histogram(img_gray_grade)
hists_op = tf.stack([img_hist, img_grade_hist], axis=0)
hists, img_grade = sess.run([hists_op, img_grade])
res_img = srgb_to_linear(img_grade)
hists_batch = np.expand_dims(hists, 0)
start = time.time()
# Run model inference
inference = self.sess.run(self.infer_op, feed_dict={self.input: hists_batch})
duration = time.time() - start
print('Inference duration: {:4.3f}s'.format(duration))
res = inference[-1]
print("Predicted gamma: {}".format(res))
# If predict button is pressed in Nuke
if self.predict:
script_msg = message_pb2.FieldValuePairAttrib()
script_msg.name = "PythonScript"
# Create a Python script message to run in Nuke
python_script = self.nuke_script(res)
script_msg_val = script_msg.values.add()
script_msg_str = script_msg_val.string_attributes.add()
script_msg_str.values.extend([python_script])
return [res_img, script_msg]
return [res_img]
def nuke_script(self, res):
"""Return the Python script function to create a pop up window in Nuke."""
popup_msg = "Predicted gamma: {}".format(res)
script = "nuke.message('{}')\n".format(popup_msg)
return script
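# --- Illustrative sketch (not part of the original module) --------------------
# The network's input is a stack of two normalised 100-bin histograms (the
# grayscale patch and its gamma-graded version). The NumPy function below is an
# assumption-labelled, standalone rewrite of that preprocessing for illustration;
# the real pipeline above builds the equivalent tensors with TensorFlow ops.
def stacked_histograms(gray_patch, gamma, nbins=100):
    """gray_patch: float array in [0, 1]; returns an array of shape (2, nbins)."""
    graded = np.power(gray_patch, gamma)
    hist_orig, _ = np.histogram(gray_patch, bins=nbins, range=(0.0, 1.0))
    hist_grade, _ = np.histogram(graded, bins=nbins, range=(0.0, 1.0))
    n = gray_patch.size
    return np.stack([hist_orig / n, hist_grade / n], axis=0)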
| 1.828125
| 2
|
utils/input_output/io.py
|
Tudor67/Object-Counting
| 7
|
12783248
|
import json
import numpy as np
import os
import skimage.io
def save_np_arrays(images, img_names, save_path):
for img, img_name in zip(images, img_names):
np.save(f'{save_path}/{img_name}', img)
def load_np_arrays(path, num=None):
images = []
img_names = sorted(os.listdir(path))
if num is None:
num = len(img_names)
for idx in range(num):
img_name = img_names[idx]
img = np.load(f'{path}/{img_name}')
images.append(img)
return np.array(images)
def load_images(path, img_names, num_images=None):
images = []
if num_images is None:
num_images = len(img_names)
for idx in range(num_images):
img_name = img_names[idx]
img_path = f'{path}/{img_name}'
img = skimage.io.imread(img_path) / 255.
images.append(img)
return images
def load_images_and_density_maps(path, num_images):
img_names = sorted(os.listdir(f'{path}/images'))[:num_images]
density_map_names = sorted(os.listdir(f'{path}/gt_density_maps'))[:num_images]
images = []
density_maps = []
for img_name, density_map_name in zip(img_names, density_map_names):
img = skimage.io.imread(f'{path}/images/{img_name}') / 255.
density_map = np.load(f'{path}/gt_density_maps/{density_map_name}')
images.append(img)
density_maps.append(density_map)
return images, density_maps
def save_gt_counts(counts, img_names, save_path):
for img_name, count in zip(img_names, counts):
txt_name = f'{img_name.split(".")[0]}.txt'
txt_path = f'{save_path}/{txt_name}'
with open(txt_path, 'w') as fo:
fo.write(str(int(count)))
def load_gt_counts(counts_path):
txt_names = sorted(os.listdir(counts_path))
    counts = np.empty(len(txt_names), dtype=int)
for i, txt_name in enumerate(txt_names):
txt_path = f'{counts_path}/{txt_name}'
with open(txt_path, 'r') as fi:
counts[i] = int(fi.read().split()[0])
return counts
def read_json(filename):
with open(filename, 'r') as fi:
data = json.load(fi)
return data
def write_json(data, filename):
dirname = os.path.dirname(filename)
if not os.path.isdir(dirname):
os.makedirs(dirname)
with open(filename, 'w') as fo:
json.dump(data, fo)
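# --- Illustrative usage sketch (not part of the original module) --------------
# A small round trip through the helpers above; the directory names below are
# assumptions made up for illustration.
if __name__ == '__main__':
    maps = [np.zeros((4, 4)), np.ones((4, 4))]
    names = ['img_0', 'img_1']
    os.makedirs('tmp_density_maps', exist_ok=True)
    save_np_arrays(maps, names, 'tmp_density_maps')
    loaded = load_np_arrays('tmp_density_maps')
    print(loaded.shape)  # (2, 4, 4)
    write_json({'counts': [0, 16]}, 'tmp_json/counts.json')
    print(read_json('tmp_json/counts.json'))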
| 2.640625
| 3
|
ckanext/tnext/dbsuggest.py
|
WilJoey/ckanext-tnext
| 0
|
12783249
|
<gh_stars>0
# -*- coding: utf-8 -*-
import constants
import sqlalchemy as sa
import uuid
import logging
from sqlalchemy import func, ForeignKey
from sqlalchemy.orm import relationship, backref
Suggest = None
Comment = None
log = logging.getLogger(__name__)
def uuid4():
return str(uuid.uuid4())
def init_db(model):
global Suggest
global Comment
if Suggest is None:
class _Suggest(model.DomainObject):
@classmethod
def get(cls, **kw):
'''Finds all the instances required.'''
query = model.Session.query(cls).autoflush(False)
return query.filter_by(**kw).all()
@classmethod
def views_plus(cls, id):
model.Session.execute("UPDATE suggests SET views=views+1 WHERE id=:id", {'id': id})
model.Session.commit()
return False
@classmethod
def suggest_exists(cls, title):
                '''Returns true if there is a suggestion with the same title (case insensitive)'''
query = model.Session.query(cls).autoflush(False)
return query.filter(func.lower(cls.title) == func.lower(title)).first() is not None
@classmethod
def get_ordered_by_date(cls, **kw):
sql = "SELECT id, user_id, title, open_time, views, (select count(*) from suggests_comments where suggest_id = id) as comments FROM suggests WHERE closed=False ORDER BY open_time DESC"
#query = model.Session.query(cls).autoflush(False)
query = model.Session.query(cls).autoflush(False)
return query.filter_by(**kw).order_by(cls.open_time.desc()).all()
# @classmethod
# def query_by_sql(cls, **kw):
# sql = "SELECT id, user_id, title, open_time, views, (select count(*) from suggests_comments where suggest_id = id) as comments FROM suggests WHERE closed=False ORDER BY open_time DESC"
# return None
Suggest = _Suggest
# FIXME: References to the other tables...
suggests_table = sa.Table('suggests', model.meta.metadata,
sa.Column('user_id', sa.types.UnicodeText, primary_key=False, default=u''),
sa.Column('id', sa.types.UnicodeText, primary_key=True, default=uuid4),
sa.Column('title', sa.types.UnicodeText(constants.NAME_MAX_LENGTH), primary_key=True, default=u''),
sa.Column('description', sa.types.UnicodeText(constants.DESCRIPTION_MAX_LENGTH), primary_key=False, default=u''),
sa.Column('dataset_name', sa.types.UnicodeText(constants.DATASET_NAME_MAX_LENGTH), primary_key=False, default=u''),
sa.Column('suggest_columns', sa.types.UnicodeText(constants.SUGGEST_COLUMNS_MAX_LENGTH), primary_key=False, default=u''),
sa.Column('open_time', sa.types.DateTime, primary_key=False, default=None),
sa.Column('views', sa.types.Integer, primary_key=False, default=0),
sa.Column('close_time', sa.types.DateTime, primary_key=False, default=None),
sa.Column('closed', sa.types.Boolean, primary_key=False, default=False),
sa.Column('org_id', sa.types.UnicodeText, primary_key=False, default=False),
sa.Column('send_mail', sa.types.Integer, primary_key=False, default=0),
sa.Column('mail_time', sa.types.DateTime, primary_key=False, default=None),
sa.Column('email', sa.types.UnicodeText, primary_key=False, default=u'')
)
#suggests_table.comments = relationship('suggests_comments', backref='suggests')
# Create the table only if it does not exist
suggests_table.create(checkfirst=True)
model.meta.mapper(Suggest, suggests_table,)
if Comment is None:
class _Comment(model.DomainObject):
@classmethod
def get(cls, **kw):
'''Finds all the instances required.'''
query = model.Session.query(cls).autoflush(False)
return query.filter_by(**kw).all()
@classmethod
def get_ordered_by_date(cls, **kw):
'''Personalized query'''
query = model.Session.query(cls).autoflush(False)
return query.filter_by(**kw).order_by(cls.time.desc()).all()
@classmethod
def get_count_by_suggest(cls, **kw):
query = model.Session.query(cls).autoflush(False)
return query.filter_by(**kw).count()
Comment = _Comment
# FIXME: References to the other tables...
comments_table = sa.Table('suggests_comments', model.meta.metadata,
sa.Column('id', sa.types.UnicodeText, primary_key=True, default=uuid4),
sa.Column('user_id', sa.types.UnicodeText, primary_key=False, default=u''),
sa.Column('suggest_id', sa.types.UnicodeText, primary_key=True, default=uuid4),
sa.Column('time', sa.types.DateTime, primary_key=True, default=u''),
sa.Column('comment', sa.types.UnicodeText(constants.COMMENT_MAX_LENGTH), primary_key=False, default=u'')
)
# Create the table only if it does not exist
comments_table.create(checkfirst=True)
model.meta.mapper(Comment, comments_table,)
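# --- Illustrative note (not part of the original module) ----------------------
# init_db(model) must be called once with CKAN's model module before the mapped
# classes can be used; afterwards suggestions can be queried, e.g. (sketch):
#   init_db(model)
#   open_suggests = Suggest.get_ordered_by_date(closed=False)
#   n_comments = Comment.get_count_by_suggest(suggest_id=open_suggests[0].id)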
| 2.328125
| 2
|
stage3b.py
|
TG-Techie/TG-Gui-LiveStream-Code-2021Jan31
| 1
|
12783250
|
# The MIT License (MIT)
#
# Copyright (c) 2021 <NAME> (TG-Techie)
#
# See the file in the root directory of this project for the full licsense text
from tg_gui_std.all import *
import tg_gui_pyportal as setup
@setup.appwrapper
class Application(Layout):
some_data = State(0.5)
# now let's make the label show the value of the slider
our_label = Label(
text=DerivedState(some_data, lambda d: f"value: {round(d*100, 2)}")
)
our_slider = Slider(value=some_data)
def _any_(self):
our_label = self.our_label(top, (self.width, self.height // 2))
our_slider = self.our_slider(bottom, (9 * self.width // 10, self.height // 2))
setup.run_app_loop()
| 2.65625
| 3
|