| max_stars_repo_path (string, lengths 3-269) | max_stars_repo_name (string, lengths 4-119) | max_stars_count (int64, 0-191k) | id (string, lengths 1-7) | content (string, lengths 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
tests/simulators/test_tcm.py
|
jason-neal/companion_simulations
| 1
|
12783351
|
import os
import pytest
import simulators
from simulators.tcm_module import (tcm_helper_function, setup_tcm_dirs)
from simulators.tcm_script import parse_args
@pytest.mark.parametrize("star, obs, chip", [
("HD30501", 1, 1),
("HD4747", "a", 4)])
def test_tcm_helper_function(sim_config, star, obs, chip):
simulators = sim_config
obs_name, params, output_prefix = tcm_helper_function(star, obs, chip)
assert isinstance(obs_name, str)
assert isinstance(output_prefix, str)
assert simulators.paths["spectra"] in obs_name
assert "-mixavg-tellcorr_" in obs_name
assert str(star) in obs_name
assert str(obs) in obs_name
assert str(chip) in obs_name
assert os.path.join(star, "tcm", star) in output_prefix
assert "tcm_chisqr_results" in output_prefix
assert params["name"] == star.lower()
def test_setup_tcm_dirs_creates_dirs(sim_config, tmpdir):
simulators = sim_config
simulators.paths["output_dir"] = str(tmpdir)
star = "TestStar"
assert not tmpdir.join(star.upper()).check()
assert not tmpdir.join(star.upper(), "tcm", "plots").check()
result = setup_tcm_dirs(star)
assert tmpdir.join(star.upper()).check(dir=True)
assert tmpdir.join(star.upper(), "tcm", "plots").check(dir=True)
assert result is None
def test_tcm_script_parser():
parsed = parse_args([])
assert parsed.chip is None
assert parsed.error_off is False
assert parsed.disable_wav_scale is False
def test_tcm_script_parser_toggle():
args = ["--chip", "2", "--error_off", "--disable_wav_scale"]
parsed = parse_args(args)
assert parsed.chip == "2"
assert parsed.error_off is True
assert parsed.disable_wav_scale is True
| 2.25
| 2
|
src/orders/migrations/0007_OrderPromoCodes.py
|
denkasyanov/education-backend
| 151
|
12783352
|
<gh_stars>100-1000
# Generated by Django 2.2.13 on 2020-09-30 13:14
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0006_PromoCodeComments'),
]
operations = [
migrations.AddField(
model_name='order',
name='promocode',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='orders.PromoCode'),
),
]
| 1.515625
| 2
|
yah/types/responses.py
|
sunsx0/yah
| 0
|
12783353
|
<reponame>sunsx0/yah
import typing
import dataclasses as dc
from .household import Household
@dc.dataclass
class ResponseBase:
status: str
request_id: str
@dc.dataclass
class HouseholdsResponse(ResponseBase):
households: typing.List[Household] = dc.field(default_factory=list)
| 2.4375
| 2
|
fiasco_api/tags/urls.py
|
xelnod/fiasco_backend
| 0
|
12783354
|
<filename>fiasco_api/tags/urls.py
from django.urls import path
from .views import TagListCreateView, TagRetrieveUpdateDestroyView
urlpatterns = [
path('', TagListCreateView.as_view()),
path('<int:pk>/', TagRetrieveUpdateDestroyView.as_view()),
]
| 1.640625
| 2
|
devscripts/make_supportedsites.py
|
olipfei/yt-dlp
| 11
|
12783355
|
<filename>devscripts/make_supportedsites.py
#!/usr/bin/env python3
import optparse
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from yt_dlp.extractor import list_extractor_classes
def main():
parser = optparse.OptionParser(usage='%prog OUTFILE.md')
_, args = parser.parse_args()
if len(args) != 1:
parser.error('Expected an output filename')
out = '\n'.join(ie.description() for ie in list_extractor_classes() if ie.IE_DESC is not False)
with open(args[0], 'w', encoding='utf-8') as outf:
outf.write(f'# Supported sites\n{out}\n')
if __name__ == '__main__':
main()
| 2.484375
| 2
|
src/diary/api/models.py
|
hoest/online-dagboek
| 1
|
12783356
|
import datetime
import diary
import itsdangerous
ROLE_USER = 0
ROLE_ADMIN = 1
"""
User-diary many-to-many relationship
"""
dairy_user_table = diary.db.Table(
"dairy_user",
diary.db.Model.metadata,
diary.db.Column("diary_id",
diary.db.Integer,
diary.db.ForeignKey("diary.id")),
diary.db.Column("user_id",
diary.db.Integer,
diary.db.ForeignKey("user.id")))
class User(diary.db.Model):
"""
The User object
"""
__tablename__ = "user"
id = diary.db.Column(diary.db.Integer, primary_key=True)
firstname = diary.db.Column(diary.db.Unicode(256), nullable=False)
lastname = diary.db.Column(diary.db.Unicode(256), nullable=False, index=True)
emailaddress = diary.db.Column(diary.db.Unicode(1024), nullable=False, index=True, unique=True)
facebook_id = diary.db.Column(diary.db.Unicode, nullable=True)
role = diary.db.Column(diary.db.SmallInteger, default=ROLE_USER)
active = diary.db.Column(diary.db.Boolean, default=True)
created = diary.db.Column(diary.db.DateTime, default=datetime.datetime.utcnow)
# relations
diaries = diary.db.relationship("Diary",
secondary=dairy_user_table,
lazy="dynamic",
backref="users")
tokens = diary.db.relationship("Auth", lazy="dynamic")
posts = diary.db.relationship("Post", lazy="dynamic")
def has_access(self, diary_id):
return len(self.diaries.filter(Diary.id == diary_id).all()) == 1
def generate_auth_token(self, expiration=600):
sec_key = diary.app.config["SECRET_KEY"]
s = itsdangerous.TimedJSONWebSignatureSerializer(sec_key, expires_in=expiration)
user_data = {
"user_id": self.id,
"user_facebook_id": self.facebook_id,
"user_email": self.emailaddress,
}
return unicode(s.dumps(user_data), "utf-8")
@staticmethod
def verify_auth_token(token):
sec_key = diary.app.config["SECRET_KEY"]
s = itsdangerous.TimedJSONWebSignatureSerializer(sec_key)
try:
data = s.loads(token)
except itsdangerous.SignatureExpired:
return None # valid token, but expired
except itsdangerous.BadSignature:
return None # invalid token
user = User.query.get(data["user_id"])
return user if data["user_facebook_id"] == user.facebook_id else None
class Auth(diary.db.Model):
"""
Auth tokens
"""
__tablename__ = "auth_token"
id = diary.db.Column(diary.db.Integer, primary_key=True)
owner_id = diary.db.Column(diary.db.Integer, diary.db.ForeignKey("user.id"))
facebook_token = diary.db.Column(diary.db.Unicode, nullable=False)
token = diary.db.Column(diary.db.Unicode, nullable=False)
modified = diary.db.Column(diary.db.DateTime, default=datetime.datetime.utcnow)
class Diary(diary.db.Model):
"""
The Diary object
"""
__tablename__ = "diary"
id = diary.db.Column(diary.db.Integer, primary_key=True)
owner_id = diary.db.Column(diary.db.Integer, diary.db.ForeignKey("user.id"))
title = diary.db.Column(diary.db.Unicode(1024), nullable=False, index=True)
created = diary.db.Column(diary.db.DateTime, default=datetime.datetime.utcnow)
# relations
posts = diary.db.relationship("Post", lazy="dynamic")
def sorted_posts(self, page):
return self.posts.order_by(Post.date.desc(), Post.id.desc()).paginate(page, 10, False).items
def to_dict(self):
return {
"id": self.id,
"owner_id": self.owner_id,
"title": self.title,
"created": self.created,
}
class Post(diary.db.Model):
"""
The Post object
"""
__tablename__ = "post"
id = diary.db.Column(diary.db.Integer, primary_key=True)
user_id = diary.db.Column(diary.db.Integer, diary.db.ForeignKey("user.id"))
diary_id = diary.db.Column(diary.db.Integer, diary.db.ForeignKey("diary.id"))
title = diary.db.Column(diary.db.Unicode(1024), nullable=False, index=True)
body = diary.db.Column(diary.db.Text, nullable=False)
date = diary.db.Column(diary.db.Date, default=datetime.datetime.utcnow)
created = diary.db.Column(diary.db.DateTime, default=datetime.datetime.utcnow)
modified = diary.db.Column(diary.db.DateTime, default=datetime.datetime.utcnow)
# relations
pictures = diary.db.relationship("Picture", lazy="dynamic")
def to_dict(self):
pics = []
for pic in self.pictures.all():
pics.append(pic.to_dict())
return {
"id": self.id,
"user_id": self.user_id,
"diary_id": self.diary_id,
"title": self.title,
"body": self.body,
"date": self.date.isoformat(),
"created": self.created.isoformat(),
"modified": self.modified.isoformat(),
"pictures": pics,
}
class Picture(diary.db.Model):
"""
The Picture object
"""
__tablename__ = "picture"
id = diary.db.Column(diary.db.Integer, primary_key=True)
post_id = diary.db.Column(diary.db.Integer, diary.db.ForeignKey("post.id"))
title = diary.db.Column(diary.db.Unicode(1024), nullable=False, index=True)
file_url = diary.db.Column(diary.db.Unicode(1024), nullable=False)
thumb_url = diary.db.Column(diary.db.Unicode(1024), nullable=True)
def to_dict(self):
return {
"id": self.id,
"title": self.title,
"file_url": self.file_url,
"thumb_url": self.thumb_url,
}
| 2.546875
| 3
|
eng_to_kana_test/test_morae_kana_converter.py
|
yokolet/transcript
| 2
|
12783357
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import unittest
from eng_to_kana.morae_kana_converter import MoraeKanaConverter
class TestMoraeKanaConverter(unittest.TestCase):
def setUp(self):
self.func = MoraeKanaConverter().convertMorae
def test_1(self):
expected_pairs = {
'fa.za.a': 'ファザー',
'a.a.mu': 'アーム',
'ko.ma.N.da.a': 'コマンダー',
'shi.i': 'シー',
'pi.g.gu': 'ピッグ',
'be.d.do': 'ベッド',
'ba.a.do': 'バード',
'ha.N.ba.a.ga.a': 'ハンバーガー',
'kya.t.to': 'キャット',
'gya.N.bu.ru': 'ギャンブル',
'gya.ru': 'ギャル',
'ka.N.to.ri.i': 'カントリー',
'mo.N.ki.i': 'モンキー',
'fu.ro.N.to': 'フロント',
'ro.N.do.N': 'ロンドン',
'bo.k.ku.su': 'ボックス',
'su.to.ro': 'ストロ',
'po.o.to': 'ポート',
'bu.k.ku': 'ブック',
'ba.ru.u.N': 'バルーン',
'a.ba.u.to': 'アバウト',
'pa.i.ro.t.to': 'パイロット',
'wi.na.a': 'ウィナー',
'ma.ma': 'ママ',
'pu.u.ma': 'プーマ',
'de.i': 'デイ',
'de.i.bi.d.do': 'デイビッド',
'ma.i': 'マイ',
'bo.i': 'ボイ',
'to.i': 'トイ',
'fo.o.N': 'フォーン',
'no.o': 'ノー',
'na.u': 'ナウ',
'kwi.a.a': 'クィアー',
'he.a.a': 'ヘアー',
'tu.a.a': 'ツアー',
'kyu.u.bu': 'キューブ',
'a.ma.zo.N': 'アマゾン',
'bo.k.ku.si.N.gu': 'ボックシング',
'gu.u.gu.ru': 'グーグル',
'ma.i.ku.ro.o.so.fu.to': 'マイクローソフト',
'ne.i.sho.N': 'ネイション',
'ro.o.ma': 'ローマ',
'wu.d.do': 'ウッド',
'wu.z.zu': 'ウッズ',
'si.N': 'シン',
're.fu.to': 'レフト',
'mi.ru.ku': 'ミルク',
'so.N.gu': 'ソング',
'da.a.ri.N.gu': 'ダーリング',
'i.i.su.to': 'イースト',
'i.e.su': 'イエス',
'hu.u.pu': 'フープ',
'po.p.pu': 'ポップ',
'ka.t.to': 'カット',
'pa.k.ku': 'パック',
'ki.su': 'キス',
'pa.c.chi': 'パッチ',
'me.s.shu': 'メッシュ'
}
for key, value in expected_pairs.items():
self.assertEqual(value, self.func(key))
def test_2(self):
expected_pairs = {
'wa.t.to': 'ワット',
'ho.wa.t.to': 'ホワット',
'wi.i.to': 'ウィート',
'ho.wi.i.to': 'ホウィート',
'wi.i.za.zu': 'ウィーザズ',
'ho.wi.i.za.zu': 'ホウィーザズ',
'wi.i.zi.zu': 'ウィージズ',
'we.N': 'ウェン',
'ho.we.N': 'ホウェン',
'wi.N': 'ウィン',
'ho.wi.N': 'ホウィン',
'wu.u': 'ウー',
'ho.wu.u': 'ホウー',
'hyu.u': 'ヒュー',
'wi.c.chi': 'ウィッチ',
'ho.wi.c.chi': 'ホウィッチ',
'wa.i.ti.i': 'ワイティー',
'ho.wa.i.ti.i': 'ホワイティー',
'wo.o': 'ウォー',
'ho.wo.o': 'ホウォー',
'ho.o': 'ホー',
'wa.i': 'ワイ',
'ho.wa.i': 'ホワイ',
'wi.zu': 'ウィズ',
'wi.su': 'ウィス'
}
for key, value in expected_pairs.items():
self.assertEqual(value, self.func(key))
def test_3(self):
expected_pairs = {
'a.jo.i.N': 'アジョイン',
'e.ri.i.a': 'エリーア',
'ba.N.kwe.t.to': 'バンクェット',
'kya.ri.j.ji': 'キャリッジ',
'ke.a.a.ji': 'ケアージ',
'e.j.ji': 'エッジ',
'da.i.e.t.to': 'ダイエット',
'jya.j.ji': 'ジャッジ',
'ma.jo.ri.ti.i': 'マジョリティー',
'myu.u.to': 'ミュート',
'o.ra.N.ji': 'オランジ',
'o.ri.N.ji': 'オリンジ',
'po.o.e.t.to': 'ポーエット',
'kwa.k.ku': 'クァック',
'kwe.i.ku': 'クェイク',
'kwi.i.ri.i': 'クィーリー',
'kwi.i.N': 'クィーン',
'kwa.i.e.t.to': 'クァイエット',
'kwi.p.pu': 'クィップ',
'kwo.o.ta': 'クォータ',
'kwo.o.to': 'クォート',
'so.ro.o': 'ソロー',
'wi.ji.t.to': 'ウィジット'
}
for key, value in expected_pairs.items():
self.assertEqual(value, self.func(key))
def test_4(self):
expected_pairs = {
'ko.ru': 'コル',
'di.d.do': 'ディッド',
'fi.N.ga.a': 'フィンガー',
'hi.a.a': 'ヒアー',
'a.i.do.ru': 'アイドル',
're.i.di.i': 'レイディー',
'ri.to.ru': 'リトル',
'ma.za.a': 'マザー',
'pu.re.ja.a': 'プレジャー',
'pyu.a.a': 'ピュアー',
'tu.ri.su.to': 'ツリスト',
'bi.jo.N': 'ビジョン'
}
for key, value in expected_pairs.items():
self.assertEqual(value, self.func(key))
def test_5(self):
expected_pairs = {
'a.be.i.jya.a': 'アベイジャー',
'a.kyu.a.si.i.zu': 'アキュアシーズ',
'e.i.na.a': 'エイナー',
'e.nye.i': 'エニェイ',
'e.i.ku.na.a': 'エイクナー',
'a.ki.i.ya.ma': 'アキーヤマ',
'a.re.i.yo.o': 'アレイヨー',
'a.u.ga.su.ti.ni.i.a.k.ku': 'アウガスティニーアック',
'a.bi.nyo.N': 'アビニョン',
'a.i.ya.s.shu': 'アイヤッシュ',
'bo.o.ryu': 'ボーリュ',
'bi.i.di.e.N.to': 'ビーディエント',
'ka.mo.N': 'カモン',
'cho.i.na.t.to.su.ki.i': 'チョイナットスキー',
'do.ra.i.bu': 'ドライブ',
'da.k.ku.ta.a': 'ダックター',
'fi.ri.i.a.bu': 'フィリーアブ',
'fi.ryo.o': 'フィリョー',
'fa.i.ti.k.ku': 'ファイティック',
'ho.re.i.ji': 'ホレイジ'
}
for key, value in expected_pairs.items():
self.assertEqual(value, self.func(key))
def test_6(self):
expected_pairs = {
'a.pu.ro.o.do': 'アプロード',
'a.p.pu.ru': 'アップル',
'a.p.pu.ru.bi.i': 'アップルビー',
'a.pu.re.t.to': 'アプレット',
'a.pu.ri.ke.i.sho.N': 'アプリケイション',
'a.pu.ra.i': 'アプライ',
'pa.i.na.p.pu.ru': 'パイナップル'
}
for key, value in expected_pairs.items():
self.assertEqual(value, self.func(key))
def test_7(self):
expected_pairs = {
'a.bu.ri.j.ji.do': 'アブリッジド',
'a.bu.ri.j.ji.me.N.to': 'アブリッジメント',
'a.byu.u.ra.z.zi.i': 'アビューラッジー',
'a.byu.u.su': 'アビュース',
'a.byu.u.zu': 'アビューズ',
'ko.N.fyu.u.zu': 'コンフューズ',
'fyu.u': 'フュー',
'ye.i': 'イェイ'
}
for key, value in expected_pairs.items():
self.assertEqual(value, self.func(key))
if __name__ == '__main__':
unittest.main()
| 2.46875
| 2
|
uptee/lib/templatetags/revision.py
|
teeworldsCNFun/upTee
| 2
|
12783358
|
from django import template
from django.core.cache import cache
register = template.Library()
@register.simple_tag
def revision():
rev = cache.get('current_revision')
if rev is None:
from lib.revision_hook import get_revision, set_cache
rev = get_revision()
set_cache(rev)
return "Revision: <a href=\"https://github.com/upTee/upTee/commit/{0}\">{1}</a>".format(rev, rev[:7])
| 2.0625
| 2
|
Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/model.py
|
linuxonly801/awesome-DeepLearning
| 1
|
12783359
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle_crf as crf
import paddle.nn.functional as F
class JointModel(paddle.nn.Layer):
def __init__(self, vocab_size, embedding_size, hidden_size, num_intents, num_slots, num_layers=1, drop_p=0.1):
super(JointModel, self).__init__()
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.drop_p = drop_p
self.num_intents = num_intents
self.num_slots = num_slots
self.embedding = nn.Embedding(vocab_size, embedding_size)
self.dropout = nn.Dropout(p=drop_p)
self.layer_norm = nn.LayerNorm(2*hidden_size)
self.bilstm = nn.LSTM(input_size=embedding_size, hidden_size=hidden_size, direction="bidirectional", num_layers=num_layers, dropout=drop_p)
self.ner_classifier = nn.Linear(hidden_size*2, num_slots+2)
self.intent_classifier = nn.Linear(hidden_size*2, num_intents)
self.crf = crf.LinearChainCrf(num_slots, crf_lr=0.001, with_start_stop_tag=True)
self.crf_loss = crf.LinearChainCrfLoss(self.crf)
self.viterbi_decoder = crf.ViterbiDecoder(self.crf.transitions)
def forward(self, inputs, lens):
batch_size, seq_len = inputs.shape
inputs_embedding = self.embedding(inputs)
if self.drop_p:
inputs_embedding = self.dropout(inputs_embedding)
lstm_outputs, _ = self.bilstm(inputs_embedding)
lstm_outputs = self.layer_norm(lstm_outputs)
emissions = self.ner_classifier(lstm_outputs)
indices = paddle.stack([paddle.arange(batch_size), lens-1], axis=1)
last_step_hiddens = paddle.gather_nd(lstm_outputs, indices)
intent_logits = self.intent_classifier(last_step_hiddens)
return emissions, intent_logits
def get_slot_loss(self, features, lens, tags):
slot_loss = self.crf_loss(features, lens, tags)
slot_loss = paddle.mean(slot_loss)
return slot_loss
def get_intent_loss(self, intent_logits, intent_labels):
return F.cross_entropy(intent_logits, intent_labels)
| 2.203125
| 2
|
globus_search_cli/commands/index/role/delete.py
|
globus/globus-search-cli
| 1
|
12783360
|
<filename>globus_search_cli/commands/index/role/delete.py
import click
from globus_search_cli.config import get_search_client
from globus_search_cli.parsing import globus_cmd, index_argument
from globus_search_cli.printing import format_output
@globus_cmd("delete", help="Delete a role (requires admin or owner)")
@index_argument
@click.argument("ROLE_ID")
def delete_cmd(index_id, role_id):
search_client = get_search_client()
format_output(search_client.delete(f"/v1/index/{index_id}/role/{role_id}").data)
| 2.078125
| 2
|
experiment_automator/image_uploader.py
|
mpolatcan/ml-notifier
| 1
|
12783361
|
<filename>experiment_automator/image_uploader.py
from experiment_automator.exceptions import ImageUploaderException
from experiment_automator.constants import SlackConstants, OAuthConstants, FlickrConstants, OtherConstants
from experiment_automator.oauth_client import OAuthClient
from experiment_automator.utils import DebugLogCat
from xml.etree import ElementTree
class ImageUploader:
def __init__(self, debug, image_uploader_config):
self.debug = debug
if image_uploader_config is None:
raise ImageUploaderException("%s - Image uploader config is None. Please specify api key in config!" % self.__class_name())
self.image_uploader_config = image_uploader_config
self.oauth_config = image_uploader_config.get(OAuthConstants.KEY_OAUTH)
# If Flickr service defined get configurations from Constants
if not (self.oauth_config is None) and self.oauth_config.get(OAuthConstants.KEY_OAUTH_SERVICE_NAME) == OtherConstants.IMAGE_SERVICE_FLICKR:
self.oauth_config[OAuthConstants.KEY_OAUTH_REQUEST_TOKEN_URL] = FlickrConstants.FLICKR_OAUTH_REQUEST_TOKEN_URL
self.oauth_config[OAuthConstants.KEY_OAUTH_AUTHORIZATION_URL] = FlickrConstants.FLICKR_OAUTH_AUTHORIZATION_URL
self.oauth_config[OAuthConstants.KEY_OAUTH_ACCESS_TOKEN_URL] = FlickrConstants.FLICKR_OAUTH_ACCESS_TOKEN_URL
self.oauth_config[OAuthConstants.KEY_OAUTH_BASE_URL] = FlickrConstants.FLICKR_OAUTH_BASE_URL
self.image_uploader_config[SlackConstants.KEY_SLACK_IMAGE_SERVICE_UPLOAD_URL] = FlickrConstants.FLICKR_UPLOAD_URL
def __class_name(self):
return self.__class__.__name__
def get_session(self, callback, **params):
return OAuthClient(self.debug, self.image_uploader_config.get(OAuthConstants.KEY_OAUTH)).connect(callback, **params)
class FlickrImageUploader(ImageUploader):
def __init__(self, debug, image_uploader_config):
super().__init__(debug, image_uploader_config)
self.session = super().get_session(OAuthConstants.KEY_OAUTH_CALLBACK_OOB, perms="write")
def __class_name(self):
return self.__class__.__name__
def __get_image_static_url(self, photo_id, image_type):
data = {
"photo_id": photo_id,
"method": FlickrConstants.FLICKR_GET_IMAGE_SIZES_METHOD
}
response = self.session.post(self.oauth_config.get(OAuthConstants.KEY_OAUTH_BASE_URL), data=data).text
sizes = {}
for size in ElementTree.fromstring(response).find('sizes').findall('size'):
sizes[size.attrib["label"]] = size.attrib["source"]
return sizes[image_type]
def upload_image(self, image_path, image_type):
DebugLogCat.log(self.debug, self.__class_name(), "Uploading image \"%s\"" % image_path)
files = {
"photo": open(image_path, 'rb'),
}
photo_id = ElementTree.fromstring(self.session.post(self.image_uploader_config.get(SlackConstants.KEY_SLACK_IMAGE_SERVICE_UPLOAD_URL), files=files).text).find("photoid").text
if photo_id is None:
DebugLogCat.log(self.debug, self.__class_name(), "Image upload could not be completed!")
else:
DebugLogCat.log(self.debug, self.__class_name(), "Image upload completed. Photo id: \"%s\" !" % photo_id)
return self.__get_image_static_url(photo_id, image_type)
| 2.171875
| 2
|
Solver/threshold.py
|
steveknipmeyer/ModelRelief
| 0
|
12783362
|
<reponame>steveknipmeyer/ModelRelief
#!/usr/bin/env python
"""
.. module:: Threshold
:synopsis: Support for applying thresholds to image components.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
from services import Services
class Threshold:
"""
A class providing support for applying thresholds to image components.
"""
def __init__(self, services : Services) -> None:
"""
Initialize an instance of a Threshold.
Parameters
----------
services
Service provider (logging, timers, etc.)
"""
self.debug = True
self.services = services
def apply (self, original: np.ndarray, limit : float) -> np.ndarray:
"""
Applies a threshold to all elements in an ndarray.
All absolute values above the threshold are set to 0.
Parameters
----------
original
The array to apply the threshold against.
limit
The highest value above which an element will be filtered.
"""
# copy
threshold_array = np.array(original)
threshold_array[np.abs(threshold_array) > limit] = 0
return threshold_array
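# Illustrative usage (a sketch, not part of the original module; it assumes a
# Services instance named `services` is already available):
#   threshold = Threshold(services)
#   threshold.apply(np.array([0.5, -2.0, 1.5]), limit=1.0)
#   # -> array([0.5, 0.0, 0.0]); elements whose absolute value exceeds 1.0 are zeroed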
| 2.671875
| 3
|
honeysnap/__init__.py
|
honeynet/honeysnap
| 7
|
12783363
|
# $Id$
import main
| 1.117188
| 1
|
apps/product/admin.py
|
gurnitha/django-russian-ecommerce
| 1
|
12783364
|
<gh_stars>1-10
# apps/home/admin.py
# Django locals
from django.contrib import admin
# Django local
from apps.product.models import Category, Product, Images
class CategoryAdmin(admin.ModelAdmin):
list_display = ['title','parent', 'status']
list_filter = ['status']
class ProductImageInline(admin.TabularInline):
model = Images
extra = 5
class ProductAdmin(admin.ModelAdmin):
list_display = ['title','category', 'status','image_tag']
list_filter = ['category']
readonly_fields = ('image_tag',)
inlines = [ProductImageInline]
# Register your models here.
admin.site.register(Category, CategoryAdmin)
admin.site.register(Product, ProductAdmin)
admin.site.register(Images)
| 1.921875
| 2
|
deployutils/apps/django/settings.py
|
nomadigital/djaodjin-deployutils
| 5
|
12783365
|
<gh_stars>1-10
# Copyright (c) 2019, Djaodjin Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Convenience module for access of deployutils app settings, which enforces
default settings when the main settings module does not contain
the appropriate settings.
In a production environment, the static resources (images, css, js) are served
directly by nginx from MULTITIER_RESOURCES_ROOT. Furthermore the CMS pages are
served by one process while the app is served by another process. This requires
to install the templates from the app repo into the CMS template directory
(MULTITIER_THEMES_DIR) after the TemplateNodes related to the assets
pipeline have been resolved.
"""
import os
from django.conf import settings
_SETTINGS = {
'ALLOWED_NO_SESSION': [],
'APP_NAME': getattr(settings, 'APP_NAME',
os.path.basename(settings.BASE_DIR)),
'BACKEND_SESSION_STORE': None,
'DEBUG': getattr(settings, 'DEBUG', None),
'DEPLOYED_WEBAPP_ROOT': '/var/www',
'DEPLOYED_SERVERS': None,
'DJAODJIN_SECRET_KEY': os.getenv('DJAODJIN_SECRET_KEY',
getattr(settings, 'DJAODJIN_SECRET_KEY', None)),
'DRY_RUN': getattr(settings, 'DEPLOYUTILS_DRY_RUN', False),
'INSTALLED_APPS': getattr(settings, 'DEPLOYUTILS_INSTALLED_APPS',
settings.INSTALLED_APPS),
'MOCKUP_SESSIONS': {},
'MULTITIER_RESOURCES_ROOT': getattr(settings, 'DEPLOYUTILS_RESOURCES_ROOT',
settings.BASE_DIR + '/htdocs/'),
'MULTITIER_ASSETS_DIR': os.path.join(settings.BASE_DIR, 'htdocs'),
'MULTITIER_THEMES_DIR': os.path.join(settings.BASE_DIR, 'themes'),
'RESOURCES_REMOTE_LOCATION': getattr(settings,
'DEPLOYUTILS_RESOURCES_REMOTE_LOCATION', None),
'SESSION_COOKIE_NAME': 'sessionid',
}
_SETTINGS.update(getattr(settings, 'DEPLOYUTILS', {}))
ALLOWED_NO_SESSION = _SETTINGS.get('ALLOWED_NO_SESSION')
APP_NAME = _SETTINGS.get('APP_NAME')
BACKEND_SESSION_STORE = _SETTINGS.get('BACKEND_SESSION_STORE')
DEBUG = _SETTINGS.get('DEBUG')
DEPLOYED_WEBAPP_ROOT = _SETTINGS.get('DEPLOYED_WEBAPP_ROOT')
DEPLOYED_SERVERS = _SETTINGS.get('DEPLOYED_SERVERS')
DJAODJIN_SECRET_KEY = _SETTINGS.get('DJAODJIN_SECRET_KEY')
DRY_RUN = _SETTINGS.get('DRY_RUN')
MOCKUP_SESSIONS = _SETTINGS.get('MOCKUP_SESSIONS')
MULTITIER_ASSETS_DIR = _SETTINGS.get('MULTITIER_ASSETS_DIR')
MULTITIER_THEMES_DIR = _SETTINGS.get('MULTITIER_THEMES_DIR')
MULTITIER_RESOURCES_ROOT = _SETTINGS.get('MULTITIER_RESOURCES_ROOT')
if not MULTITIER_RESOURCES_ROOT.endswith('/'):
MULTITIER_RESOURCES_ROOT = MULTITIER_RESOURCES_ROOT + '/'
RESOURCES_REMOTE_LOCATION = _SETTINGS.get('RESOURCES_REMOTE_LOCATION')
SESSION_COOKIE_NAME = _SETTINGS.get('SESSION_COOKIE_NAME')
INSTALLED_APPS = _SETTINGS.get('INSTALLED_APPS')
SESSION_SAVE_EVERY_REQUEST = getattr(
settings, 'SESSION_SAVE_EVERY_REQUEST', False)
| 1.203125
| 1
|
scripts/addons/animation_nodes/nodes/object/object_visibility_output.py
|
Tilapiatsu/blender-custom_conf
| 2
|
12783366
|
import bpy
from bpy.props import *
from ... base_types import AnimationNode, VectorizedSocket
attributes = [
("Hide Viewport", "hide", "hide_viewport", "useHideViewportList"),
("Hide Render", "hideRender", "hide_render", "useHideRenderList"),
("Hide Select", "hideSelect", "hide_select", "useHideSelectList"),
("Show Name", "showName", "show_name", "useShowNameList"),
("Show Axis", "showAxis", "show_axis", "useShowAxisList"),
("Show In Front", "showInFront", "show_in_front", "useShowInFrontList")
]
class ObjectVisibilityOutputNode(bpy.types.Node, AnimationNode):
bl_idname = "an_ObjectVisibilityOutputNode"
bl_label = "Object Visibility Output"
codeEffects = [VectorizedSocket.CodeEffect]
useObjectList: VectorizedSocket.newProperty()
for *_, useListName in attributes:
__annotations__[useListName] = VectorizedSocket.newProperty()
def create(self):
self.newInput(VectorizedSocket("Object", "useObjectList",
("Object", "object", dict(defaultDrawType = "PROPERTY_ONLY")),
("Objects", "objects"),
codeProperties = dict(allowListExtension = False)))
for name, identifier, _, useListName in attributes:
self.newInput(VectorizedSocket("Boolean", [useListName, "useObjectList"],
(name, identifier), (name, identifier)))
self.newOutput(VectorizedSocket("Object", "useObjectList",
("Object", "object"), ("Objects", "objects")))
for socket in self.inputs[1:]:
socket.useIsUsedProperty = True
socket.isUsed = False
socket.value = False
for socket in self.inputs[3:]:
socket.hide = True
def getExecutionCode(self, required):
yield "if object is not None:"
for name, identifier, attr, _ in attributes:
if self.inputs[name].isUsed:
yield " object.{} = {}".format(attr, identifier)
yield " pass"
def getBakeCode(self):
yield "if object is not None:"
for name, _, attr, _ in attributes:
if self.inputs[name].isUsed:
yield " object.keyframe_insert('{}')".format(attr)
yield " pass"
| 2.28125
| 2
|
fusion/fusion/settings.py
|
souluanf/django-essencial
| 0
|
12783367
|
<filename>fusion/fusion/settings.py
import os
import dj_database_url
try:
from .settings_local import *
except ImportError:
pass
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ENVIRONMENT = os.environ.get('ENVIRONMENT', 'development')
INTERNAL_IPS = ['127.0.0.1']
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'core',
'django_adminlte',
'django_adminlte_theme',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fusion.urls'
SECRET_KEY = '41p9#s0+r0(re=yp2(3)3qjhm!28wv5wp6t9pl#&&b@t_3l#i8'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fusion.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
if ENVIRONMENT == 'development':
# DATABASES = localdb
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
elif ENVIRONMENT == 'test':
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
else:
DATABASES = {
'default': dj_database_url.config()
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGOUT_REDIRECT = 'index'
EMAIL_HOST = 'smtp.yandex.com'
EMAIL_HOST_USER = '<EMAIL>'
EMAIL_HOST_PASSWORD = '<PASSWORD>'
EMAIL_PORT = 465
EMAIL_USE_SSL = True
EMAIL_USE_TLS = False
| 1.664063
| 2
|
users/views.py
|
Fhoughton/Babble
| 0
|
12783368
|
<reponame>Fhoughton/Babble
from django.shortcuts import render, redirect
#from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get("username")
messages.success(request,f'Account successfully registered.')
return redirect("blog-home")
else:
form = UserRegisterForm()
return render(request, 'users/register.html', {"form": form})
@login_required
def profile(request):
if request.method == 'POST':
userupdateform = UserUpdateForm(request.POST,instance=request.user)
profileupdateform = ProfileUpdateForm(request.POST,request.FILES,instance=request.user.profile)
if userupdateform.is_valid() and profileupdateform.is_valid():
userupdateform.save()
profileupdateform.save()
messages.success(request,f'Account successfully updated.')
return redirect("Profile")
else:
userupdateform = UserUpdateForm(instance=request.user)
profileupdateform = ProfileUpdateForm(instance=request.user.profile)
context = {
'userupdateform' : userupdateform,
'profileupdateform' : profileupdateform,
}
return render(request, 'users/profile.html', context)
| 2.171875
| 2
|
lifetracker/tracker/urls.py
|
KenN7/LifeTracker
| 0
|
12783369
|
from django.conf.urls import url, include
import django.contrib.auth.views
import tracker.views
urlpatterns = [
url(r'^$', tracker.views.tracker_page, name='tracker-page'),
url(r'^door-opener$', tracker.views.door_opener, name='door-opener'),
url(r'^login$', django.contrib.auth.views.login, {'template_name': 'login.html'}, name='login'),
url(r'^logout$', django.contrib.auth.views.logout, {'next_page': '/'}, name='logout'),
#url('^', include('django.contrib.auth.urls')),
]
| 1.71875
| 2
|
prune_model/export_saved_model.py
|
smsaladi/EASTdb
| 0
|
12783370
|
<filename>prune_model/export_saved_model.py
"""
Convert Keras model to tensorflow format for serving
"""
import argparse
import tensorflow.compat.v1 as tf
def convert_tfserving(model_fn, export_path):
"""Converts a tensorflow model into a format for tensorflow_serving
`model_fn` is the model filename (hdf5 probably)
`export_path` is the location to write to
"""
# The export path contains the name and the version of the model
tf.keras.backend.set_learning_phase(0) # Ignore dropout at inference
model = tf.keras.models.load_model(model_fn)
# Fetch the Keras session and save the model
# The signature definition is defined by the input and output tensors
# And stored with the default serving key
with tf.keras.backend.get_session() as sess:
tf.saved_model.simple_save(
sess,
export_path,
inputs={'input_seq_batch': model.input},
outputs={t.name: t for t in model.outputs})
return
def main():
parser = argparse.ArgumentParser()
parser.add_argument('model')
parser.add_argument('export_path')
args = parser.parse_args()
convert_tfserving(args.model, args.export_path)
return
if __name__ == '__main__':
main()
| 3.265625
| 3
|
app.py
|
Mastermindzh/Subdarr
| 6
|
12783371
|
<filename>app.py
"""Subdarr
Subdarr is a module which listens to POST requests with movie/series information
and downloads subtitles in the languages requested.
"""
import json
import os
import meinheld
import datetime
from datetime import timedelta
from logger.logger import log
from flask import Flask, request, send_file
from babelfish import Language
from subliminal import download_best_subtitles, save_subtitles, scan_video, scan_videos
from concurrent.futures import ThreadPoolExecutor
# Define APP
APP = Flask(__name__, template_folder="templates")
# Define nr of threads (defaults to 5)
executor = ThreadPoolExecutor()
# Define global variables
NO_DATA_RECEIVED = "No data received"
NO_PATH_PROVIDED = "No path provided"
NO_SUBTITLES_FOUND = "No subtitles found"
@APP.route('/', methods=['GET'])
def home():
"""Homepage
introductory page (currently returns just the version as JSON)
"""
return '{"version":0.1}'
@APP.route('/connector', methods=['GET'])
def get_connector():
"""Returns the connector script"""
try:
return send_file(os.path.join("./connector", "download_subtitles.sh"), as_attachment=True)
except (TypeError, OSError) as err:
return error(err, 500)
@APP.route('/scan', methods=["POST"])
def scan():
"""scan for subtitles in a given path
json:
{
"languages": "eng, nld",
"path":"/path",
"age": 14
}
age = in days
"""
if request.json:
mydata = request.json
else:
log(NO_DATA_RECEIVED)
return error(NO_DATA_RECEIVED, 406)
if not 'path' in mydata or not mydata['path']:
return error(NO_PATH_PROVIDED, 406)
if not 'languages' in mydata or not mydata['languages']:
mydata['languages'] = "eng"
if not 'age' in mydata or not mydata['age']:
mydata['age'] = 14
executor.submit(scan_folder_for_subs, mydata['path'],mydata['age'], mydata['languages'])
return 'Scan has started in the background'
@APP.route("/download", methods=['POST'])
def download_subtitles():
"""Download subtitles"""
if request.json:
mydata = request.json
if not 'path' in mydata: # check if empty
log(NO_PATH_PROVIDED)
return error(NO_PATH_PROVIDED, 406)
# check if languages is empty, if so use english
if not 'languages' in mydata or not mydata['languages']:
mydata['languages'] = "eng"
log(json.dumps(mydata))
path = mydata['path']
videos = []
try:
videos.append(scan_video(path))
subtitles = download_best_subtitles(videos, parse_languages(mydata['languages']))
for video in videos:
save_subtitles(video, subtitles[video])
return json.dumps(mydata)
except Exception:
log(NO_SUBTITLES_FOUND)
return error(NO_SUBTITLES_FOUND, 404)
else:
log(NO_DATA_RECEIVED)
return error(NO_DATA_RECEIVED, 406)
def scan_folder_for_subs(path, age, languages):
dirname = os.path.basename(path)
log("Processing: " + dirname)
# scan for videos newer than {age} days and their existing subtitles in a folder
videos = scan_videos(path, age=timedelta(days=age))
log("Found the following videos:")
for v in videos:
log(" - " + v.name)
# download best subtitles and try to save it
log("Looking for subtitles..")
try:
subtitles = download_best_subtitles(videos, parse_languages(languages))
for v in videos:
if(len(subtitles[v]) >= 1):
log(" Saving subtitle(s) for: " + v.name)
save_subtitles(v, subtitles[v])
else:
log(" No subtitles found for: " + v.name)
except Exception:
log("ERROR: - Download failed.")
def parse_languages(csv_languages):
"""Parse languages string
Keyword arguments:
csv_languages -- Comma separated string of languages
"""
my_languages = []
for lang in csv_languages.split(","):
my_languages.append(Language(lang))
return set(my_languages)
def error(text, error_code):
"""Returns the error in json format
Keyword arguments:
text -- Human readable text for the error
error_code -- http status code
"""
return '{{"Error": "{}"}}'.format(text), error_code
# finally run the script if file is called directly
meinheld.listen(("0.0.0.0", 5500))
meinheld.run(APP)
| 2.796875
| 3
|
edalize/trellis.py
|
QuickLogic-Corp/edalize
| 2
|
12783372
|
<reponame>QuickLogic-Corp/edalize
# Copyright edalize contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
import os.path
from edalize.edatool import Edatool
from edalize.yosys import Yosys
from importlib import import_module
class Trellis(Edatool):
argtypes = ['vlogdefine', 'vlogparam']
@classmethod
def get_doc(cls, api_ver):
if api_ver == 0:
yosys_help = Yosys.get_doc(api_ver)
trellis_help = {
'lists' : [
{'name' : 'nextpnr_options',
'type' : 'String',
'desc' : 'Additional options for nextpnr'},
{'name' : 'yosys_synth_options',
'type' : 'String',
'desc' : 'Additional options for the synth_ecp5 command'},
]}
combined_members = []
combined_lists = trellis_help['lists']
yosys_members = yosys_help['members']
yosys_lists = yosys_help['lists']
combined_members.extend(m for m in yosys_members if m['name'] not in [i['name'] for i in combined_members])
combined_lists.extend(l for l in yosys_lists if l['name'] not in [i['name'] for i in combined_lists])
return {'description' : "Project Trellis enables a fully open-source flow for ECP5 FPGAs using Yosys for Verilog synthesis and nextpnr for place and route",
'members' : combined_members,
'lists' : combined_lists}
def configure_main(self):
# Write yosys script file
(src_files, incdirs) = self._get_fileset_files()
yosys_synth_options = self.tool_options.get('yosys_synth_options', [])
yosys_synth_options = ["-nomux"] + yosys_synth_options
yosys_edam = {
'files' : self.files,
'name' : self.name,
'toplevel' : self.toplevel,
'parameters' : self.parameters,
'tool_options' : {'yosys' : {
'arch' : 'ecp5',
'yosys_synth_options' : yosys_synth_options,
'yosys_as_subtool' : True,
}
}
}
yosys = getattr(import_module("edalize.yosys"), 'Yosys')(yosys_edam, self.work_root)
yosys.configure()
lpf_files = []
for f in src_files:
if f.file_type == 'LPF':
lpf_files.append(f.name)
elif f.file_type == 'user':
pass
if not lpf_files:
lpf_files = ['empty.lpf']
with open(os.path.join(self.work_root, lpf_files[0]), 'a'):
os.utime(os.path.join(self.work_root, lpf_files[0]), None)
elif len(lpf_files) > 1:
raise RuntimeError("trellis backend supports only one LPF file. Found {}".format(', '.join(lpf_files)))
# Write Makefile
nextpnr_options = self.tool_options.get('nextpnr_options', [])
template_vars = {
'name' : self.name,
'lpf_file' : lpf_files[0],
'nextpnr_options' : nextpnr_options,
}
self.render_template('trellis-makefile.j2',
'Makefile',
template_vars)
| 1.945313
| 2
|
rllib_integration/sensors/sensor.py
|
rsuwa/rllib-integration
| 22
|
12783373
|
#!/usr/bin/env python
# Copyright (c) 2021 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Here are defined all the CARLA sensors
"""
import copy
import math
import numpy as np
import carla
# ==================================================================================================
# -- BaseSensor -----------------------------------------------------------------------------------
# ==================================================================================================
class BaseSensor(object):
def __init__(self, name, attributes, interface, parent):
self.name = name
self.attributes = attributes
self.interface = interface
self.parent = parent
self.interface.register(self.name, self)
def is_event_sensor(self):
return False
def parse(self, data):
raise NotImplementedError
def update_sensor(self, data, frame):
if not self.is_event_sensor():
self.interface._data_buffers.put((self.name, frame, self.parse(data)))
else:
self.interface._event_data_buffers.put((self.name, frame, self.parse(data)))
def callback(self, data):
self.update_sensor(data, data.frame)
def destroy(self):
raise NotImplementedError
class CarlaSensor(BaseSensor):
def __init__(self, name, attributes, interface, parent):
super().__init__(name, attributes, interface, parent)
world = self.parent.get_world()
type_ = self.attributes.pop("type", "")
transform = self.attributes.pop("transform", "0,0,0,0,0,0")
if isinstance(transform, str):
transform = [float(x) for x in transform.split(",")]
assert len(transform) == 6
blueprint = world.get_blueprint_library().find(type_)
blueprint.set_attribute("role_name", name)
for key, value in attributes.items():
blueprint.set_attribute(str(key), str(value))
transform = carla.Transform(
carla.Location(transform[0], transform[1], transform[2]),
carla.Rotation(transform[4], transform[5], transform[3])
)
self.sensor = world.spawn_actor(blueprint, transform, attach_to=self.parent)
self.sensor.listen(self.callback)
def destroy(self):
if self.sensor is not None:
self.sensor.destroy()
self.sensor = None
class PseudoSensor(BaseSensor):
def __init__(self, name, attributes, interface, parent):
super().__init__(name, attributes, interface, parent)
def callback(self, data, frame):
self.update_sensor(data, frame)
# ==================================================================================================
# -- Cameras -----------------------------------------------------------------------------------
# ==================================================================================================
class BaseCamera(CarlaSensor):
def __init__(self, name, attributes, interface, parent):
super().__init__(name, attributes, interface, parent)
def parse(self, sensor_data):
"""Parses the Image into an numpy array"""
# sensor_data: [fov, height, width, raw_data]
array = np.frombuffer(sensor_data.raw_data, dtype=np.dtype("uint8"))
array = np.reshape(array, (sensor_data.height, sensor_data.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
return array
class CameraRGB(BaseCamera):
def __init__(self, name, attributes, interface, parent):
super().__init__(name, attributes, interface, parent)
class CameraDepth(BaseCamera):
def __init__(self, name, attributes, interface, parent):
super().__init__(name, attributes, interface, parent)
class CameraSemanticSegmentation(BaseCamera):
def __init__(self, name, attributes, interface, parent):
super().__init__(name, attributes, interface, parent)
class CameraDVS(CarlaSensor):
def __init__(self, name, attributes, interface, parent):
super().__init__(name, attributes, interface, parent)
def is_event_sensor(self):
return True
def parse(self, sensor_data):
"""Parses the DVSEvents into an RGB image"""
# sensor_data: [x, y, t, polarity]
dvs_events = np.frombuffer(sensor_data.raw_data, dtype=np.dtype([
('x', np.uint16), ('y', np.uint16), ('t', np.int64), ('pol', bool)]))
dvs_img = np.zeros((sensor_data.height, sensor_data.width, 3), dtype=np.uint8)
dvs_img[dvs_events[:]['y'], dvs_events[:]['x'], dvs_events[:]['pol'] * 2] = 255 # Blue is positive, red is negative
return dvs_img
# ==================================================================================================
# -- LIDAR -----------------------------------------------------------------------------------
# ==================================================================================================
class Lidar(CarlaSensor):
def __init__(self, name, attributes, interface, parent):
super().__init__(name, attributes, interface, parent)
def parse(self, sensor_data):
"""Parses the LidarMeasurememt into an numpy array"""
# sensor_data: [x, y, z, intensity]
points = np.frombuffer(sensor_data.raw_data, dtype=np.dtype('f4'))
points = copy.deepcopy(points)
points = np.reshape(points, (int(points.shape[0] / 4), 4))
return points
class SemanticLidar(CarlaSensor):
def __init__(self, name, attributes, interface, parent):
super().__init__(name, attributes, interface, parent)
def parse(self, sensor_data):
"""Parses the SemanticLidarMeasurememt into an numpy array"""
# sensor_data: [x, y, z, cos(angle), actor index, semantic tag]
points = np.frombuffer(sensor_data.raw_data, dtype=np.dtype('f4'))
points = copy.deepcopy(points)
points = np.reshape(points, (int(points.shape[0] / 6), 6))
return points
# ==================================================================================================
# -- Others -----------------------------------------------------------------------------------
# ==================================================================================================
class Radar(CarlaSensor):
def __init__(self, name, attributes, interface, parent):
super().__init__(name, attributes, interface, parent)
def parse(self, sensor_data):
"""Parses the RadarMeasurement into an numpy array"""
# sensor_data: [depth, azimuth, altitute, velocity]
points = np.frombuffer(sensor_data.raw_data, dtype=np.dtype('f4'))
points = copy.deepcopy(points)
points = np.reshape(points, (int(points.shape[0] / 4), 4))
points = np.flip(points, 1)
return points
class Gnss(CarlaSensor):
def __init__(self, name, attributes, interface, parent):
super().__init__(name, attributes, interface, parent)
def parse(self, sensor_data):
"""Parses the GnssMeasurement into an numpy array"""
# sensor_data: [latitude, longitude, altitude]
return np.array([sensor_data.latitude, sensor_data.longitude, sensor_data.altitude], dtype=np.float64)
class Imu(CarlaSensor):
def __init__(self, name, attributes, interface, parent):
super().__init__(name, attributes, interface, parent)
def parse(self, sensor_data):
"""Parses the IMUMeasurement into an numpy array"""
# sensor_data: [accelerometer, gyroscope, compass]
return np.array([sensor_data.accelerometer.x, sensor_data.accelerometer.y, sensor_data.accelerometer.z,
sensor_data.gyroscope.x, sensor_data.gyroscope.y, sensor_data.gyroscope.z,
sensor_data.compass,
], dtype=np.float64)
class LaneInvasion(CarlaSensor):
def __init__(self, name, attributes, interface, parent):
super().__init__(name, attributes, interface, parent)
def is_event_sensor(self):
return True
def parse(self, sensor_data):
"""Parses the IMUMeasurement into a list"""
# sensor_data: [transform, lane marking]
return [sensor_data.transform, sensor_data.crossed_lane_markings]
class Collision(CarlaSensor):
def __init__(self, name, attributes, interface, parent):
self._last_event_frame = 0
super().__init__(name, attributes, interface, parent)
def callback(self, data):
# The collision sensor can have multiple callbacks per tick. Get only the first one
if self._last_event_frame != data.frame:
self._last_event_frame = data.frame
self.update_sensor(data, data.frame)
def is_event_sensor(self):
return True
def parse(self, sensor_data):
"""Parses the ObstacleDetectionEvent into a list"""
# sensor_data: [other actor, distance]
impulse = sensor_data.normal_impulse
impulse_value = math.sqrt(impulse.x ** 2 + impulse.y ** 2 + impulse.z ** 2)
return [sensor_data.other_actor, impulse_value]
class Obstacle(CarlaSensor):
def __init__(self, name, attributes, interface, parent):
super().__init__(name, attributes, interface, parent)
def is_event_sensor(self):
return True
def parse(self, sensor_data):
"""Parses the ObstacleDetectionEvent into a list"""
# sensor_data: [other actor, distance]
return [sensor_data.other_actor, sensor_data.distance]
| 2.4375
| 2
|
stock_portfolio/models/junction.py
|
katcosgrove/stock-portfolio
| 0
|
12783374
|
from sqlalchemy import (
Index,
Column,
Integer,
String,
ForeignKey,
)
# from sqlalchemy.orm import relationship
from .meta import Base
class Junction(Base):
__tablename__ = 'user_portfolios'
id = Column(Integer, primary_key=True)
stock_id = Column(String, ForeignKey('stocks.symbol'), nullable=False)
account_id = Column(String, ForeignKey('accounts.username'), nullable=False)
| 2.359375
| 2
|
dos/classification/breast_cancer_wisconsin.py
|
disooqi/COMP-6321-Project
| 1
|
12783375
|
import pandas as pd
from scipy.io import arff
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, OrdinalEncoder, StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split, cross_val_score, KFold, GridSearchCV
dataset = pd.read_csv(r'..\..\data\breast-cancer-wisconsin\wdbc.data', header=None) # header=None, usecols=[3,6]
# print(dataset[1].value_counts())
dataset.pop(0)
y = LabelEncoder().fit_transform(dataset.pop(1).values)
si_step = ('si', SimpleImputer(strategy='constant', fill_value='MISSING'))
ohe_step = ('ohe', OneHotEncoder(sparse=False, handle_unknown='ignore'))
oe_step = ('le', OrdinalEncoder())
num_si_step = ('si', SimpleImputer(strategy='mean'))
sc_step = ('sc', StandardScaler())
cat_pipe = Pipeline([si_step, ohe_step])
num_pipe = Pipeline([num_si_step, sc_step])
bin_pipe = Pipeline([oe_step])
transformers = [
# ('cat', cat_pipe, ['DGN', 'PRE6', 'PRE14']),
('num', num_pipe, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
28, 29]),
# ('bin', bin_pipe, ['PRE7', 'PRE8', 'PRE9', 'PRE10', 'PRE11', 'PRE17', 'PRE19', 'PRE25', 'PRE30', 'PRE32']),
]
ct = ColumnTransformer(transformers=transformers)
# X_transformed = ct.fit_transform(dataset)
ml_pipe = Pipeline([
('X_transform', ct),
('mlp', MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(4, 3))),
])
kf = KFold(n_splits=5, shuffle=True)
# cv_score = cross_val_score(ml_pipe, dataset, y, cv=kf).mean()
param_grid = {
'X_transform__num__si__strategy': ['mean', 'median'],
'mlp__solver': ['sgd', 'adam', 'lbfgs'],
'mlp__alpha': [1e-1, 1e-3, 1e-5],
'mlp__hidden_layer_sizes': [(5, 2), (4, 3), (4, 4), (5, 5)],
'mlp__activation': ['identity', 'logistic', 'tanh', 'relu'],
}
knn_pipe = Pipeline([
('X_transform', ct),
('knn', KNeighborsClassifier(n_neighbors=8)),
])
ml_pipe.fit(dataset, y)
print(f'All data score: {ml_pipe.score(dataset, y)}')
knn_param_grid = {
'X_transform__num__si__strategy': ['mean', 'median'],
'knn__n_neighbors': range(1, 10),
}
gs = GridSearchCV(ml_pipe, param_grid, cv=kf)
gs.fit(dataset, y)
print(gs.best_params_)
print(gs.best_score_)
print(pd.DataFrame(gs.cv_results_))
| 2.421875
| 2
|
setup.py
|
jonhadfield/fval
| 0
|
12783376
|
#!/usr/bin/env python
import os
import re
import sys
from setuptools import (setup, find_packages)
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload -r pypi')
sys.exit()
install_requires = ['colorama',
'colorlog',
'PyYAML>=3.11',
'file-magic']
try:
import concurrent.futures
except ImportError:
install_requires.append('futures')
if sys.version_info < (2, 7):
exit('Python version 2.7 or above is required.')
test_requirements = ['pytest>=3.0.3', 'pytest-cov>=2.4.0']
with open('fval/__init__.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
readme = open('README.rst').read()
long_description = readme
setup(
name='fval',
version=version,
description='A file validator.',
long_description=long_description,
author='<NAME>',
author_email='<EMAIL>',
url='http://github.com/jonhadfield/fval',
packages=find_packages(),
data_files=[
('{0}/.fval'.format(os.path.expanduser('~')),
['samples/fval.cfg'])
],
entry_points={
'console_scripts': [
'fval = fval:main'
],
},
include_package_data=True,
install_requires=install_requires,
license='MIT',
scripts=['bin/fval'],
zip_safe=False,
classifiers=(
'Development Status :: 4 - Beta',
'Intended Audience :: System Administrators',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: BSD :: Linux',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: BSD :: OpenBSD',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
),
tests_require=install_requires + test_requirements,
)
| 1.546875
| 2
|
jd/api/rest/OrderCheckRefuseOrderQueryRequest.py
|
fengjinqi/linjuanbang
| 5
|
12783377
|
<reponame>fengjinqi/linjuanbang
from jd.api.base import RestApi
class OrderCheckRefuseOrderQueryRequest(RestApi):
def __init__(self,domain='gw.api.360buy.com',port=80):
RestApi.__init__(self,domain, port)
self.date = None
self.page = None
def getapiname(self):
return 'biz.order.checkRefuseOrder.query'
| 2.109375
| 2
|
Math/P07 - reverseInteger.py
|
HarshOza36/LeetCode_Problems
| 0
|
12783378
|
class Solution(object):
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
num = x
x = str(abs(x))
out = int(x[::-1])
if(out > 2**31 -1 or out < -2**31):
return 0
elif(num < 0):
return -1*out
else:
return out
| 3.515625
| 4
|
game.py
|
projeto-de-algoritmos/Lista2_GustavoM_JoaoM
| 0
|
12783379
|
import pygame
from screens import AnswerScreen, FinishScreen, InfoScreen, MenuScreen, QuestionScreen, TestSceen
from time import sleep
class Game:
# Game constants
WIDTH = 1024
HEIGHT = 768
GAME_NAME = '<NAME>'
INTRO_TEXT = ''
# Game states
running = True
__screens = {}
current_screen = MenuScreen.ID
CORRECT_ANSWER = 1
WRONG_ANSWER = 2
TIMES_UP = 3
state_question = CORRECT_ANSWER
graphs = []
standard_graphs = []
current_graph = None
current_question = 0
max_questions = 0
correct_ans = 0
wrong_ans = 0
def __init__(self):
self.screen = pygame.display.set_mode((self.WIDTH, self.HEIGHT))
pygame.display.set_caption(self.GAME_NAME)
icon = pygame.image.load('icon.png')
pygame.display.set_icon(icon)
self.__screens[MenuScreen.ID] = MenuScreen(self)
self.__screens[InfoScreen.ID] = InfoScreen(self)
self.__screens[QuestionScreen.ID] = QuestionScreen(self)
self.__screens[AnswerScreen.ID] = AnswerScreen(self)
self.__screens[FinishScreen.ID] = FinishScreen(self)
#self.__screens[TestSceen.ID] = TestSceen(self)
self.clock = pygame.time.Clock()
def run(self, graphs=[]):
pygame.init()
self.standard_graphs = graphs
self.max_questions = len(graphs)
self.current_graph = graphs[0]
while self.running:
self.__screens[self.current_screen].run()
def exit(self):
self.running = False
def start_game(self):
self.current_question = 0
self.wrong_ans = 0
self.correct_ans = 0
self.graphs = self.standard_graphs
self.max_questions = len(self.graphs)
self.change_screen(QuestionScreen)
def change_screen(self, screen):
self.current_screen = screen.ID
def no_answer_question(self):
#print('path', self.current_graph.path)
self.current_graph.path
self.state_question = self.TIMES_UP
self.change_screen(AnswerScreen)
def answer_question(self, user_answer):
#print('path', self.current_graph.path)
#print(user_answer)
if self.current_graph.path == user_answer:
self.correct_ans+=1
self.state_question = self.CORRECT_ANSWER
else:
self.wrong_ans+=1
self.state_question = self.WRONG_ANSWER
self.change_screen(AnswerScreen)
def next_question(self):
self.current_question = self.current_question+1
if self.current_question>=self.max_questions:
self.current_question = 0
self.change_screen(FinishScreen)
else:
self.change_screen(QuestionScreen)
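

# A minimal launch sketch: Game.run expects a list of graph objects whose
# `path` attribute is compared against the player's answer (see
# answer_question above). The loader below is hypothetical and depends on how
# graphs are built elsewhere in the project.
# if __name__ == '__main__':
#     graphs = build_graphs()  # hypothetical helper
#     Game().run(graphs=graphs)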
| 3.171875
| 3
|
main.py
|
winogradoff/VK-Chat-Guard
| 3
|
12783380
|
from os import environ, remove
from datetime import datetime
from hashlib import md5
from time import sleep
from requests import get, post
from vk import API
from apscheduler.schedulers.blocking import BlockingScheduler
class ChatGuard:
def __init__(self, token, chat_id, title, cache_url, cache_file, cache_temp, sleep_time,
scheduler_interval):
"""
Конструктор ChatGuard
:param token: токен авторизации в VK
:param chat_id: ID беседы
:param title: ожидаемое название беседы
:param cache_url: URL изображения беседы
:param cache_file: кеш файл-изображение беседы
:param cache_temp: файл для временного хранения
:param sleep_time: пауза перед chat_job (в секундах)
:param scheduler_interval: интервал chat_job (в секундах)
"""
self.token = token
self.chat_id = chat_id
self.chat_title = title
self.cache_url = cache_url
self.cache_file = cache_file
self.cache_temp = cache_temp
self.sleep_time = sleep_time
self.scheduler_interval = scheduler_interval
self.buffer_size = 8192
print('Authorization... ', end='')
self.api = API(access_token=self.token)
print('READY.')
def md5_file(self, path):
"""
Подсчёт md5 хеша файла
:param path: путь к файлу
:return: md5 hash
"""
        print('Calculating hash of file {:s}... '.format(path), end='')
with open(path, 'rb') as file:
md5hash = md5()
buffer = file.read(self.buffer_size)
while len(buffer) > 0:
md5hash.update(buffer)
buffer = file.read(self.buffer_size)
print('READY.')
return md5hash.hexdigest()
def update_photo(self):
"""
Обновление фото чата
"""
print('Updating photo... ', end='')
response = self.api.messages.setChatPhoto(
file=post(
url=self.api.photos.getChatUploadServer(chat_id=self.chat_id)['upload_url'],
files={'file': open('images/logo.png', 'rb')}
).json()['response']
)
print('READY.')
if 'photo_200' in response['chat']:
print('Saving cache URL... ', end='')
self.save_cache_url(response['chat']['photo_200'])
print('READY.')
def get_cache_url(self):
"""
Получение кешированной ссылки
:return: ссылка из файла кеша
"""
print('Getting the cache url... ', end='')
with open(self.cache_url, 'r') as file:
content = file.readline()
print('READY.')
return content
def save_cache_url(self, url):
"""
Сохраниение ссылки в кеш
:param url: URL изображения
"""
print('Updating the cache url... ', end='')
with open(self.cache_url, 'w') as file:
file.write(url)
print('READY.')
def save_temp(self, content):
"""
Сохранение временного файла
:param content: содержимое файла
"""
print('Saving the temp file... ', end='')
with open(self.cache_temp, 'wb') as file:
file.write(content)
print('READY.')
def remove_temp(self):
"""
Удаление временного файла
"""
print('Removing the temp file... ', end='')
remove(self.cache_temp)
print('READY.')
def photo_changed(self, chat_response):
"""
Проверка изменения фото беседы
:param chat_response: ответ VK API messages.getChat
:return: результат проверки
"""
result = False
if 'photo_200' not in chat_response:
result = True
print('The chat photo is empty.')
else:
photo_200_url = chat_response['photo_200']
if self.get_cache_url() != photo_200_url:
                # If the URL differs
print('The chat photo URL has been updated.')
print('Checking the md5 hash of file...')
response = get(photo_200_url)
self.save_temp(response.content)
if self.md5_file(self.cache_file) != self.md5_file(self.cache_temp):
result = True
print('md5_file(CACHE_FILE) != md5_file(CACHE_TEMP)')
else:
print('Files are the same.')
                    # If the md5 hashes match, update the cached URL
print('Updating chat url... ', end='')
self.save_cache_url(photo_200_url)
self.remove_temp()
return result
def title_changed(self, chat_response):
"""
Проверка изменения названия беседы
:param chat_response: ответ VK API messages.getChat
:return: результат проверки
"""
return 'title' not in chat_response or chat_response['title'] != self.chat_title
def update_title(self):
"""
Обновление названия беседы
"""
print('Updating the chat title... ', end='')
self.api.messages.editChat(chat_id=self.chat_id, title=self.chat_title)
print('READY.')
def chat_job(self):
"""
Ежеминутное задание проверки названия и фото беседы
"""
print()
print('=== Begin ===')
        # Timestamp
print(str(datetime.now()))
print('Waiting for {:d} seconds...'.format(self.sleep_time))
        # Pause to avoid being blocked
sleep(self.sleep_time)
print('Getting information about the chat... ', end='')
chat_response = self.api.messages.getChat(chat_id=self.chat_id)
print('READY.')
if self.photo_changed(chat_response):
print('The chat photo has been changed!')
self.update_photo()
else:
print('The chat photo is OK.')
if self.title_changed(chat_response):
print('The chat title has been changed!')
self.update_title()
else:
print('The title is OK.')
print('=== End ===')
print()
def run(self):
scheduler = BlockingScheduler()
scheduler.add_job(self.chat_job, 'interval', seconds=self.scheduler_interval)
scheduler.start()
if __name__ == '__main__':
ChatGuard(
token=environ.get("VK_AUTH_TOKEN", ""),
chat_id=int(environ.get("VK_CHAT_ID", 0)),
title=environ.get("VK_CHAT_TITLE", ""),
sleep_time=int(environ.get("VK_SLEEP_SECONDS", 0)),
scheduler_interval=int(environ.get("VK_SCHEDULER_INTERVAL_SECONDS", 0)),
cache_url='images/cache/cache-url',
cache_file='images/cache/cache-file',
cache_temp='images/cache/cache-temp'
).run()
| 2.5
| 2
|
tools/notebook/extensions/wstl/magics/_location_test.py
|
jianliyyh/healthcare-data-harmonization
| 1
|
12783381
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for wstl.magics._location."""
import json
from os import path
from absl.testing import absltest
from IPython.testing.globalipapp import get_ipython
from unittest import mock
from google.cloud import storage
from wstl.magics import _constants
from wstl.magics import _location
_ip = get_ipython()
class LocationTest(absltest.TestCase):
def test_parse_location_json_prefix_object_success(self):
shell = mock.MagicMock()
input_wstl_arg = """json://{"hello":"world"}"""
locations = _location.parse_location(shell, input_wstl_arg, file_ext=None)
self.assertLen(locations, 1)
self.assertTrue(locations[0].HasField("inline_json"))
self.assertEqual(locations[0].inline_json, "{\"hello\":\"world\"}")
def test_parse_location_json_prefix_list_success(self):
shell = mock.MagicMock()
input_wstl_arg = """json://[{"first": "world"},{"second": "world"}]"""
locations = _location.parse_location(shell, input_wstl_arg, file_ext=None)
self.assertLen(locations, 1)
self.assertTrue(locations[0].HasField("inline_json"))
self.assertEqual(locations[0].inline_json,
"""[{"first": "world"},{"second": "world"}]""")
@mock.patch.object(storage, "Client", autospec=True)
@mock.patch.object(storage, "Bucket", autospec=True)
def test_parse_location_gs_prefix_success(self, mock_bucket, mock_client):
class Item(object):
def __init__(self, bucket_name, name):
self.bucket = bucket_name
self.name = name
class FakeBucket(object):
def __init__(self, bucket_name):
self.name = bucket_name
bucket = FakeBucket("dummy_bucket")
items = [
Item(bucket, "file1.wstl"),
Item(bucket, "lib_folder/file2.wstl"),
Item(bucket, "lib_folder/file3.txt"),
Item(bucket, "input.json")
]
mock_bucket.list_blobs.return_value = iter(items)
mock_client.return_value.get_bucket.return_value = mock_bucket
shell = mock.MagicMock()
input_wstl_arg = "gs://dummy_bucket/input.json"
locations = _location.parse_location(
shell, input_wstl_arg, file_ext=_constants.JSON_FILE_EXT)
self.assertLen(locations, 1)
self.assertTrue(locations[0].HasField("gcs_location"))
self.assertEqual(locations[0].gcs_location, input_wstl_arg)
@mock.patch.object(storage, "Client", autospec=True)
@mock.patch.object(storage, "Bucket", autospec=True)
def test_parse_location_gs_prefix_wildcard_success(self, mock_bucket,
mock_client):
class Item(object):
def __init__(self, bucket, name):
self.bucket = bucket
self.name = name
class FakeBucket(object):
def __init__(self, bucket_name):
self.name = bucket_name
bucket = FakeBucket("dummy_bucket")
items = [
Item(bucket, "file1.txt"),
Item(bucket, "lib_folder/file2.wstl"),
Item(bucket, "lib_folder/file3.wstl"),
Item(bucket, "lib_folder/file4.json"),
Item(bucket, "input.json")
]
mock_bucket.list_blobs.return_value = iter(items)
mock_client.return_value.get_bucket.return_value = mock_bucket
shell = mock.MagicMock()
input_wstl_arg = "gs://dummy_bucket/lib_folder/*"
locations = _location.parse_location(
shell,
input_wstl_arg,
file_ext=_constants.WSTL_FILE_EXT,
load_contents=False)
self.assertLen(locations, 2)
self.assertTrue(locations[0].HasField("gcs_location"))
self.assertEqual(locations[0].gcs_location,
"gs://dummy_bucket/lib_folder/file2.wstl")
self.assertTrue(locations[1].HasField("gcs_location"))
self.assertEqual(locations[1].gcs_location,
"gs://dummy_bucket/lib_folder/file3.wstl")
@mock.patch.object(storage, "Client", autospec=True)
@mock.patch.object(storage, "Bucket", autospec=True)
def test_parse_location_gs_prefix_wildcard_unsupported_ext(
self, mock_bucket, mock_client):
class Item(object):
def __init__(self, bucket, name):
self.bucket = bucket
self.name = name
class FakeBucket(object):
def __init__(self, bucket_name):
self.name = bucket_name
bucket = FakeBucket("dummy_bucket")
items = [
Item(bucket, "file1.txt"),
Item(bucket, "lib_folder/file2.wstl"),
Item(bucket, "lib_folder/file3.wstl"),
Item(bucket, "lib_folder/file4.json"),
Item(bucket, "input.json")
]
mock_bucket.list_blobs.return_value = iter(items)
mock_client.return_value.get_bucket.return_value = mock_bucket
shell = mock.MagicMock()
input_wstl_arg = "gs://dummy_bucket/*.txt"
locations = _location.parse_location(
shell,
input_wstl_arg,
file_ext=_constants.WSTL_FILE_EXT,
load_contents=False)
self.assertEmpty(locations)
def test_parse_location_file_prefix_file_exists_success(self):
shell = mock.MagicMock()
content = """{"hello": "world"}"""
tmp_file = self.create_tempfile(
file_path="dummy.json", content=content, mode="w")
input_wstl_arg = "file://{}".format(tmp_file.full_path)
locations = _location.parse_location(
shell, input_wstl_arg, file_ext=_constants.JSON_FILE_EXT)
self.assertTrue(locations[0].HasField("inline_json"))
self.assertEqual(locations[0].inline_json, content)
def test_parse_location_file_prefix_wstl_suffix_success(self):
shell = mock.MagicMock()
content = """Result: $ToUpper("a")"""
tmp_file = self.create_tempfile(
file_path="dummy.wstl", content=content, mode="w")
input_wstl_arg = "file://{}".format(tmp_file.full_path)
locations = _location.parse_location(
shell,
input_wstl_arg,
file_ext=_constants.WSTL_FILE_EXT,
load_contents=False)
self.assertTrue(locations[0].HasField("local_path"))
self.assertEqual(locations[0].local_path, tmp_file.full_path)
def test_parse_location_file_prefix_wstl_wildcard_success(self):
shell = mock.MagicMock()
content = """Result: $ToUpper("a")"""
tmp_file = self.create_tempfile(
file_path="dummy.wstl", content=content, mode="w")
input_wstl_arg = "file://{}/*".format(path.dirname(tmp_file.full_path))
locations = _location.parse_location(
shell,
input_wstl_arg,
file_ext=_constants.WSTL_FILE_EXT,
load_contents=False)
self.assertLen(locations, 1)
self.assertTrue(locations[0].HasField("local_path"))
self.assertEqual(locations[0].local_path, tmp_file.full_path)
def test_parse_location_file_prefix_wildcard_success(self):
shell = mock.MagicMock()
content = """{"hello": "world"}"""
tmp_file = self.create_tempfile(
file_path="dummy.json", content=content, mode="w")
input_wstl_arg = "file://{}/*".format(path.dirname(tmp_file.full_path))
locations = _location.parse_location(
shell, input_wstl_arg, file_ext=_constants.JSON_FILE_EXT)
self.assertLen(locations, 1)
self.assertTrue(locations[0].HasField("inline_json"))
self.assertEqual(locations[0].inline_json, content)
def test_parse_location_file_suffix_ndjson_success(self):
shell = mock.MagicMock()
content = """{"first": "item"}\n{"second": "item"}"""
tmp_file = self.create_tempfile(
file_path="dummy.ndjson", content=content, mode="w")
input_wstl_arg = "file://{}".format(tmp_file.full_path)
locations = _location.parse_location(
shell, input_wstl_arg, file_ext=_constants.JSON_FILE_EXT)
self.assertLen(locations, 2)
self.assertTrue(locations[0].HasField("inline_json"))
self.assertEqual(locations[0].inline_json, "{\"first\": \"item\"}")
self.assertTrue(locations[1].HasField("inline_json"))
self.assertEqual(locations[1].inline_json, "{\"second\": \"item\"}")
def test_parse_location_file_prefix_textproto_suffix_success(self):
shell = mock.MagicMock()
content = """dummy_field: true"""
tmp_file = self.create_tempfile(
file_path="dummy.textproto", content=content, mode="w")
input_wstl_arg = "file://{}".format(tmp_file.full_path)
locations = _location.parse_location(
shell,
input_wstl_arg,
file_ext=_constants.TEXTPROTO_FILE_EXT,
load_contents=False)
self.assertTrue(locations[0].HasField("local_path"))
self.assertEqual(locations[0].local_path, tmp_file.full_path)
def test_parse_location_file_prefix_textproto_suffix_load_content_success(
self):
shell = mock.MagicMock()
content = """dummy_field: true"""
tmp_file = self.create_tempfile(
file_path="dummy.textproto", content=content, mode="w")
input_wstl_arg = "file://{}".format(tmp_file.full_path)
locations = _location.parse_location(
shell,
input_wstl_arg,
file_ext=_constants.TEXTPROTO_FILE_EXT,
load_contents=True)
self.assertTrue(locations[0].HasField("inline_json"))
self.assertEqual(locations[0].inline_json, "dummy_field: true")
def test_parse_location_file_prefix_no_load_content_success(self):
shell = mock.MagicMock()
content = """{"hello": "world"}"""
tmp_file = self.create_tempfile(
file_path="dummy.json", content=content, mode="w")
input_wstl_arg = "file://{}/*".format(path.dirname(tmp_file.full_path))
locations = _location.parse_location(
shell,
input_wstl_arg,
file_ext=_constants.JSON_FILE_EXT,
load_contents=False)
self.assertLen(locations, 1)
self.assertTrue(locations[0].HasField("local_path"))
self.assertEqual(locations[0].local_path, tmp_file.full_path)
def test_parse_location_file_prefix_invalid_path(self):
shell = mock.MagicMock()
content = """{"hello": "world"}"""
tmp_file = self.create_tempfile(content=content, mode="w")
input_wstl_arg = "file://invalid-{}".format(tmp_file.full_path)
locations = _location.parse_location(
shell, input_wstl_arg, file_ext=_constants.JSON_FILE_EXT)
self.assertEmpty(locations)
def test_parse_location_file_prefix_missing_extension(self):
shell = mock.MagicMock()
input_wstl_arg = "file://placeholder.json"
with self.assertRaises(ValueError):
_location.parse_location(shell, input_wstl_arg, file_ext=None)
def test_parse_location_python_prefix_string_success(self):
str_content = """{"hello": "world"}"""
_ip.push("str_content")
input_wstl_arg = "py://str_content"
locations = _location.parse_location(_ip, input_wstl_arg, file_ext=None)
self.assertLen(locations, 1)
self.assertTrue(locations[0].HasField("inline_json"))
self.assertEqual(locations[0].inline_json, str_content)
def test_parse_location_python_prefix_dict_success(self):
dict_content = {"hello": "world"}
_ip.push("dict_content")
input_wstl_arg = "py://dict_content"
locations = _location.parse_location(_ip, input_wstl_arg, file_ext=None)
self.assertLen(locations, 1)
self.assertTrue(locations[0].HasField("inline_json"))
self.assertEqual(locations[0].inline_json, json.dumps(dict_content))
def test_parse_location_python_prefix_list_success(self):
list_content = [{"first": "item"}, {"second": "item"}]
_ip.push("list_content")
input_wstl_arg = "py://list_content"
locations = _location.parse_location(_ip, input_wstl_arg, file_ext=None)
self.assertLen(locations, 1)
self.assertTrue(locations[0].HasField("inline_json"))
self.assertEqual(locations[0].inline_json,
json.dumps(list_content, sort_keys=True))
def test_parse_location_unknown_prefix_failure(self):
shell = mock.MagicMock()
input_wstl_arg = "invalid://blah"
with self.assertRaises(ValueError):
_location.parse_location(shell, input_wstl_arg, file_ext=None)
if __name__ == "__main__":
absltest.main()
| 2.25
| 2
|
async_scrapy_api/__init__.py
|
QYLGitHub/async_scrapyd_api
| 0
|
12783382
|
""""
scrapyd api 的异步实现
"""
from .client import ScrapyApi as _ScrapyApi
class AsyncScrapyApi(_ScrapyApi):
pass
version = __version__ = '0.0.1'
__all__ = ["AsyncScrapyApi"]
| 1.289063
| 1
|
password/strongPasswd.py
|
Gao-zl/tools
| 0
|
12783383
|
# -*- coding:utf-8 -*-
'''
Version: V1.0
Time: 2020.08.21
Author: Gaozhl
'''
import string
import random
char_set = {'small': 'abcdefghijklmnopqrstuvwxyz',
'nums': '0123456789',
'big': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'special': '^!\$%&/()=?{[]}+~-.:,;<>|'
}
def GenPassword(length=21, chars=string.ascii_letters + string.digits):
"""
Function to generate a password
强密码生成器
:param length: 选定长度
:param chars: 默认值即可
:return: 生成的复杂密码
"""
passwd = []
    # Make the first character an uppercase letter, in case of special requirements
i = random.randint(0, 25)
passwd.append(char_set['big'][i])
while len(passwd) < length:
        # Pick a random character-class key
key = random.choice(list(char_set.keys()))
        # Use the size of this character class to pick one character from it
choice = random.randint(0, len(char_set[key]) - 1)
        # Check against the previous character to avoid overly simple passwords
if check_prev_char(passwd, char_set[key][choice]):
continue
else:
passwd.append(char_set[key][choice])
return ''.join(passwd)
def check_prev_char(passwd, current_char_set):
"""
检查前后两个字符是否相同,保证密码不出现简单密码
:param passwd: 输入的密码
:param current_char_set: 当前选取的字符种类
:return: 返回True或者False
"""
index = len(passwd)
if index == 0:
return False
else:
prev_char = passwd[index - 1]
if prev_char in current_char_set:
return True
else:
return False
print(GenPassword())
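
# A short demonstration with other lengths, assuming the default character
# classes defined above; each password still starts with an uppercase letter.
if __name__ == '__main__':
    for n in (12, 21, 32):
        print(n, GenPassword(length=n))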
| 2.9375
| 3
|
test.py
|
TopShares/DuoWan
| 0
|
12783384
|
#encoding:utf-8
import requests
ID = '137442'
url = 'http://tu.duowan.cn/gallery/%s.html' % ID
headers = {
'User-Agent': 'Mozilla/5.0 (Linux; U; Android 8.1.0; zh-cn; OE106 Build/OPM1.171019.026) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/9.2 Mobile Safari/537.36',
'Referer': 'http://tu.duowan.com/gallery/%s.html' % ID,
}
import re
r = requests.get(url,headers=headers)
if r.status_code == 200: # ok
s = r.content
html = s.decode()
print(html)
# strs = str(s)
# print(strs.replace(r"\\\\",r"\\"))
# html = r.content.decode('utf-8')
# print(html)
a = re.findall('imgJson = ([\s\S]*?);',html)
# print(a)
# exit()
# print(a)
# print(type(a))
# print(a[0])
import json
jsonp = json.loads(a[0])
folder = jsonp['gallery_title']
picInfo = jsonp['picInfo']
print(len(picInfo))
print(folder)
for i in picInfo:
add_intro = i['add_intro']
url = i['url']
print(add_intro,url)
| 2.84375
| 3
|
examples/simple.py
|
ldn-softdev/pyeapi
| 126
|
12783385
|
#!/usr/bin/env python
from __future__ import print_function
import pyeapi
connection = pyeapi.connect(host='192.168.1.16')
output = connection.execute(['enable', 'show version'])
print('My system MAC address is', output['result'][1]['systemMacAddress'])
| 2.3125
| 2
|
bpy_lambda/2.78/scripts/addons_contrib/io_scene_map/export_map.py
|
resultant-gamedev/bpy_lambda
| 0
|
12783386
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
#http://www.pasteall.org/47943/python
# <pep8-80 compliant>
import bpy
import os
import mathutils
from mathutils import Vector
from contextlib import redirect_stdout
import io
stdout = io.StringIO()
# TODO, make options
PREF_SCALE = 1
PREF_FACE_THICK = 0.1
PREF_GRID_SNAP = False
# Quake 1/2?
# Quake 3+?
PREF_DEF_TEX_OPTS = '0 0 0 1 1 0 0 0' # not user settable yet
PREF_NULL_TEX = 'NULL' # not user settable yet
PREF_INVIS_TEX = 'common/caulk'
PREF_DOOM3_FORMAT = True
def face_uv_image_get(me, face):
uv_faces = me.uv_textures.active
if uv_faces:
return uv_faces.data[face.index].image
else:
return None
def face_uv_coords_get(me, face):
tf_uv_faces = me.tessface_uv_textures.active
if tf_uv_faces:
return tf_uv_faces.data[face.index].uv_raw[:]
else:
return None
def face_material_get(me, face):
idx = face.material_index
return me.materials[idx] if idx < len(me.materials) else None
def poly_to_doom(me, p, radius):
"""
Convert a face into Doom3 representation (infinite plane defined by its normal
and distance from origin along that normal).
"""
# Compute the distance to the mesh from the origin to the plane.
# Line from origin in the direction of the face normal.
origin = Vector((0, 0, 0))
target = Vector(p.normal) * radius
# Find the target point.
intersect = mathutils.geometry.intersect_line_plane(origin, target, Vector(p.center), Vector(p.normal))
# We have to handle cases where intersection with face happens on the "negative" part of the vector!
length = intersect.length
nor = p.normal.copy()
if (nor.dot(intersect.normalized()) > 0):
length *= -1
nor.resize_4d()
nor.w = length
return nor
def doom_are_same_planes(p1, p2):
"""
To avoid writing two planes that are nearly the same!
"""
# XXX Is sign of the normal/length important in Doom for plane definition??? For now, assume that no!
if p1.w < 0:
p1 = p1 * -1.0
if p2.w < 0:
p2 = p2 * -1.0
threshold = 0.0001
if abs(p1.w - p2.w) > threshold:
return False
# Distances are the same, check orientations!
if p1.xyz.normalized().dot(p2.xyz.normalized()) < (1 - threshold):
return False
# Same plane!
return True
def doom_check_plane(done_planes, plane):
"""
Check if plane as already been handled, or is similar enough to an already handled one.
Return True if it has already been handled somehow.
done_planes is expected to be a dict {written_plane: {written_plane, similar_plane_1, similar_plane_2, ...}, ...}.
"""
p_key = tuple(plane)
if p_key in done_planes:
return True
for p, dp in done_planes.items():
if p_key in dp:
return True
elif doom_are_same_planes(Vector(p), plane):
done_planes[p].add(p_key)
return True
done_planes[p_key] = {p_key}
return False
def ob_to_radius(ob):
radius = max(Vector(pt).length for pt in ob.bound_box)
# Make the ray casts, go just outside the bounding sphere.
return radius * 1.1
def is_cube_facegroup(faces):
"""
Returns a bool, true if the faces make up a cube
"""
# cube must have 6 faces
if len(faces) != 6:
# print('1')
return False
# Check for quads and that there are 6 unique verts
verts = {}
for f in faces:
f_v = f.vertices[:]
if len(f_v) != 4:
return False
for v in f_v:
verts[v] = 0
if len(verts) != 8:
return False
# Now check that each vert has 3 face users
for f in faces:
f_v = f.vertices[:]
for v in f_v:
verts[v] += 1
for v in verts.values():
if v != 3: # vert has 3 users?
return False
# Could we check for 12 unique edges??, probably not needed.
return True
def is_tricyl_facegroup(faces):
"""
is the face group a tri cylinder
Returns a bool, true if the faces make an extruded tri solid
"""
# tricyl must have 5 faces
if len(faces) != 5:
# print('1')
return False
# Check for quads and that there are 6 unique verts
verts = {}
tottri = 0
for f in faces:
if len(f.vertices) == 3:
tottri += 1
for vi in f.vertices:
verts[vi] = 0
if len(verts) != 6 or tottri != 2:
return False
# Now check that each vert has 3 face users
for f in faces:
for vi in f.vertices:
verts[vi] += 1
for v in verts.values():
if v != 3: # vert has 3 users?
return False
# Could we check for 9 unique edges??, probably not needed.
return True
def split_mesh_in_convex_parts(me):
"""
Not implemented yet. Should split given mesh into manifold convex meshes.
For now simply always returns the given mesh.
"""
# TODO.
return (me,)
def round_vec(v):
if PREF_GRID_SNAP:
return v.to_tuple(0)
else:
return v[:]
def write_quake_brush_cube(fw, ob, faces):
"""
Takes 6 faces and writes a brush,
these faces can be from 1 mesh, 1 cube within a mesh of larger cubes
Faces could even come from different meshes or be contrived.
"""
format_vec = '( %d %d %d ) ' if PREF_GRID_SNAP else '( %.9g %.9g %.9g ) '
fw('// brush from cube\n{\n')
for f in faces:
# from 4 verts this gets them in reversed order and only 3 of them
# 0,1,2,3 -> 2,1,0
me = f.id_data # XXX25
for v in f.vertices[:][2::-1]:
fw(format_vec % round_vec(me.vertices[v].co))
material = face_material_get(me, f)
if material and material.game_settings.invisible:
fw(PREF_INVIS_TEX)
else:
image = face_uv_image_get(me, f)
if image:
fw(os.path.splitext(bpy.path.basename(image.filepath))[0])
else:
fw(PREF_NULL_TEX)
fw(" %s\n" % PREF_DEF_TEX_OPTS) # Texture stuff ignored for now
fw('}\n')
def write_quake_brush_face(fw, ob, face):
"""
takes a face and writes it as a brush
each face is a cube/brush.
"""
format_vec = '( %d %d %d ) ' if PREF_GRID_SNAP else '( %.9g %.9g %.9g ) '
    image_text = PREF_NULL_TEX
    image = None
    me = face.id_data
    material = face_material_get(me, face)
if material and material.game_settings.invisible:
image_text = PREF_INVIS_TEX
else:
image = face_uv_image_get(me, face)
if image:
image_text = os.path.splitext(bpy.path.basename(image.filepath))[0]
# reuse face vertices
f_vertices = [me.vertices[vi] for vi in face.vertices]
# original verts as tuples for writing
orig_vco = tuple(round_vec(v.co) for v in f_vertices)
# new verts that give the face a thickness
dist = PREF_SCALE * PREF_FACE_THICK
new_vco = tuple(round_vec(v.co - (v.normal * dist)) for v in f_vertices)
#new_vco = [round_vec(v.co - (face.no * dist)) for v in face]
fw('// brush from face\n{\n')
# front
for co in orig_vco[2::-1]:
fw(format_vec % co)
fw(image_text)
fw(" %s\n" % PREF_DEF_TEX_OPTS) # Texture stuff ignored for now
for co in new_vco[:3]:
fw(format_vec % co)
if image and not material.game_settings.use_backface_culling: #uf.use_twoside:
fw(image_text)
else:
fw(PREF_INVIS_TEX)
fw(" %s\n" % PREF_DEF_TEX_OPTS) # Texture stuff ignored for now
# sides.
    if len(orig_vco) == 3:  # Tri, it seems tri brushes are supported.
index_pairs = ((0, 1), (1, 2), (2, 0))
else:
index_pairs = ((0, 1), (1, 2), (2, 3), (3, 0))
for i1, i2 in index_pairs:
for co in orig_vco[i1], orig_vco[i2], new_vco[i2]:
fw(format_vec % co)
fw(PREF_INVIS_TEX)
fw(" %s\n" % PREF_DEF_TEX_OPTS) # Texture stuff ignored for now
fw('}\n')
def write_doom_brush(fw, ob, me):
"""
Takes a mesh object and writes its convex parts.
"""
format_vec = '( {} {} {} {} ) '
format_vec_uv = '( ( {} {} {} ) ( {} {} {} ) ) '
# Get the bounding sphere for the object for ray-casting
radius = ob_to_radius(ob)
fw('// brush from faces\n{\n'
'brushDef3\n{\n'
)
done_planes = {} # Store already written plane, to avoid writing the same one (or a similar-enough one) again.
for p in me.polygons:
image_text = PREF_NULL_TEX
material = face_material_get(me, p)
if material:
if material.game_settings.invisible:
image_text = PREF_INVIS_TEX
else:
image_text = material.name
# reuse face vertices
plane = poly_to_doom(me, p, radius)
if plane is None:
print(" ERROR: Could not create the plane from polygon!");
elif doom_check_plane(done_planes, plane):
#print(" WARNING: Polygon too similar to another one!");
pass
else:
fw(format_vec.format(*plane.to_tuple(6)))
fw(format_vec_uv.format(0.015625, 0, 1, 0, 0.015625, 1)) # TODO insert UV stuff here
fw('"%s" ' % image_text)
fw("%s\n" % PREF_DEF_TEX_OPTS) # Texture stuff ignored for now
fw('}\n}\n')
def write_node_map(fw, ob):
"""
Writes the properties of an object (empty in this case)
as a MAP node as long as it has the property name - classname
    returns True/False based on whether a node was written
"""
props = [(p.name, p.value) for p in ob.game.properties]
IS_MAP_NODE = False
for name, value in props:
if name == "classname":
IS_MAP_NODE = True
break
if not IS_MAP_NODE:
return False
# Write a node
fw('{\n')
for name_value in props:
fw('"%s" "%s"\n' % name_value)
fw('"origin" "%.9g %.9g %.9g"\n' % round_vec(ob.matrix_world.to_translation()))
fw('}\n')
return True
def split_objects(context, objects):
scene = context.scene
final_objects = []
bpy.ops.object.select_all(action='DESELECT')
for ob in objects:
ob.select = True
bpy.ops.object.duplicate()
objects = bpy.context.selected_objects
bpy.ops.object.select_all(action='DESELECT')
tot_ob = len(objects)
for i, ob in enumerate(objects):
print("Splitting object: %d/%d" % (i, tot_ob))
ob.select = True
if ob.type == "MESH":
scene.objects.active = ob
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.mesh.select_mode(type='EDGE')
bpy.ops.object.mode_set(mode='OBJECT')
for edge in ob.data.edges:
if edge.use_seam:
edge.select = True
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.edge_split()
bpy.ops.mesh.separate(type='LOOSE')
bpy.ops.object.mode_set(mode='OBJECT')
split_objects = context.selected_objects
for split_ob in split_objects:
assert(split_ob.type == "MESH")
scene.objects.active = split_ob
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_mode(type='EDGE')
bpy.ops.mesh.select_all(action="SELECT")
bpy.ops.mesh.region_to_loop()
bpy.ops.mesh.fill_holes(sides=8)
slot_idx = 0
for slot_idx, m in enumerate(split_ob.material_slots):
if m.name == "textures/common/caulk":
break
#if m.name != "textures/common/caulk":
# mat = bpy.data.materials.new("textures/common/caulk")
# bpy.context.object.data.materials.append(mat)
split_ob.active_material_index = slot_idx # we need to use either actual material name or custom property instead of index
bpy.ops.object.material_slot_assign()
with redirect_stdout(stdout):
bpy.ops.mesh.remove_doubles()
bpy.ops.mesh.quads_convert_to_tris()
bpy.ops.mesh.tris_convert_to_quads()
bpy.ops.object.mode_set(mode='OBJECT')
final_objects += split_objects
ob.select = False
print(final_objects)
return final_objects
def export_map(context, filepath):
"""
pup_block = [\
('Scale:', PREF_SCALE, 1, 1000,
'Scale the blender scene by this value.'),\
('Face Width:', PREF_FACE_THICK, 0.01, 10,
'Thickness of faces exported as brushes.'),\
('Grid Snap', PREF_GRID_SNAP,
'snaps floating point values to whole numbers.'),\
'Null Texture',\
('', PREF_NULL_TEX, 1, 128,
'Export textureless faces with this texture'),\
'Unseen Texture',\
('', PREF_INVIS_TEX, 1, 128,
'Export invisible faces with this texture'),\
]
if not Draw.PupBlock('map export', pup_block):
return
"""
import time
from mathutils import Matrix
from bpy_extras import mesh_utils
t = time.time()
print("Map Exporter 0.0")
scene = context.scene
objects = context.selected_objects
obs_mesh = []
obs_lamp = []
obs_surf = []
obs_empty = []
SCALE_MAT = Matrix()
SCALE_MAT[0][0] = SCALE_MAT[1][1] = SCALE_MAT[2][2] = PREF_SCALE
TOTBRUSH = TOTLAMP = TOTNODE = 0
for ob in objects:
type = ob.type
if type == 'MESH':
obs_mesh.append(ob)
elif type == 'SURFACE':
obs_surf.append(ob)
elif type == 'LAMP':
obs_lamp.append(ob)
elif type == 'EMPTY':
obs_empty.append(ob)
obs_mesh = split_objects(context, obs_mesh)
with open(filepath, 'w') as fl:
fw = fl.write
if obs_mesh or obs_surf:
if PREF_DOOM3_FORMAT:
fw('Version 2')
# brushes and surf's must be under worldspan
fw('\n// entity 0\n')
fw('{\n')
fw('"classname" "worldspawn"\n')
print("\twriting cubes from meshes")
tot_ob = len(obs_mesh)
for i, ob in enumerate(obs_mesh):
print("Exporting object: %d/%d" % (i, tot_ob))
dummy_mesh = ob.to_mesh(scene, True, 'PREVIEW')
#print len(mesh_split2connected(dummy_mesh))
# 1 to tx the normals also
dummy_mesh.transform(ob.matrix_world * SCALE_MAT)
# High quality normals
#XXX25: BPyMesh.meshCalcNormals(dummy_mesh)
if PREF_DOOM3_FORMAT:
for me in split_mesh_in_convex_parts(dummy_mesh):
write_doom_brush(fw, ob, me)
TOTBRUSH += 1
if (me is not dummy_mesh):
bpy.data.meshes.remove(me)
else:
# We need tessfaces
dummy_mesh.update(calc_tessface=True)
# Split mesh into connected regions
for face_group in mesh_utils.mesh_linked_tessfaces(dummy_mesh):
if is_cube_facegroup(face_group):
write_quake_brush_cube(fw, ob, face_group)
TOTBRUSH += 1
elif is_tricyl_facegroup(face_group):
write_quake_brush_cube(fw, ob, face_group)
TOTBRUSH += 1
else:
for f in face_group:
write_quake_brush_face(fw, ob, f)
TOTBRUSH += 1
#print 'warning, not exporting "%s" it is not a cube' % ob.name
bpy.data.meshes.remove(dummy_mesh)
valid_dims = 3, 5, 7, 9, 11, 13, 15
for ob in obs_surf:
'''
Surf, patches
'''
data = ob.data
surf_name = data.name
mat = ob.matrix_world * SCALE_MAT
# This is what a valid patch looks like
"""
// brush 0
{
patchDef2
{
NULL
( 3 3 0 0 0 )
(
( ( -64 -64 0 0 0 ) ( -64 0 0 0 -2 ) ( -64 64 0 0 -4 ) )
( ( 0 -64 0 2 0 ) ( 0 0 0 2 -2 ) ( 0 64 0 2 -4 ) )
( ( 64 -64 0 4 0 ) ( 64 0 0 4 -2 ) ( 80 88 0 4 -4 ) )
)
}
}
"""
for i, nurb in enumerate(data.splines):
u = nurb.point_count_u
v = nurb.point_count_v
if u in valid_dims and v in valid_dims:
fw('// brush %d surf_name\n' % i)
fw('{\n')
fw('patchDef2\n')
fw('{\n')
fw('NULL\n')
fw('( %d %d 0 0 0 )\n' % (u, v))
fw('(\n')
u_iter = 0
for p in nurb.points:
if u_iter == 0:
fw('(')
u_iter += 1
# add nmapping 0 0 ?
if PREF_GRID_SNAP:
fw(" ( %d %d %d 0 0 )" %
round_vec(mat * p.co.xyz))
else:
fw(' ( %.6f %.6f %.6f 0 0 )' %
(mat * p.co.xyz)[:])
# Move to next line
if u_iter == u:
fw(' )\n')
u_iter = 0
fw(')\n')
fw('}\n')
fw('}\n')
# Debugging
# for p in nurb: print 'patch', p
else:
print("Warning: not exporting patch",
surf_name, u, v, 'Unsupported')
if obs_mesh or obs_surf:
fw('}\n') # end worldspan
print("\twriting lamps")
for ob in obs_lamp:
print("\t\t%s" % ob.name)
lamp = ob.data
fw('{\n')
fw('"classname" "light"\n')
fw('"light" "%.6f"\n' % (lamp.distance * PREF_SCALE))
if PREF_GRID_SNAP:
fw('"origin" "%d %d %d"\n' %
tuple([round(axis * PREF_SCALE)
for axis in ob.matrix_world.to_translation()]))
else:
fw('"origin" "%.6f %.6f %.6f"\n' %
tuple([axis * PREF_SCALE
for axis in ob.matrix_world.to_translation()]))
fw('"_color" "%.6f %.6f %.6f"\n' % tuple(lamp.color))
fw('"style" "0"\n')
fw('}\n')
TOTLAMP += 1
print("\twriting empty objects as nodes")
for ob in obs_empty:
if write_node_map(fw, ob):
print("\t\t%s" % ob.name)
TOTNODE += 1
else:
print("\t\tignoring %s" % ob.name)
for ob in obs_mesh:
scene.objects.unlink(ob)
bpy.data.objects.remove(ob)
print("Exported Map in %.4fsec" % (time.time() - t))
print("Brushes: %d Nodes: %d Lamps %d\n" % (TOTBRUSH, TOTNODE, TOTLAMP))
def save(operator,
context,
filepath=None,
global_scale=1.0,
face_thickness=0.1,
texture_null="NULL",
texture_opts='0 0 0 1 1 0 0 0',
grid_snap=False,
doom3_format=True,
):
global PREF_SCALE
global PREF_FACE_THICK
global PREF_NULL_TEX
global PREF_DEF_TEX_OPTS
global PREF_GRID_SNAP
global PREF_DOOM3_FORMAT
PREF_SCALE = global_scale
PREF_FACE_THICK = face_thickness
PREF_NULL_TEX = texture_null
PREF_DEF_TEX_OPTS = texture_opts
PREF_GRID_SNAP = grid_snap
PREF_DOOM3_FORMAT = doom3_format
if (PREF_DOOM3_FORMAT):
PREF_DEF_TEX_OPTS = '0 0 0'
else:
PREF_DEF_TEX_OPTS = '0 0 0 1 1 0 0 0'
export_map(context, filepath)
return {'FINISHED'}
| 1.851563
| 2
|
AluraProjects/Brasilidades/telefone.py
|
matheusm0ura/Python
| 0
|
12783387
|
import re
class TelefoneBr:
def __init__(self, telefone):
if self.valida_numero(telefone):
self.numero = telefone
else:
raise ValueError("Número inválido.")
def valida_numero(self, telefone):
padrao = "([0-9]{2})([0-9]{4,5})([0-9]{4}$)"
busca = re.search(padrao, telefone)
if busca:
return True
else:
return False
def __str__(self):
padrao = "([0-9]{2})([0-9]{4,5})([0-9]{4})"
busca = re.search(padrao, self.numero)
return "({}){}-{}".format(
busca.group(1),
busca.group(2),
busca.group(3))
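

# A minimal usage sketch with a made-up number that matches the pattern above
# (two-digit area code followed by a 5-digit and a 4-digit block):
if __name__ == '__main__':
    telefone = TelefoneBr('11987654321')
    print(telefone)  # (11)98765-4321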
| 3.390625
| 3
|
tests/sentry/eventstream/kafka/test_protocol.py
|
mlapkin/sentry
| 0
|
12783388
|
from __future__ import absolute_import
import pytest
import pytz
from datetime import datetime
from sentry.eventstream.kafka.protocol import (
InvalidPayload,
InvalidVersion,
UnexpectedOperation,
parse_event_message,
)
from sentry.utils import json
def test_parse_event_message_invalid_payload():
with pytest.raises(InvalidPayload):
parse_event_message('{"format": "invalid"}')
def test_parse_event_message_invalid_version():
with pytest.raises(InvalidVersion):
parse_event_message(json.dumps([0, 'insert', {}]))
def test_parse_event_message_version_1():
event_data = {
'project_id': 1,
'group_id': 2,
'event_id': '00000000000010008080808080808080',
'message': 'message',
'platform': 'python',
'datetime': '2018-07-20T21:04:27.600640Z',
'data': {},
'extra': {},
'primary_hash': '49f68a5c8493ec2c0bf489821c21fc3b',
}
task_state = {
'is_new': True,
'is_sample': False,
'is_regression': False,
'is_new_group_environment': True,
}
kwargs = parse_event_message(json.dumps([1, 'insert', event_data, task_state]))
event = kwargs.pop('event')
assert event.project_id == 1
assert event.group_id == 2
assert event.event_id == '00000000000010008080808080808080'
assert event.message == 'message'
assert event.platform == 'python'
assert event.datetime == datetime(2018, 7, 20, 21, 4, 27, 600640, tzinfo=pytz.utc)
assert dict(event.data) == {}
assert kwargs.pop('primary_hash') == '49f68a5c8493ec2c0bf489821c21fc3b'
assert kwargs.pop('is_new') is True
assert kwargs.pop('is_sample') is False
assert kwargs.pop('is_regression') is False
assert kwargs.pop('is_new_group_environment') is True
assert not kwargs, 'unexpected values remaining: {!r}'.format(kwargs)
def test_parse_event_message_version_1_unsupported_operation():
assert parse_event_message(json.dumps([1, 'delete', {}])) is None
def test_parse_event_message_version_1_unexpected_operation():
with pytest.raises(UnexpectedOperation):
parse_event_message(json.dumps([1, 'invalid', {}, {}]))
| 2.09375
| 2
|
i3pystatus/pomodoro.py
|
fkusei/i3pystatus
| 413
|
12783389
|
import subprocess
from datetime import datetime, timedelta
from i3pystatus import IntervalModule
from i3pystatus.core.desktop import DesktopNotification
STOPPED = 0
RUNNING = 1
BREAK = 2
class Pomodoro(IntervalModule):
"""
This plugin shows Pomodoro timer.
Left click starts/restarts timer.
Right click stops it.
Example color settings.
.. code-block:: python
color_map = {
'stopped': '#2ECCFA',
'running': '#FFFF00',
'break': '#37FF00'
}
"""
settings = (
('sound',
'Path to sound file to play as alarm. Played by "aplay" utility'),
('pomodoro_duration',
'Working (pomodoro) interval duration in seconds'),
('break_duration', 'Short break duration in seconds'),
('long_break_duration', 'Long break duration in seconds'),
('short_break_count', 'Short break count before first long break'),
('format', 'format string, available formatters: current_pomodoro, '
'total_pomodoro, time'),
('inactive_format', 'format string to display when no timer is running'),
('color', 'dictionary containing a mapping of statuses to colours')
)
inactive_format = 'Start Pomodoro'
color_map = {
'stopped': '#2ECCFA',
'running': '#FFFF00',
'break': '#37FF00'
}
color = None
sound = None
interval = 1
short_break_count = 3
format = '☯ {current_pomodoro}/{total_pomodoro} {time}'
pomodoro_duration = 25 * 60
break_duration = 5 * 60
long_break_duration = 15 * 60
on_rightclick = "stop"
on_leftclick = "start"
def init(self):
# state could be either running/break or stopped
self.state = STOPPED
self.current_pomodoro = 0
self.total_pomodoro = self.short_break_count + 1 # and 1 long break
self.time = None
if self.color is not None and type(self.color) == dict:
self.color_map.update(self.color)
def run(self):
if self.time and datetime.utcnow() >= self.time:
if self.state == RUNNING:
self.state = BREAK
if self.current_pomodoro == self.short_break_count:
self.time = datetime.utcnow() + \
timedelta(seconds=self.long_break_duration)
else:
self.time = datetime.utcnow() + \
timedelta(seconds=self.break_duration)
text = 'Go for a break!'
else:
self.state = RUNNING
self.time = datetime.utcnow() + \
timedelta(seconds=self.pomodoro_duration)
text = 'Back to work!'
self.current_pomodoro = (self.current_pomodoro + 1) % self.total_pomodoro
self._alarm(text)
if self.state == RUNNING or self.state == BREAK:
min, sec = divmod((self.time - datetime.utcnow()).total_seconds(), 60)
text = '{:02}:{:02}'.format(int(min), int(sec))
sdict = {
'time': text,
'current_pomodoro': self.current_pomodoro + 1,
'total_pomodoro': self.total_pomodoro
}
color = self.color_map['running'] if self.state == RUNNING else self.color_map['break']
text = self.format.format(**sdict)
else:
text = self.inactive_format
color = self.color_map['stopped']
self.output = {
'full_text': text,
'color': color
}
def start(self):
self.state = RUNNING
self.time = datetime.utcnow() + timedelta(seconds=self.pomodoro_duration)
self.current_pomodoro = 0
def stop(self):
self.state = STOPPED
self.time = None
def _alarm(self, text):
notification = DesktopNotification(title='Alarm!', body=text)
notification.display()
if self.sound is not None:
subprocess.Popen(['aplay',
self.sound,
'-q'],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
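

# A minimal i3pystatus config sketch using this module; it assumes the usual
# Status().register(...) entry point and uses example values only:
# from i3pystatus import Status
# status = Status()
# status.register("pomodoro",
#                 pomodoro_duration=25 * 60,
#                 break_duration=5 * 60,
#                 color={"running": "#FFFF00", "break": "#37FF00"})
# status.run()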
| 2.734375
| 3
|
modules/molpro/RHF_Parser.py
|
Krzmbrzl/molpro-python
| 0
|
12783390
|
from typing import List
from typing import Iterator
from typing import Optional
import itertools
from molpro import ProgramParser
from molpro import register_program_parser
from molpro import utils
from molpro import RHF_Data
from molpro import MolproOutput
from molpro import ParserData
@register_program_parser
class RHF_Parser(ProgramParser):
def __init__(self):
        super(RHF_Parser, self).__init__()
def doParse(self, lines: List[str], lineIt: Iterator[int], output: MolproOutput) -> Optional[ParserData]:
data = RHF_Data()
lineIt, peekIt = itertools.tee(lineIt)
i = utils.skip_to(lines, peekIt, startswith="ITER")
utils.iterate_to(lineIt, i - 1)
data.iterations = utils.parse_iteration_table(lines, lineIt,
col_types=[
[int, float, float, float, float, int, int, float, float, str]],
del_cols={"ITER"})
# Skip empty lines
followingLine = next(lineIt)
while lines[followingLine].strip() == "":
followingLine = next(lineIt)
if lines[followingLine].startswith("?"):
# We assume that this is an error stating that RHF didn't converge
utils.consume(lines[followingLine],
prefix="?No convergence in rhfpr")
data.converged = False
output.errors.append("RHF failed to converge")
else:
data.converged = True
# Skip to and read final energy
energyLine = utils.skip_to(lines, lineIt, startswith="!RHF STATE")
data.total_energy = float(utils.consume(
lines[energyLine], prefix="!RHF STATE", gobble_until="Energy", strip=True))
return data
| 2.734375
| 3
|
simulate_data.py
|
bdhammel/line-visar-analysis
| 1
|
12783391
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import interp2d
from scipy.ndimage import convolve1d
from PIL import Image
c = 299792.0 # um/ns
class Ray:
def __init__(self, lambda0: "um" = .532,
pulse_length: "ns" = 10, radius: "um" = 100):
"""
Parameters
----------
lambda_: flaot
wave length of the light in um
pulse_length : float
time in us
radius : float
radius of the beam
"""
self.radius = radius
self.lambda0 = lambda0
self._t = np.linspace(0, pulse_length, 2048)
self._y = np.linspace(-2, 2, 2048)
self._tt, self._yy = np.meshgrid(self._t, self._y)
self._lambda0 = lambda0*np.ones_like(self._tt)
self._delta = 0
@property
def pulse_length(self):
return self._t
@property
def beam_width(self):
return self._y
@property
def _k(self):
return 2*np.pi/self._lambda
@property
def _k0(self):
return 2*np.pi/self._lambda0
@property
def phi(self):
return self._k*self.dz + self._k0*self.dz
def E(self, t):
E = np.exp(1j*(self.phi + self._delta))
E_real = np.real(E)
E_imag = np.imag(E)
fE_real = interp2d(self._t, self._y, E_real)
fE_imag = interp2d(self._t, self._y, E_imag)
return fE_real(t, self._y) + 1j*fE_imag(t, self._y)
def set_lambda(self, lambda_):
self._lambda = lambda_
def propogate(self, dz):
self.dz = dz
def add_delta(self, delta):
self._delta = delta
class Target:
def __init__(self, velocity_equation):
"""
Parameters
----------
velocity_equation : str or fn
either step or sigmoid to use default velocity profile, or a
function that excepts a t and y meshgrid
"""
self._t = np.linspace(-5, 15, 2048)
self._y = np.linspace(-3, 3, 2048)
self.tau = 0
self._tt, self._yy = np.meshgrid(self._t, self._y)
if velocity_equation == "step":
self.velocity_equation = self.step
elif velocity_equation == "sigmoid":
self.velocity_equation = self.sigmoid
elif velocity_equation == "stationary":
self.velocity_equation = self.stationary
else:
self.velocity_equation = velocity_equation
@property
def _zz(self):
dt = np.diff(self._t).mean()
return np.cumsum(self._vv, axis=1)*dt
@property
def zz(self):
return interp2d(self._t, self._y, self._zz)
@property
def _dz(self):
"""Path the light travels to the target and back
"""
dzz = self._zz[..., -1, np.newaxis] - self._zz
return dzz
@property
def dz(self):
return interp2d(self._t, self._y, self._dz)
@property
def _vv(self):
return self.velocity_equation(self._tt, self._yy)
@property
def vv(self):
return interp2d(self._t, self._y, self._vv)
@staticmethod
def sigmoid(t: "ns", y: "um", max_velocity: "um/ns" = 5):
"""A velocity profile that follows a sigmoid like shape
"""
return max_velocity*np.exp(-y**4)/(1 + np.exp(-5*(t-3)))
@staticmethod
def step(t: "ns", y: "um", max_velocity: "um/ns" = 1):
"""A discontinuous jump velocity profile
"""
assert t.shape == y.shape
v = np.zeros_like(t)
v[t > 3] = max_velocity
return v
@staticmethod
def stationary(t: "ns", y: "um"):
"""A static target, not moving
"""
return np.zeros_like(t)
def reflect_off_target(self, ray):
ray = self._doppler_shift(ray)
dz = self.dz(ray.pulse_length, ray.beam_width)
ray.propogate(dz)
return ray
def _doppler_shift(self, ray):
vv = self.vv(ray.pulse_length, ray.beam_width)
ray.set_lambda(ray.lambda0*(1 - 2*vv/c))
return ray
def reflection_intensity(self, ray):
dy = np.diff(ray.beam_width).mean()
dz = np.diff(self.zz(ray.pulse_length, ray.beam_width), axis=0)
theta = np.arctan(dz/dy)
Idot = np.vstack(
(np.ones(shape=(2048)), np.apply_along_axis(np.cos, 0, theta))
)
return Idot
def plot_velocity(self):
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
Axes3D.plot_surface(ax, self._tt, self._yy, self._vv)
ax.set_xlabel("Time [ns]")
ax.set_ylabel("x [mm]")
ax.set_zlabel("Velocity [km s-1]")
fig = plt.figure()
im = plt.pcolormesh(self._tt, self._yy, self._vv)
cb = fig.colorbar(im)
plt.xlabel("Time [ns]")
plt.ylabel("x [mm]")
cb.set_label("Velocity [km s-1]")
class Etalon:
def __init__(self, thickness: "mm", n):
"""Initial an etalon object
Parameters
-----------
d : float
thickness of the etalon
n : float
ndex of refraction of the etalon
"""
self._n = n
self._d = thickness
@property
def tau(self) -> "ns":
return 2*self._d/c*(self._n - 1/self._n)
def VPF(self, lambda0=.532):
return lambda0/(2*self.tau)
def set_VPF(self, VPF, lambda0: "um"):
tau = lambda0/(2*VPF)
self.set_tau(tau)
def set_tau(self, tau: "ns"):
"""Change the thickness of the etalon to match a
"""
self._d = c*tau/(2*(self._n - 1/self._n))
class Interferometer:
def __init__(self, etalon, tau: "ns" = .1):
"""
Parameters
----------
etalon : Etalon
the etalon used in the interferometer, provides VPF
tau : float
the time resolution of the streak camera, determined by the
width of the streak slit
"""
self.etalon = etalon
self.tau = tau
def _interfear_ray(self, ray):
"""Generate the interference pattern
"""
# get the electric field over the pulse length
E1 = ray.E(ray.pulse_length)
# generate the offset for the second ray
_delta_shape = len(ray.beam_width)
ray.add_delta(
np.linspace(0, 100, _delta_shape).reshape(_delta_shape, 1)
)
# generate the second ray, which is delayed by the etalon thickness
E2 = ray.E(ray.pulse_length - self.etalon.tau)
# Super position of the rays
E = E1 + E2
# only take the real component of the inner product (intensity)
Icos = np.real(E*E.conj())
return Icos
    def _add_noise(self, im, ray, target, noise_level=1.0, signal_level=1.0):
"""Add detector noise to the generated fringe pattern
"""
print("...Including noise")
"""
noise = np.load("noise.npy")
"""
sig = im[:, 500]
sig_fft = np.fft.rfft(sig)
noise_fft = np.zeros_like(sig_fft)
noise_fft[3] = 50000
noise_fft[50] = 20000
noise_fft[200] = 5000
noise = np.fft.irfft(noise_fft)
noise /= noise.max()
        # Unused noise-envelope sketch ('i' is undefined here and the result
        # was never applied), left commented out:
        # nenv = noise_level*signal_level*np.exp(-i/40)
        # n = nenv*(2*np.random.random(size=(len(i))) - 1)
im = (im.T*noise.real).T
im /= im.max()
im += np.random.random(size=im.shape)*im.std()/3
return im
def _convolve_streak_slit(self, im, t):
"""Blur in the time-domain to account for the width of the streak
camera
Parameters
-----------
im : 2d np array
generated sweep
t : np array
array corresponding to the time of the sweep
"""
print("...Convolving streak slit")
dt = np.diff(t).mean()
tpx = int(self.tau//dt)
window = np.ones(shape=tpx)
return convolve1d(im, window, axis=1)
def output(self, ray, target, noise=False):
"""Generate the simulated data
Parameters
----------
ray : Ray class
the input ray
target :Target class
target containing the velocity profile
noise : bool (optional)
add in detector noise to the generated image
"""
I = self._interfear_ray(ray)
I = self._convolve_streak_slit(I, ray.pulse_length)
if noise:
I = self._add_noise(I, ray, target)
return I
def spatial_var_step(a: "angle", t: "ns", y: "um", max_velocity: "um/ns" = 1):
"""A velocity step-profile which varies linearly in space
Parameters
----------
a : float
the slope of the spatially varying profile
t : float
the time (in ns) at which to evaluate the velocity
y : float
the spatial location at which to evaluate the velocity
max_velocity : float
the maximum velocity of the shock
Returns
-------
the velocity determined by the argument parameters
"""
assert t.shape == y.shape
v = np.zeros_like(t)
v[t > -y/a + 3] = max_velocity
return v
def sin_step(freq, amp, t: "ns", y: "um", max_velocity: "um/ns" = 1):
"""A sinusoidally varying velocity profile in space
Parameters
----------
freq : float
the frequency of the spatially varying profile
amp : float
the amplitude of oscillations
t : float
the time (in ns) at which to evaluate the velocity
y : float
the spatial location at which to evaluate the velocity
max_velocity : float
the maximum velocity of the shock
Returns
-------
the velocity determined by the argument parameters
"""
v = np.zeros_like(t)
v[t > -amp*np.sin(freq*y/(2*np.pi)) + 3] = max_velocity
return v
def reference_shot(save=False, noise=False):
"""Generate a reference image
Parameters
----
save : bool (optional)
save the generated image
noise : bool (optional)
add in detector noise
Returns
-------
Pil Image instance
"""
stationary_target = Target(velocity_equation="stationary")
ray = Ray(pulse_length=10)
ray = stationary_target.reflect_off_target(ray)
etalon = Etalon(1, 1.5195) # VPF doesn't matter
interferometer = Interferometer(etalon=etalon)
ref = interferometer.output(ray, stationary_target, noise)
ref *= 256/ref.max()
ref = ref.astype(np.uint8)
refim = Image.fromarray(ref, mode="L")
plt.figure()
plt.imshow(refim, aspect='auto', cmap="gray", extent=(0, 10, -2, 2))
plt.xlabel("Time [ns]")
if save:
refim.save("~/Desktop/ref.jpg", "JPEG")
return ref
if __name__ == "__main__":
plt.close("all")
velocity_equation = lambda t, y: sin_step(20, .5, t, y, max_velocity=1)
etalon = Etalon(1, 1.5195)
etalon.set_VPF(2., lambda0=.532)
# target = Target(velocity_equation="step")
target = Target(velocity_equation=velocity_equation)
ray = Ray(pulse_length=10)
ray = target.reflect_off_target(ray)
interferometer = Interferometer(etalon=etalon)
sweep = interferometer.output(ray, target, noise=False)
plt.figure()
plt.imshow(sweep, aspect='auto', cmap="gray", extent=(0, 10, -2, 2))
plt.xlabel("Time [ns]")
sweep *= 256/sweep.max()
sweep = sweep.astype(np.uint8)
im = Image.fromarray(sweep, mode="L")
| 2.53125
| 3
|
researchWork/__init__.py
|
VladimirZubavlenko/ikaf42-app
| 0
|
12783392
|
default_app_config = 'researchWork.apps.ResearchWorkConfig'
| 1.070313
| 1
|
azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/operation_result_info.py
|
JonathanGailliez/azure-sdk-for-python
| 1
|
12783393
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .operation_result_info_base import OperationResultInfoBase
class OperationResultInfo(OperationResultInfoBase):
"""Operation result info.
All required parameters must be populated in order to send to Azure.
:param object_type: Required. Constant filled by server.
:type object_type: str
:param job_list: List of jobs created by this operation.
:type job_list: list[str]
"""
_validation = {
'object_type': {'required': True},
}
_attribute_map = {
'object_type': {'key': 'objectType', 'type': 'str'},
'job_list': {'key': 'jobList', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(OperationResultInfo, self).__init__(**kwargs)
self.job_list = kwargs.get('job_list', None)
self.object_type = 'OperationResultInfo'
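

# A minimal construction sketch, following the keyword-argument pattern used
# by this generated model (the job names below are placeholders):
# info = OperationResultInfo(job_list=['job-1', 'job-2'])
# assert info.object_type == 'OperationResultInfo'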
| 2.09375
| 2
|
src/BertForIntent/Settings.py
|
drdauntless/TC-Bot
| 0
|
12783394
|
import json
class Settings:
def __init__(self, filename):
self.json_file = None
with open(filename) as f:
self.json_file = json.load(f)
self.hh_sheet_id = self.json_file['hh_sheet_id']
self.ha_sheet_id = self.json_file['ha_sheet_id']
# List of str: names of the individual sheets in the Google Sheet we want to use
self.hh_sheet_names = self.json_file['hh_sheet_names']
self.ha_sheet_names = self.json_file['ha_sheet_names']
self.columns_list = self.json_file['columns_list']
self.dialogue_column = self.json_file['dialogue_column']
self.dialogue_act_column = self.json_file['dialogue_act_column']
self.intent_column = self.json_file['intent_column']
self.delivery_column = self.json_file['delivery_column']
self.who_column = self.json_file['who_column']
self.action_column = self.json_file['action_column']
self.tone_column = self.json_file['tone_column']
self.driver_dict = self.json_file['driver_dict']
self.creativity_dict = self.json_file['creativity_dict']
self.dialogue_act_dict = self.json_file['dialogue_act_dict']
self.intent_dict = self.json_file['intent_dict']
self.delivery_dict = self.json_file['delivery_dict']
self.action_dict = self.json_file['action_dict']
self.who_dict = self.json_file['who_dict']
self.tone_dict = self.json_file['tone_dict']
self.root_column_name = self.json_file['root_column_name']
self.root_encode_dict = self.json_file[self.json_file['root_encode_dict']]
self.root_hierarchy = self.json_file['root_hierarchy']
self.stagger_training = self.json_file['stagger_training']
self.num_runs = self.json_file['num_runs']
self.num_train_epochs = self.json_file['num_train_epochs']
self.per_device_train_batch_size = self.json_file['per_device_train_batch_size']
self.per_device_eval_batch_size = self.json_file['per_device_eval_batch_size']
self.warmup_steps = self.json_file['warmup_steps']
self.weight_decay = self.json_file['weight_decay']
self.evaluation_strategy = self.json_file['evaluation_strategy']
self.eval_accumulation_steps = self.json_file['eval_accumulation_steps']
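

# A minimal usage sketch, assuming a JSON file that provides every key read
# above (the filename is a placeholder):
# settings = Settings('settings.json')
# print(settings.hh_sheet_id, settings.num_train_epochs)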
| 2.703125
| 3
|
tests/test_base.py
|
byashimov/django-pkgconf
| 15
|
12783395
|
from django.test import SimpleTestCase
from django.test.utils import override_settings
import myconf
import mymixinconf
import myprefixconf
class MyConfTest(SimpleTestCase):
def test_setup(self):
self.assertEqual(myconf.__name__, 'myconf')
# Generated prefix
self.assertEqual(myconf.__prefix__, 'MYAPP')
def test_defaults(self):
self.assertEqual(myconf.LIST, [])
self.assertEqual(myconf.STRING, 'test')
self.assertEqual(myconf.INTEGER, 0)
self.assertEqual(myconf.BOOLEAN, True)
self.assertEqual(myconf.METHOD('baz!'), 'test baz!')
self.assertEqual(myconf.PROPERTY, 'test baz!')
@override_settings(MYAPP_STRING='modified', MYAPP_INTEGER=1,
MYAPP_METHOD=lambda self, string: 'new ' + string,
MYAPP_PROPERTY=property(lambda self: 'new baz'))
def test_changes(self):
self.assertEqual(myconf.STRING, 'modified')
self.assertEqual(myconf.INTEGER, 1)
self.assertEqual(myconf.METHOD('baz'), 'new baz')
self.assertEqual(myconf.PROPERTY, 'new baz')
def test_unknown(self):
with self.assertRaises(AttributeError):
myconf.UNKNOWN
def test_monkeypatching(self):
from django.conf import settings
settings.MYAPP_STRING = 'monkey'
self.assertEqual(myconf.STRING, 'monkey')
# Property returns the new value
self.assertEqual(myconf.PROPERTY, 'monkey baz!')
# Reverse
with self.assertRaises(AttributeError):
myconf.STRING = 'banana'
# Be aware of this
myconf.LIST.append('boo!')
self.assertEqual(myconf.LIST, ['boo!'])
def test_function(self):
self.assertEqual(myconf.METHOD.__name__, 'METHOD')
self.assertEqual(myconf.METHOD.__doc__, 'Method docstring')
class MyPrefixConfTest(SimpleTestCase):
def test_setup(self):
self.assertEqual(myprefixconf.__prefix__, 'FOO_BAR')
def test_defaults(self):
self.assertEqual(myprefixconf.BOOLEAN, True)
@override_settings(FOO_BAR_BOOLEAN=False)
def test_changes(self):
self.assertEqual(myprefixconf.BOOLEAN, False)
class MyMixinConfTest(SimpleTestCase):
def test_defaults(self):
self.assertEqual(mymixinconf.FOO, 'foo')
self.assertEqual(mymixinconf.BAR, 'original bar')
@override_settings(MYAPP_FOO='new foo', MYAPP_BAZ='new baz')
def test_changes(self):
self.assertEqual(mymixinconf.FOO, 'new foo')
self.assertEqual(mymixinconf.BAR, 'new bar')
self.assertEqual(mymixinconf.BAZ, 'new baz')
| 2.359375
| 2
|
Challenges/Desafio019.py
|
JeffersonYepes/Python
| 0
|
12783396
|
<filename>Challenges/Desafio019.py
from random import choice
print('****** Sorteio de Alunos ******')
a1 = str(input('Digite o nome do aluno 1: '))
a2 = str(input('Digite o nome do aluno 2: '))
a3 = str(input('Digite o nome do aluno 3: '))
a4 = str(input('Digite o nome do aluno 4: '))
print('O aluno escolhido é: {}'.format(choice([a1, a2, a3, a4])))
| 3.625
| 4
|
algorithm/utils.py
|
tangxyw/RecAlgorithm
| 13
|
12783397
|
import tensorflow as tf
def train_input_fn(filepath, example_parser, batch_size, num_epochs, shuffle_buffer_size):
"""
    Input_fn for the model's training phase.
    Args:
        filepath (str): path to the training/validation set
        example_parser (function): function that parses a single example
        batch_size (int): number of samples per batch
        num_epochs (int): number of training epochs
        shuffle_buffer_size (int): buffer size used when shuffling
    Returns:
        dataset
"""
dataset = tf.data.TFRecordDataset(filepath)
if shuffle_buffer_size > 0:
dataset = dataset.shuffle(shuffle_buffer_size)
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size)
dataset = dataset.map(example_parser, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.prefetch(1)
return dataset
def eval_input_fn(filepath, example_parser, batch_size):
"""
    Input_fn for the model's eval phase.
    Args:
        filepath (str): path to the training/validation set
        example_parser (function): function that parses a single example
        batch_size (int): number of samples per batch
    Returns:
        dataset
"""
dataset = tf.data.TFRecordDataset(filepath)
dataset = dataset.batch(batch_size)
dataset = dataset.map(example_parser, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.prefetch(1)
return dataset
def to_sparse_tensor(one_hot_tensor):
"""
    Convert a one-hot/multi-hot input into a sparse tensor, to be used as the
    input of tf.nn.safe_embedding_lookup_sparse.
    Args:
        one_hot_tensor (tensor): one-hot/multi-hot input
    Returns:
        tf.SparseTensor
"""
one_hot_indices = tf.where(tf.not_equal(one_hot_tensor, 0))
one_hot_values = one_hot_indices[:, 1]
return tf.SparseTensor(
indices=one_hot_indices,
values=one_hot_values,
dense_shape=tf.shape(one_hot_tensor, out_type=tf.int64))
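# --- Illustrative sketch (not part of the original module) ---
# A hedged example of feeding the SparseTensor returned by to_sparse_tensor()
# into tf.nn.safe_embedding_lookup_sparse. The multi-hot batch, vocabulary size
# and embedding dimension below are made-up values used only for illustration.
def _example_embedding_lookup():
    multi_hot = tf.constant([[0, 1, 1, 0], [1, 0, 0, 1]], dtype=tf.int64)  # batch of multi-hot rows
    sparse_ids = to_sparse_tensor(multi_hot)                               # active feature indices as a SparseTensor
    embedding_table = tf.Variable(tf.random.normal([4, 8]))                # vocab_size=4, embedding_dim=8 (assumed)
    # mean-combine the embeddings of the active features in each row -> shape (2, 8)
    return tf.nn.safe_embedding_lookup_sparse(embedding_table, sparse_ids, combiner="mean")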
| 2.765625
| 3
|
tests/test_final.py
|
justanr/objtoolz
| 1
|
12783398
|
<reponame>justanr/objtoolz<filename>tests/test_final.py
from objtoolz.metas.final import Final
import pytest
def test_cant_inherit_from_final():
class FinalTest(object):
pass
FinalTest = Final('FinalTest', (FinalTest,), {})
with pytest.raises(TypeError) as err:
class SubclassedFinalTest(FinalTest):
pass
assert 'Attempting to inherit from final class' in str(err.value)
| 2.40625
| 2
|
write-up/Fortnight Challenge 2022/Cryptography/Really Silly Algorithm/server.py
|
compsec-hcmus/hcmus-fortnight-wu
| 0
|
12783399
|
#!/usr/bin/env python3
import binascii
import threading
from time import *
import socketserver
from string import hexdigits
from Crypto.Util.number import getPrime, inverse, bytes_to_long, long_to_bytes
banner = """
Welcome to my supreme signing server!
Send me a signed command, I will verify and do it for you, I will also sign your commands, but don't tinker too much with them though!
I'm not Blind, I can see through your cunning ruse, sometimes!
"""
FLAG_FILE = "flag.txt"
class RSA:
def __init__(self):
self.e = 0x10001
p = getPrime(1024)
q = getPrime(1024)
self.n = p * q
phi = (p - 1) * (q - 1)
self.d = inverse(self.e, phi)
def get_public_key(self):
return (self.e, self.n)
def sign(self, msg):
hex_str_of_peek = binascii.hexlify("peek".encode()).decode()
if msg.startswith(hex_str_of_peek):
return -1
msg = bytes_to_long(binascii.unhexlify(msg.encode()))
return pow(msg, self.d, self.n)
def verify(self, msg):
msg = bytes_to_long(binascii.unhexlify(msg.encode()))
return pow(msg, self.e, self.n)
class Service(socketserver.BaseRequestHandler):
#handle() will always run first
def handle(self):
self.get_flag()
rsa = RSA()
self.send(banner)
while True:
choice = self.receive("1. Sign\n2. Verify\nYour choice: ").decode()
if choice == "1":
cmd = self.receive("Command to sign: ").decode()
if not self.assure_hex(cmd):
self.send("Please send a hex string!\n")
continue
signed_msg = rsa.sign(cmd)
if signed_msg != -1:
self.send("Message signed successfully!\n" + self.num_to_hex_str(signed_msg))
else:
self.send("Ah ah, don't tinker with the commands!")
elif choice == "2":
cmd = self.receive("Command to verify: ").decode()
if not self.assure_hex(cmd):
self.send("Please send a hex string!\n")
continue
verified_cmd = rsa.verify(cmd)
verified_cmd = long_to_bytes(verified_cmd)
try:
#could be jibberish ¯\_(ツ)_/¯
verified_cmd = verified_cmd.decode()
if verified_cmd == "peek flag":
self.send("Here is the flag!\n" + self.flag)
break
elif verified_cmd == "get pubkey":
self.send("Here is the public key!\n" + str(rsa.get_public_key()) + "\n")
else:
self.send("Command executed!")
break
except:
self.send("There's something wrong with your command!")
break
else:
break
def num_to_hex_str(self, num):
return binascii.hexlify(long_to_bytes(num)).decode()
def hex_str_to_num(self, string):
return bytes_to_long(binascii.unhexlify(string.encode()))
def assure_hex(self, string):
return all(c in hexdigits for c in string)
def get_flag(self):
with open(FLAG_FILE, "r") as f:
self.flag = f.read()
def send(self, string, newline=True):
if type(string) is str:
string = string.encode("utf-8")
if newline:
string = string + b"\n"
self.request.sendall(string)
def receive(self, prompt=": "):
self.send(prompt, newline=False)
return self.request.recv(1000).strip()
class ThreadedService(
socketserver.ThreadingMixIn,
socketserver.TCPServer,
socketserver.DatagramRequestHandler,
):
pass
def main():
port = 20314
host = "103.245.249.107"
service = Service
server = ThreadedService((host, port), service)
server.allow_reuse_address = True
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
print("Server started on " + str(server.server_address) + "!")
# Now let the main thread just wait...
while True:
sleep(10)
if __name__ == "__main__":
main()
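# --- Illustrative sketch (not part of the challenge server) ---
# The signing oracle above refuses any hex string that starts with
# hexlify(b"peek"), but textbook RSA signatures are malleable, so the classic
# blinding trick still applies. Assuming the public key (e, n) is known and
# sign_oracle(hex_str) returns the integer signature exactly like sign() does,
# one possible approach looks like this:
def _blind_sign_peek_flag(e, n, sign_oracle):
    m = bytes_to_long(b"peek flag")
    r = 2                                              # any r coprime to n works
    blinded = (m * pow(r, e, n)) % n                   # hex of this (almost surely) no longer starts with "peek"
    blinded_hex = binascii.hexlify(long_to_bytes(blinded)).decode()
    s_blinded = sign_oracle(blinded_hex)               # (m * r^e)^d = m^d * r  (mod n)
    s = (s_blinded * inverse(r, n)) % n                # unblind: m^d mod n
    return binascii.hexlify(long_to_bytes(s)).decode()     # verifying this yields b"peek flag"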
| 2.71875
| 3
|
tests/test_runtime.py
|
NathanDeMaria/aws-lambda-r-runtime
| 134
|
12783400
|
import base64
import json
import re
import unittest
import boto3
from tests import get_version, get_function_name, is_local
from tests.sam import LocalLambdaServer, start_local_lambda
class TestRuntimeLayer(unittest.TestCase):
lambda_server: LocalLambdaServer = None
@classmethod
def setUpClass(cls):
if is_local():
cls.lambda_server = start_local_lambda(template_path="test-template.yaml",
parameter_overrides={'Version': get_version()},
)
def get_client(self):
return self.lambda_server.get_client() if is_local() else boto3.client('lambda')
def test_script(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("ExampleFunction"),
Payload=json.dumps({'x': 1}),
)
raw_payload = response['Payload'].read().decode('utf-8')
result = json.loads(raw_payload)
self.assertEqual(2, result)
def test_lowercase_extension(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("LowerCaseExtensionFunction"),
Payload=json.dumps({'x': 1}),
)
raw_payload = response['Payload'].read().decode('utf-8')
result = json.loads(raw_payload)
self.assertEqual(2, result)
def test_multiple_arguments(self):
lambda_client = self.get_client()
payload = {'x': 'bar', 'y': 1}
response = lambda_client.invoke(FunctionName=get_function_name("MultipleArgumentsFunction"),
Payload=json.dumps(payload),
)
raw_payload = response['Payload'].read().decode('utf-8')
result = json.loads(raw_payload)
self.assertDictEqual(payload, result)
@unittest.skipIf(is_local(), 'Lambda local does not support log retrieval')
def test_debug_logging(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("LoggingFunction"),
LogType='Tail',
Payload=json.dumps({'x': 1}),
)
raw_payload = response['Payload'].read().decode('utf-8')
result = json.loads(raw_payload)
self.assertEqual(1, result)
log = base64.b64decode(response['LogResult']).decode('utf-8')
self.assertIn("runtime:Sourcing 'script.R'", log)
self.assertIn("runtime:Invoking function 'handler_with_debug_logging' with parameters:\n$x\n[1] 1", log)
self.assertIn("runtime:Function returned:\n[1] 1", log)
self.assertIn("runtime:Posted result:\n", log)
@unittest.skipIf(is_local(), 'Lambda local does not support log retrieval')
def test_no_debug_logging(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("ExampleFunction"),
LogType='Tail',
Payload=json.dumps({'x': 1}),
)
raw_payload = response['Payload'].read().decode('utf-8')
result = json.loads(raw_payload)
self.assertEqual(2, result)
log = base64.b64decode(response['LogResult']).decode('utf-8')
self.assertNotIn("Sourcing ", log)
self.assertNotIn("Invoking function ", log)
self.assertNotIn("Function returned:", log)
self.assertNotIn("Posted result:", log)
@unittest.skipIf(is_local(), 'Lambda local does not pass errors properly')
def test_missing_source_file(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("MissingSourceFileFunction"),
Payload=json.dumps({'y': 1}),
)
raw_payload = response['Payload'].read().decode('utf-8')
json_payload = json.loads(raw_payload)
self.assertEqual('Unhandled', response['FunctionError'])
self.assertIn('Source file does not exist: missing.[R|r]', json_payload['errorMessage'])
self.assertEqual('simpleError', json_payload['errorType'])
@unittest.skipIf(is_local(), 'Lambda local does not pass errors properly')
def test_missing_function(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("MissingFunctionFunction"),
Payload=json.dumps({'y': 1}),
)
raw_payload = response['Payload'].read().decode('utf-8')
json_payload = json.loads(raw_payload)
self.assertEqual('Unhandled', response['FunctionError'])
self.assertIn('Function "handler_missing" does not exist', json_payload['errorMessage'])
self.assertEqual('simpleError', json_payload['errorType'])
@unittest.skipIf(is_local(), 'Lambda local does not pass errors properly')
def test_function_as_variable(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("HandlerAsVariableFunction"),
Payload=json.dumps({'y': 1}),
)
raw_payload = response['Payload'].read().decode('utf-8')
json_payload = json.loads(raw_payload)
self.assertEqual('Unhandled', response['FunctionError'])
self.assertIn('Function "handler_as_variable" does not exist', json_payload['errorMessage'])
self.assertEqual('simpleError', json_payload['errorType'])
@unittest.skipIf(is_local(), 'Lambda local does not pass errors properly')
def test_missing_argument(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("ExampleFunction"))
raw_payload = response['Payload'].read().decode('utf-8')
json_payload = json.loads(raw_payload)
self.assertEqual('Unhandled', response['FunctionError'])
self.assertIn('argument "x" is missing, with no default', json_payload['errorMessage'])
self.assertEqual('simpleError', json_payload['errorType'])
@unittest.skipIf(is_local(), 'Lambda local does not pass errors properly')
def test_unused_argument(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("ExampleFunction"),
Payload=json.dumps({'x': 1, 'y': 1}),
)
raw_payload = response['Payload'].read().decode('utf-8')
json_payload = json.loads(raw_payload)
self.assertEqual('Unhandled', response['FunctionError'])
self.assertIn('unused argument (y = 1)', json_payload['errorMessage'])
self.assertEqual('simpleError', json_payload['errorType'])
# @unittest.skipIf(is_local(), 'Fails locally with "argument list too long"')
@unittest.skip('Fails with timeout')
def test_long_argument(self):
lambda_client = self.get_client()
payload = {x: x for x in range(0, 100000)}
response = lambda_client.invoke(FunctionName=get_function_name("VariableArgumentsFunction"),
Payload=json.dumps(payload),
)
raw_payload = response['Payload'].read().decode('utf-8')
result = json.loads(raw_payload)
self.assertEqual(1, result)
@unittest.skipIf(is_local(), 'Lambda local does not pass errors properly')
def test_missing_library(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("MissingLibraryFunction"),
Payload=json.dumps({'y': 1}),
)
raw_payload = response['Payload'].read().decode('utf-8')
json_payload = json.loads(raw_payload)
self.assertEqual('Unhandled', response['FunctionError'])
self.assertIn('there is no package called ‘Matrix’', json_payload['errorMessage'])
error_type = 'packageNotFoundError' if get_version() == '3_6_0' else 'simpleError'
self.assertEqual(error_type, json_payload['errorType'])
@classmethod
def tearDownClass(cls):
if is_local():
cls.lambda_server.kill()
| 2.328125
| 2
|
Module 3/Chapter 4/urlsqli.py
|
kongjiexi/Python-Penetration-Testing-for-Developers
| 34
|
12783401
|
import requests
url = "http://127.0.0.1/SQL/sqli-labs-master/Less-1/index.php?id="
initial = "'"
print "Testing "+ url
first = requests.post(url+initial)
if "mysql" in first.text.lower():
print "Injectable MySQL detected"
elif "native client" in first.text.lower():
print "Injectable MSSQL detected"
elif "syntax error" in first.text.lower():
print "Injectable PostGRES detected"
elif "ORA" in first.text.lower():
print "Injectable Oracle detected"
else:
print "Not Injectable :( "
| 2.84375
| 3
|
pims/filters/global_histogram.py
|
hurondp/pims
| 2
|
12783402
|
<gh_stars>1-10
# * Copyright (c) 2020-2021. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from abc import ABC, abstractmethod
from functools import cached_property
import numpy as np
from pyvips import Image as VIPSImage
from skimage.filters import threshold_isodata, threshold_minimum, threshold_otsu, threshold_yen
from pims.api.utils.models import Colorspace, FilterType
from pims.filters import AbstractFilter
from pims.processing.histograms.utils import clamp_histogram
class AbstractGlobalFilter(AbstractFilter, ABC):
@classmethod
def get_type(cls):
return FilterType.GLOBAL
class AbstractGlobalThresholdFilter(AbstractGlobalFilter, ABC):
@classmethod
def require_histogram(cls):
return True
@classmethod
def required_colorspace(cls):
return Colorspace.GRAY
def __init__(self, histogram=None, white_objects=False):
super().__init__(histogram)
self.white_objects = white_objects
self._impl[VIPSImage] = self._vips_impl
@cached_property
@abstractmethod
def threshold(self):
pass
def _vips_impl(self, img, *args, **kwargs):
if self.white_objects:
return img <= self.threshold
else:
return img > self.threshold
@classmethod
def get_name(cls):
return f"{super().get_name()} Threshold"
class OtsuThresholdFilter(AbstractGlobalThresholdFilter):
@classmethod
def identifier(cls):
return "Otsu"
@classmethod
def get_description(cls):
return "Otsu global filtering"
@cached_property
def threshold(self):
return threshold_otsu(hist=clamp_histogram(self.histogram))
class IsodataThresholdFilter(AbstractGlobalThresholdFilter):
@classmethod
def identifier(cls):
return "IsoData"
@cached_property
def threshold(self):
return threshold_isodata(hist=clamp_histogram(self.histogram))
@classmethod
def get_description(cls):
return "Isodata global filtering"
@classmethod
def aliases(cls):
# Default ImageJ auto threshold is a slight variant of Isodata threshold
# https://imagej.net/plugins/auto-threshold
return ["binary"]
class YenThresholdFilter(AbstractGlobalThresholdFilter):
@classmethod
def identifier(cls):
return "Yen"
@cached_property
def threshold(self):
return threshold_yen(hist=clamp_histogram(self.histogram))
@classmethod
def get_description(cls):
return "Yen global filtering"
class MinimumThresholdFilter(AbstractGlobalThresholdFilter):
@classmethod
def identifier(cls):
return "Minimum"
@cached_property
def threshold(self):
return threshold_minimum(hist=clamp_histogram(self.histogram))
@classmethod
def get_description(cls):
return "Minimum global filtering"
class MeanThresholdFilter(AbstractGlobalThresholdFilter):
@cached_property
def threshold(self):
hist, _ = clamp_histogram(self.histogram)
return np.average(np.arange(hist.size), weights=hist)
@classmethod
def identifier(cls):
return "Mean"
@classmethod
def get_description(cls):
return "Mean global filtering"
| 2.203125
| 2
|
exercises/solution_01_28b.py
|
ali4413/Ali-Mehrabifard
| 0
|
12783403
|
<reponame>ali4413/Ali-Mehrabifard
import pandas as pd
# The database
hockey_players = pd.read_csv('data/canucks.csv', index_col = 0)
# Find the total salary of the team and save it in an object called `player_cost`
player_cost = hockey_players[['Salary']].sum()
# Display it
player_cost
| 3.546875
| 4
|
apps/projects/fields.py
|
ExpoAshique/ProveBanking__s
| 0
|
12783404
|
from collections import OrderedDict
from vendors.models import Vendor
from .models import StaffingRequest
def requests_as_choices():
choices = OrderedDict()
requests = StaffingRequest.objects.all().order_by('-id')
for request in requests:
choices[request.project] = choices.get(request.project, [])
choices[request.project].append((request.id, request))
return choices.items()
def vendors_as_choices():
choices = OrderedDict()
vendors = Vendor.objects.all().order_by('-avg_score')
choices['Suggested vendors'] = []
for vendor in vendors:
choices['All vendors'] = choices.get('All vendors', [])
choices['All vendors'].append((vendor.id, vendor))
return choices.items()
| 2.46875
| 2
|
ke_tool/evaluate_transe_inductive.py
|
MichalPitr/KEPLER
| 98
|
12783405
|
<filename>ke_tool/evaluate_transe_inductive.py
import argparse
import graphvite as gv
import graphvite.application as gap
import numpy as np
import json
from tqdm import tqdm
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--entity_embeddings', help='numpy of entity embeddings')
parser.add_argument('--relation_embeddings', help='numpy of relation embeddings')
parser.add_argument('--entity2id', help='entity name to numpy id json')
parser.add_argument('--relation2id', help='entity name to numpy id json')
parser.add_argument('--dim', type=int, help='size of embedding')
parser.add_argument('--dataset', help="test dataset")
args = parser.parse_args()
# Building the graph
app = gap.KnowledgeGraphApplication(dim=args.dim)
app.load(file_name=args.dataset)
app.build()
app.train(model='TransE', num_epoch=0)
gv_entity2id = app.graph.entity2id
gv_relation2id = app.graph.relation2id
# Load embeddings (Only load the embeddings that appear in the entity2id file)
entity_embeddings_full = np.load(args.entity_embeddings)
relation_embeddings_full = np.load(args.relation_embeddings)
entity2id_ori = json.load(open(args.entity2id))
relation2id_ori = json.load(open(args.relation2id))
entity_embeddings = np.zeros((len(gv_entity2id), args.dim), dtype=np.float32)
entity2id = {}
i = 0
for key in tqdm(gv_entity2id):
entity2id[key] = i
entity_embeddings[i] = entity_embeddings_full[entity2id_ori[key]]
i += 1
relation_embeddings = np.zeros((len(gv_relation2id), args.dim), dtype=np.float32)
relation2id = {}
i = 0
for key in tqdm(gv_relation2id):
relation2id[key] = i
relation_embeddings[i] = relation_embeddings_full[relation2id_ori[key]]
i += 1
# Load embeddings to graphvite
print('load data ......')
assert(len(relation_embeddings) == len(app.solver.relation_embeddings))
assert(len(entity_embeddings) == len(app.solver.entity_embeddings))
app.solver.relation_embeddings[:] = relation_embeddings
print('loaded relation embeddings')
app.solver.entity_embeddings[:] = entity_embeddings
print('loaded entity embeddings')
# (Modified gv) Replace mapping with our own
app.entity2id = entity2id
app.relation2id = relation2id
print('start evaluation ......')
app.evaluate('link_prediction', file_name=args.dataset, filter_files=[args.dataset])
if __name__ == '__main__':
main()
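# --- Illustrative sketch (not part of the original script) ---
# A hedged example invocation; all file names below are placeholders only:
#   python evaluate_transe_inductive.py \
#       --entity_embeddings entity.npy --relation_embeddings relation.npy \
#       --entity2id entity2id.json --relation2id relation2id.json \
#       --dim 512 --dataset test.txt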
| 2.34375
| 2
|
examples/representation/extract_ir2vec.py
|
ComputerSystemsLaboratory/YaCoS
| 8
|
12783406
|
#! /usr/bin/env python3
"""
Copyright 2021 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#
# Classify applications into 104 classes given their raw code.
#
# The representation (graph) is created from IR.
#
import os
import sys
import glob
import numpy as np
from absl import app, flags, logging
from yacos.info import compy as R
from yacos.info.compy.extractors import LLVMDriver
def execute(argv):
"""Extract a graph representation."""
del argv
FLAGS = flags.FLAGS
# Instantiate the LLVM driver.
driver = LLVMDriver([])
# Instantiate the builder.
builder = R.LLVMIR2VecBuilder(driver)
# Verify dataset directory.
if not os.path.isdir(FLAGS.dataset_directory):
logging.error('Dataset directory {} does not exist.'.format(
FLAGS.dataset_directory)
)
sys.exit(1)
folders = [
os.path.join(FLAGS.dataset_directory, subdir)
for subdir in os.listdir(FLAGS.dataset_directory)
if os.path.isdir(os.path.join(FLAGS.dataset_directory, subdir))
]
idx = FLAGS.dataset_directory.rfind('/')
last_folder = FLAGS.dataset_directory[idx+1:]
# Load data from all folders
for folder in folders:
# Create the output directory.
outdir = os.path.join(folder.replace(last_folder,
'{}_ir2vec'.format(last_folder)))
os.makedirs(outdir, exist_ok=True)
# Extract "ir2vec" from the file
sources = glob.glob('{}/*.ll'.format(folder))
for source in sources:
try:
extractionInfo = builder.ir_to_info(source)
except Exception:
logging.error('Error {}.'.format(source))
continue
filename = source.replace(folder, outdir)
filename = filename[:-3]
np.savez_compressed(filename,
values=extractionInfo.moduleInfo.ir2vec)
# Execute
if __name__ == '__main__':
# app
flags.DEFINE_string('dataset_directory',
None,
'Dataset directory')
flags.mark_flag_as_required('dataset_directory')
app.run(execute)
| 2.109375
| 2
|
python/shipRoot_conf.py
|
Plamenna/proba
| 0
|
12783407
|
<reponame>Plamenna/proba
import ROOT, atexit, sys
#-----prepare python exit-----------------------------------------------
ROOT.gInterpreter.ProcessLine('typedef double Double32_t')
def pyExit():
x = sys.modules['__main__']
if hasattr(x,'run'): del x.run
if hasattr(x,'fMan'): del x.fMan
if hasattr(x,'fRun'): del x.fRun
pass
def configure():
#ROOT.gROOT.LoadMacro("$VMCWORKDIR/gconfig/basiclibs.C")
#ROOT.basiclibs()
ROOT.gSystem.Load("libPythia6")
ROOT.gSystem.Load("libpythia8")
atexit.register(pyExit)
| 2.109375
| 2
|
analyzer.py
|
thatwist/volunteer-tools
| 0
|
12783408
|
<filename>analyzer.py
from model import Post
from dataclasses import dataclass, replace, asdict
from abc import ABC, abstractmethod
import re
class Rule(ABC):
@abstractmethod
def analyze(self, post: Post):
pass
@dataclass
class RegexpRule(Rule):
def __init__(self, regexps: dict[str, list[str]], update: dict[str, str]):
self.regexps = regexps
self.update = update
    def analyze(self, post: Post) -> Post:
        print(f"regexps {self.regexps}")
        post_dict = asdict(post)
        for k, v in self.regexps.items():
            for r in v:
                pattern = re.compile(r)  # todo compile once
                if re.search(pattern, post_dict[k]):
                    print(f"match {pattern} in {post_dict[k]}")
                    return replace(post, **self.update)
        return post
rules = [
RegexpRule({ "text": ["київ"] }, { "geo": "київ" }),
# hide ammunition
RegexpRule({ "text": ["бронік", "бронежилет", ""] }, { "hidden": True }),
]
def analyze(p: Post) -> Post:
    updated = p
    for rule in rules:
        updated = rule.analyze(updated)
    return updated
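# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example, assuming Post is a dataclass with at least the "text",
# "geo" and "hidden" fields referenced by the rules above; the actual
# constructor in model.py may differ.
# post = analyze(Post(text="бронік у київ", geo=None, hidden=False))  # hypothetical fields
# post.geo     -> "київ"  (first rule matched)
# post.hidden  -> True    (ammunition rule matched)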
| 2.6875
| 3
|
examples/routes/resequence_route.py
|
route4me/route4me-python-sdk
| 10
|
12783409
|
# -*- coding: utf-8 -*-
from route4me import Route4Me
API_KEY = "11111111111111111111111111111111"
def main():
r4m = Route4Me(API_KEY)
route = r4m.route
response = route.get_routes(limit=1, offset=0)
if isinstance(response, dict) and 'errors' in response.keys():
print('. '.join(response['errors']))
else:
route_id = response[0]['route_id']
print('Route ID: {}'.format(route_id))
response = route.get_route(route_id=route_id)
if isinstance(response, dict) and 'errors' in response.keys():
print('. '.join(response['errors']))
else:
print('Original Route')
print('Route ID: {}'.format(response['route_id']))
for i, address in enumerate(response['addresses']):
print('Address #{}'.format(i + 1))
print('\tAddress: {0}'.format(address['address']))
print('\tRoute Destination ID: {0}'.format(
address['route_destination_id']))
route_destination_id = response['addresses'][1]['route_destination_id']
route_destination_id2 = response['addresses'][2]['route_destination_id']
data = {
"route_destination_id": route_destination_id,
"route_id": route_id,
"addresses": [{
"route_destination_id": route_destination_id2,
"sequence_no": 6,
}]
}
print('After Re-sequence Route')
response = route.resequence_route(**data)
print('Route ID: {}'.format(response['route_id']))
for i, address in enumerate(response['addresses']):
print('Address #{}'.format(i + 1))
print('\tAddress: {0}'.format(address['address']))
print('\tRoute Destination ID: {0}'.format(
address['route_destination_id']))
if __name__ == '__main__':
main()
| 2.734375
| 3
|
FLAME/FLAME.py
|
marabouboy/FLAME
| 6
|
12783410
|
<filename>FLAME/FLAME.py
#!/usr/bin/python3
#FLAME 0.1.4
#Import Packages:
import argparse, io
import pysam
import FLAME_FUNC.FLAME_FUNC as FF #Break this down into each part based on the command.
#####################
# To-be-fixed list:
# Fix the comments within FLAME.py. They are a bit wonky, with different explanations for different functions. Be consistent.
#
#####################
#Input command:
parser = argparse.ArgumentParser(description = "FLAME: Full Length Adjacency Matrix Enumeration")
##Obligatory Inputs:
parser.add_argument("-I", dest = "INPUT", help = "Input file [Required]")
parser.add_argument("-GTF", dest = "GTF", help = "Reference File in GTF format")
parser.add_argument("-G", dest = "GENE", help = "Targeted Gene [Required]", default = "Transcriptome Mode")
parser.add_argument("--range", dest = "RANGE", help = "Variance Range", default = 20)
parser.add_argument("--min", dest = "MINIMUM", help = "Minimum Read Coverage", default = 10)
parser.add_argument("--ratio", dest = "RATIO", help = "Minimum Annotation Ratio", default = float(0.25))
parser.add_argument("-O", dest = "OUTPUT", help = "Output Prefix", default = "Flame")
##Optional Inputs:
parser.add_argument("-R", dest = "REF", help = "Reference File in Fasta format", default = 0)
#parser.add_argument("-THRESHOLD?", dest = "THRESHOLD", help = "Threshold for the Frequency Analysis, the lower threshold the more comprehensive but the longer processing power required", default = 0.01)?
parser.add_argument("-B", dest = "SAM", help = "Shortread Sequence", default = 0)
parser.add_argument("--verbose", dest = "VERBOSE", help = "Verbose output", action="store_true")
##Parser Function:
args = parser.parse_args()
print("\n-------------------------------------------------------------------------------------------")
print("\n-----------\tFLAME: Full Length Adjacency Matrix Enumeration\t\t\t-----------\n")
print("-------------------------------------------------------------------------------------------")
print("\n-----------\tInitiating FLAME\t\t\t\t\t\t-----------")
print("Input:\t\t{}\n\
GTF:\t\t{}\n\
Gene:\t\t{}\n\
Range:\t\t{}\n\
Output:\t\t{}-[Suffix]".format(
args.INPUT,
args.GTF,
args.GENE,
args.RANGE, #ADD ANOTHER FIELD SPECIFYING THE MINIMUM RATIO FOR FLAME-TRANSCRIPTOMEWIDE?
args.OUTPUT
))
#Variables:
##Input Files and References:
Flame_Main_INPUT1 = []
for Flame_Main_COUNT in open(args.INPUT, "r"):
Flame_Main_INPUT1.append(Flame_Main_COUNT)
try:
if args.GTF != None:
Flame_Main_GTFFILE = open(args.GTF, "r") #Storage[List] of the GTF file.
else:
Flame_Main_GTFFILE = io.StringIO("")
except:
print("GTF-File Error")
Flame_Main_RANGESIZE = int(args.RANGE) #Storage(Integer) of the Windowsize.
Flame_Main_MINIMUMREADS = int(args.MINIMUM) #Storage(Integer) of the minimum number of reads.
Flame_Main_RATIOTHRESH = float(args.RATIO) #Storage(Float) of the ratio required for a gene to pass through and be Translated and Quantified.
Flame_Main_GENENAME = args.GENE #Storage("String") of the Specific Gene.
Flame_Main_REFERENCELIST = [] #Storage[Nested List] of the reference.
Flame_Main_SIGNIFGENES = []
Flame_Main_FREQUENCYWINDOWSIZE = 2 #FIX: Make this interactive so one can input the window size for the Frequency Analysis.
Flame_Main_FREQYENCYTHRESHOLD = float(0.01) #FIX: Make this interactive so one can input the window size for the Frequency Threshold.
#-------------------------------------------------------------------------------------------------------------------------------------#
#FLAME: Transcriptome Mode:
if args.GENE == "Transcriptome Mode": #<----------------- Change here
Flame_Main_GENELIST1 = []
Flame_Main_GENELIST2 = []
Flame_Main_TMPGENE1 = ""
Flame_Main_TMPGENE2 = ""
Flame_Main_INPUT2 = []
Flame_Main_PROGRESSMAX = len(Flame_Main_INPUT1)
Flame_Main_Counter1 = True
Flame_Main_Counter2 = True
Flame_Main_Counter3 = True
#FLAME-WT: 1. Segment the number of genes into a list:
for Flame_Main_COUNT1 in Flame_Main_GTFFILE.read().split("\n"):
try:
if (any(Flame_Main_COUNT1.split("\t")[2]) and
("exon" in Flame_Main_COUNT1.split("\t")[2])):
try:
Flame_Main_TMPGENE1 = Flame_Main_COUNT1.split(";")[2].split(" ")[2][1:-1]
except: #Need this "extra" except clause?
pass
if Flame_Main_TMPGENE1 == Flame_Main_TMPGENE2:
pass
else:
Flame_Main_TMPGENE1 = Flame_Main_TMPGENE1
Flame_Main_GENELIST1.append(Flame_Main_TMPGENE1)
else:
pass
except:
pass
Flame_Main_GENELIST2 = list(set(Flame_Main_GENELIST1))
Flame_Main_GENELIST2.sort()
#FLAME-WT: 2. Loop through this list in order to create a tmp-ref-file:
for Flame_Main_COUNT1 in Flame_Main_GENELIST2:
Flame_Main_GTFFILE.seek(0, 0)
print("\n%s:" %Flame_Main_COUNT1)
print("-----------\tInitiate Creation of Reference\t\t\t\t\t-----------")
Flame_Main_REFERENCELIST = FF.CREATEREFFUNC(Flame_Main_GTFFILE,
Flame_Main_COUNT1)
print("-----------\tInitiate Extraction of Relevant Reads\t\t\t\t-----------")
Flame_Main_INPUT2 = FF.SEGMENTFUNC(Flame_Main_INPUT1,
Flame_Main_REFERENCELIST)
#FLAME-WT: 3. Run the FILTERFUNC on the separated reads:
print("-----------\tInitiate Filter Function\t\t\t\t\t-----------")
Flame_Main_ANNOTATEDREADS, Flame_Main_INCONGRUENTREADS = FF.FILTERFUNC(Flame_Main_INPUT2,
Flame_Main_REFERENCELIST,
Flame_Main_RANGESIZE)
#FLAME-WT: 4. Acquire the Ratio of Annoated Reads vs Incongruent Reads:
try:
Flame_Main_RATIO = float(len(Flame_Main_ANNOTATEDREADS)/
(len(Flame_Main_ANNOTATEDREADS)+len(Flame_Main_INCONGRUENTREADS)))
except (ZeroDivisionError):
print("%s: Zero Division, No Read-Coverage" % Flame_Main_COUNT1)
continue
#5a. If AnnotatedReads/TotalReads >= 0.25 (25%); simply run it through TRANSLATEFUNC & QUANTFUNC.
#print("Number of Reads:", len(Flame_Main_INPUT2))<------
if len(Flame_Main_INPUT2) < Flame_Main_MINIMUMREADS:
print("%s: Number of reads under gene-region below minimum-read threshold." %Flame_Main_COUNT1)
continue
if Flame_Main_RATIO >= Flame_Main_RATIOTHRESH:
Flame_Main_TRANSLATEANNOTATED, Flame_Main_TRANSLATEINCONGRUENT = FF.TRANSLATEFUNC(Flame_Main_REFERENCELIST,
Flame_Main_RANGESIZE,
Flame_Main_ANNOTATEDREADS)
print("-----------\tInitiate Quantification of each Splice Permutaiton, Annotated\t-----------")
Flame_Main_QUANTIFYANNOTATED = FF.QUANTIFYFUNC(Flame_Main_TRANSLATEANNOTATED)
Flame_Main_READTOTALLENGTH = []
##Print out quantification:
if Flame_Main_Counter1:
Flame_Main_OUTPUT = open("%s.QuantAnnotated.tsv" %args.OUTPUT, "w+")
Flame_Main_OUTPUT.write("Gene" + "\t" +
"Count" + "\t" +
"Length of Isoform" + "\t" +
"Number of Exons" + "\t" +
"Isoform Permutation" + "\n")
for k, v in Flame_Main_QUANTIFYANNOTATED.items():
#-----------The Function for Summing the Total Length of the Splice Variant-----------#
Flame_Main_READTOTALLENGTH = []
for Flame_Main_ANNOTATEDEXON in k.split(","):
for Flame_Main_COUNT in Flame_Main_REFERENCELIST:
if Flame_Main_ANNOTATEDEXON == Flame_Main_COUNT[0]:
Flame_Main_READTOTALLENGTH.append(int(Flame_Main_COUNT[3]))
elif Flame_Main_ANNOTATEDEXON != Flame_Main_COUNT[0]:
pass
#-----------The Function for Summing the Total Length of the Splice Variant-----------#
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT1) + "\t" +
str(v) + "\t" +
str(sum(Flame_Main_READTOTALLENGTH)) + "\t" +
str(len(Flame_Main_READTOTALLENGTH)) + "\t" +
str(k) + "\n")
Flame_Main_OUTPUT.close()
Flame_Main_Counter1 = False
else:
if Flame_Main_QUANTIFYANNOTATED != {}:
Flame_Main_OUTPUT = open("%s.QuantAnnotated.tsv" %args.OUTPUT, "a")
for k, v in Flame_Main_QUANTIFYANNOTATED.items():
#-----------The Function for Summing the Total Length of the Splice Variant-----------#
Flame_Main_READTOTALLENGTH = []
for Flame_Main_ANNOTATEDEXON in k.split(","):
for Flame_Main_COUNT in Flame_Main_REFERENCELIST:
if Flame_Main_ANNOTATEDEXON == Flame_Main_COUNT[0]:
Flame_Main_READTOTALLENGTH.append(int(Flame_Main_COUNT[3]))
elif Flame_Main_ANNOTATEDEXON != Flame_Main_COUNT[0]:
pass
#-----------The Function for Summing the Total Length of the Splice Variant-----------#
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT1) + "\t" +
str(v) + "\t" +
str(sum(Flame_Main_READTOTALLENGTH)) + "\t" +
str(len(Flame_Main_READTOTALLENGTH)) + "\t" +
str(k) + "\n")
Flame_Main_OUTPUT.close()
else:
pass
if Flame_Main_Counter2:
Flame_Main_OUTPUT = open("%s.Reference.txt" %args.OUTPUT, "w+")
Flame_Main_OUTPUT.write("Gene" + "\t" +
"Exon Name" + "\t" +
"Chromosome"+ "\t" +
"Exon Start Site" + "\t" +
"Exon Length" + "\t" +
"Exon Stop Site" + "\n")
for Flame_Main_COUNT in Flame_Main_REFERENCELIST:
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT1)+ "\t" +
str(Flame_Main_COUNT[0]) + "\t" +
str(Flame_Main_COUNT[1]) + "\t" +
str(Flame_Main_COUNT[2]) + "\t" +
str(Flame_Main_COUNT[3]) + "\t" +
str(Flame_Main_COUNT[4]) + "\n")
Flame_Main_OUTPUT.close()
Flame_Main_Counter2 = False
else:
Flame_Main_OUTPUT = open("%s.Reference.txt" %args.OUTPUT, "a")
for Flame_Main_COUNT in Flame_Main_REFERENCELIST:
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT1)+ "\t" +
str(Flame_Main_COUNT[0]) + "\t" +
str(Flame_Main_COUNT[1]) + "\t" +
str(Flame_Main_COUNT[2]) + "\t" +
str(Flame_Main_COUNT[3]) + "\t" +
str(Flame_Main_COUNT[4]) + "\n")
Flame_Main_OUTPUT.close()
else:
#5b. If AnnotatedReads/TotalReads <= 0.25; flag the gene for having low consensus with the reference and recommend further inspection.
Flame_Main_SIGNIFGENES.append(Flame_Main_COUNT1)
if Flame_Main_Counter3:
Flame_Main_OUTPUT = open("%s.SignificantGenes.txt" %args.OUTPUT, "w+")
Flame_Main_OUTPUT.write("Gene" + "\t" +
"Number of Reads" + "\t"+
"Ratio Annotated/Incongruent" + "\n")
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT1) + "\t" +
str(len(Flame_Main_INPUT2)) + "\t" +
str(round(Flame_Main_RATIO, 2)) + "\n")
Flame_Main_OUTPUT.close()
Flame_Main_Counter3 = False
else:
Flame_Main_OUTPUT = open("%s.SignificantGenes.txt" %args.OUTPUT, "a")
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT1) + "\t" +
str(len(Flame_Main_INPUT2)) + "\t" +
str(round(Flame_Main_RATIO, 2)) + "\n")
Flame_Main_OUTPUT.close()
#-------------------------------------------------------------------------------------------------------------------------------------#
#FLAME: Single-Gene Mode:
else: #<----------------- Change here
##The optional choices:
###Load the Reference if it is specified to be used for the Adjacent Splice Site Signal (S3) Detection.
if args.REF != 0:
print("Reference:\t{}\n".format(args.REF), end = "")
Flame_Main_REFFILE = open(args.REF, "r")
#Remove the header of the reference as well as make it one continuous string.
Flame_Main_REF = ""
for Flame_Main_REFLINE in Flame_Main_REFFILE:
if ">" in Flame_Main_REFLINE:
pass
else:
Flame_Main_REF += Flame_Main_REFLINE.rstrip()
###Load the Shortread if it specified to be used for the confirmation of splice signal using short read RNA-seq.
if args.SAM != 0:
print("Shortread:\t{}\n".format(args.SAM), end = "")
#If-statement that checks whether the Sequence Mapping/Alignment file is in SAM format or the compressed BAM format.
if str(args.SAM).endswith(".sam") or str(args.SAM).endswith(".SAM"):
Flame_Main_SHORTREAD1 = pysam.AlignmentFile("%s" %args.SAM, "r")
elif str(args.SAM).endswith(".bam") or str(args.SAM).endswith(".BAM"):
Flame_Main_SHORTREAD1 = pysam.AlignmentFile("%s" %args.SAM, "rb")
else:
print("Shortread input file format not recognized ([File].bam/[File].BAM/[File].sam/[File].SAM)")
if args.VERBOSE:
print("Verbose:\tOn\n")
print("-------------------------------------------------------------------------------------------\n")
###The Optional Removal of both.
if args.REF == 0 and args.SAM == 0:
print("\n")
##Variables used for the printing functions:
Flame_Main_Counter1 = 0
Flame_Main_Counter2 = 0
Flame_Main_Counter3_1 = False
Flame_Main_Counter3_2 = False
Flame_Main_Counter4 = 0
Flame_Main_Counter5 = 0
#Function to transform the GTF-file into an efficient python nested list: Exon[Name, Chromosome, Start, Length, Stop].
Flame_Main_REFERENCELIST = FF.CREATEREFFUNC(Flame_Main_GTFFILE,
args.GENE) #((The GTF Input file), (The name of the Gene))
print("-----------\tInitiate Filter Function\t\t\t\t\t-----------")
#Function to classify the reads as either "Annotated" or "Incongruent" and store them.
Flame_Main_ANNOTATEDREADS, Flame_Main_INCONGRUENTREADS = FF.FILTERFUNC(Flame_Main_INPUT1,
Flame_Main_REFERENCELIST,
Flame_Main_RANGESIZE) #Input, Reference, Rangesize
##Print out Annotated:
if Flame_Main_ANNOTATEDREADS != []:
Flame_Main_OUTPUT = open("%s.Annotated.bed" %args.OUTPUT, "w+")
for Flame_Main_COUNT in Flame_Main_ANNOTATEDREADS:
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT) + "\n")
Flame_Main_OUTPUT.close()
else:
pass
##Print out Incongruent:
if Flame_Main_INCONGRUENTREADS != []:
Flame_Main_OUTPUT = open("%s.Incongruent.bed" %args.OUTPUT, "w+")
for Flame_Main_COUNT in Flame_Main_INCONGRUENTREADS:
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT) + "\n")
Flame_Main_OUTPUT.close()
else:
pass
#Translate Function. They have built in Print function. Also make a function that will make either Annotated/Incongruent reads = 0 to avoid downstream computation, if one chooses.
Flame_Main_TRANSLATEANNOTATED, Flame_Main_TRANSLATEINCONGRUENT = FF.TRANSLATEFUNC(Flame_Main_REFERENCELIST,
Flame_Main_RANGESIZE,
Flame_Main_ANNOTATEDREADS,
Flame_Main_INCONGRUENTREADS)
if args.VERBOSE:
##Print out Annotated:
if Flame_Main_TRANSLATEANNOTATED != []:
Flame_Main_OUTPUT = open("%s.AnnotatedTrans.txt" %args.OUTPUT, "w+")
for Flame_Main_COUNT in Flame_Main_TRANSLATEANNOTATED:
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT) + "\n")
Flame_Main_OUTPUT.close()
else:
pass
if args.VERBOSE:
##Print out Incongruent:
if Flame_Main_TRANSLATEINCONGRUENT != []:
Flame_Main_OUTPUT = open("%s.IncongruentTrans.txt" %args.OUTPUT, "w+")
for Flame_Main_COUNT in Flame_Main_TRANSLATEINCONGRUENT:
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT) + "\n")
Flame_Main_OUTPUT.close()
else:
pass
#Annotated Adjacency Matrix Function:
print("-----------\tInitiate Creation of Empty Adjacency Matrix\t\t\t-----------")
#Command itself:
Flame_Main_ADJMTX1 = FF.EMPTYADJMTXFUNC(Flame_Main_REFERENCELIST)
print("-----------\tInitiate Creation of Adjacency Matrix, Annotated\t\t-----------")
#Command itself:
Flame_Main_ADJMTX1 = FF.ANNOTATEDADJMTXFUNC(Flame_Main_TRANSLATEANNOTATED,
Flame_Main_REFERENCELIST,
Flame_Main_ADJMTX1)
##Print out Adjacency Matrix, Annotated:
###Print the Column- and Rowheaders.
Flame_Main_OUTPUT = open("%s.AdjacencyAnnotated.tsv" %args.OUTPUT, "w+")
for Flame_Main_COUNT in Flame_Main_ADJMTX1:
if Flame_Main_Counter1 <= (len(Flame_Main_REFERENCELIST)-1):
Flame_Main_OUTPUT.write("\t" + str(Flame_Main_REFERENCELIST[Flame_Main_Counter1][0]))
Flame_Main_Counter1 += 1
elif Flame_Main_Counter1 > (len(Flame_Main_REFERENCELIST)-1):
Flame_Main_OUTPUT.write("\t" + "END" + "\t")
Flame_Main_OUTPUT.write("\n")
###Filling the Adjacency Matrix itself.
for Flame_Main_COUNT1 in Flame_Main_ADJMTX1:
if Flame_Main_Counter2 <= (len(Flame_Main_REFERENCELIST)-1):
Flame_Main_OUTPUT.write(str(Flame_Main_REFERENCELIST[Flame_Main_Counter2][0]) + "\t")
for Flame_Main_COUNT2 in Flame_Main_COUNT1:
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT2) + "\t")
Flame_Main_OUTPUT.write("\n")
Flame_Main_Counter2 += 1
elif Flame_Main_Counter2 > (len(Flame_Main_REFERENCELIST)-1):
pass
Flame_Main_OUTPUT.close()
print("-----------\tInitiate Singling of Incongruent Exons\t\t\t\t-----------")
#Command itself:
Flame_Main_POTENTIALS = FF.INCONGRUENTSEPERATORFUNC(Flame_Main_TRANSLATEINCONGRUENT,
Flame_Main_REFERENCELIST)
print("-----------\tInitiate Novel Splice Site Detection, Part1: Frequency\t\t-----------")
#Command itself:
Flame_Main_GENEREFERENCE = FF.FREQUENCYSITEFUNC(Flame_Main_POTENTIALS,
Flame_Main_REFERENCELIST,
Flame_Main_RANGESIZE,
Flame_Main_FREQUENCYWINDOWSIZE)
print("-----------\tInitiate Novel Splice Site Detection, Part2: Threshold\t\t-----------")
#Prepare GTF reference:
Flame_Main_SPLICECANDIDATES = []
#Command itself:
Flame_Main_SPLICECANDIDATES = FF.FREQUENCYTHRESHFUNC(Flame_Main_GENEREFERENCE,
Flame_Main_FREQYENCYTHRESHOLD,
Flame_Main_INCONGRUENTREADS)
if args.REF != 0:
print("-----------\tInitiate Novel Splice Site Detection, Part3: Splice Signal\t-----------")
#Command itself:
Flame_Main_SPLICECANDIDATES = FF.SPLICESIGNALFUNC(Flame_Main_SPLICECANDIDATES,
Flame_Main_REF)
Flame_Main_Counter3_1 = True
else:
pass
if args.SAM != 0:
print("-----------\tInitiate Novel Splice Site Detection, Part4: Shortread\t\t-----------")
#Prepare a tmp dictionary:
Flame_Main_SPLICESITECOUNT = {}
#Command itself:
Flame_Main_SPLICECANDIDATES, Flame_Main_SPLICESITECOUNT = FF.SHORTREADFUNC(Flame_Main_SHORTREAD1,
Flame_Main_SPLICECANDIDATES,
Flame_Main_REFERENCELIST)
#The Raw Quantification of Shortread Splice Sites:
Flame_Main_OUTPUT = open("%s.ShortreadSplice.tsv" %args.OUTPUT, "w+") #Make this an verbose option.
Flame_Main_OUTPUT.write("Genomic Position" +
"\t" +
"Number" +
"\n")
for k, v in Flame_Main_SPLICESITECOUNT.items():
Flame_Main_OUTPUT.write(str(k) +
"\t" +
str(v) +
"\n")
Flame_Main_OUTPUT.close()
Flame_Main_Counter3_2 = True
else:
pass
##Print out Potential Splice Sites:
##################################################
##--->ADD THE CHROMOSOME WITHIN THE OUTPUT?!<---##
##################################################
Flame_Main_OUTPUT = open("%s.PotentialSplice.tsv" %args.OUTPUT, "w+")
if Flame_Main_Counter3_1 == False and Flame_Main_Counter3_2 == False:
Flame_Main_OUTPUT.write("Gene Position" + "\t" +
"Supporting Incongruent Reads, Absolute" + "\t" +
"Supporting Incongruent Reads, Percent" + "\n")
for Flame_Main_COUNT in Flame_Main_SPLICECANDIDATES:
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT[0]) + "\t" +
str(Flame_Main_COUNT[1]) + "\t" +
str(Flame_Main_COUNT[2]) + "\n")
Flame_Main_OUTPUT.close()
elif Flame_Main_Counter3_1 == True and Flame_Main_Counter3_2 == False :
Flame_Main_OUTPUT.write("Gene Position" + "\t" +
"Supporting Incongruent Reads, Absolute" + "\t" +
"Supporting Incongruent Reads, Percent" + "\t" +
"Adjacent Splice Signal" + "\n")
for Flame_Main_COUNT in Flame_Main_SPLICECANDIDATES:
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT[0]) + "\t" +
str(Flame_Main_COUNT[1]) + "\t" +
str(Flame_Main_COUNT[2]) + "\t" +
str(Flame_Main_COUNT[3]) + "\n")
Flame_Main_OUTPUT.close()
elif Flame_Main_Counter3_1 == False and Flame_Main_Counter3_2 == True:
Flame_Main_OUTPUT.write("Gene Position" + "\t" +
"Supporting Incongruent Reads, Absolute" + "\t" +
"Supporting Incongruent Reads, Percent" + "\t" +
"Short Read Support" + "\n")
for Flame_Main_COUNT in Flame_Main_SPLICECANDIDATES:
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT[0]) + "\t" +
str(Flame_Main_COUNT[1]) + "\t" +
str(Flame_Main_COUNT[2]) + "\t" +
str(Flame_Main_COUNT[3]) + "\n")
Flame_Main_OUTPUT.close()
elif Flame_Main_Counter3_1 == True and Flame_Main_Counter3_2 == True:
Flame_Main_OUTPUT.write("Gene Position" + "\t" +
"Supporting Incongruent Reads, Absolute" + "\t" +
"Supporting Incongruent Reads, Percent" + "\t" +
"Adjacent Splice Signal" + "\t" +
"Short Read Support" + "\n")
for Flame_Main_COUNT in Flame_Main_SPLICECANDIDATES:
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT[0]) + "\t" +
str(Flame_Main_COUNT[1]) + "\t" +
str(Flame_Main_COUNT[2]) + "\t" +
str(Flame_Main_COUNT[3]) + "\t" +
str(Flame_Main_COUNT[4]) + "\n")
Flame_Main_OUTPUT.close()
else:
print("Error, Type: Counter3")
print("-----------\tInitiate Quantification of each Splice Permutaiton, Annotated\t-----------")
#Command itself:
Flame_Main_QUANTIFYANNOTATED = FF.QUANTIFYFUNC(Flame_Main_TRANSLATEANNOTATED)
Flame_Main_READTOTALLENGTH = []
##Print out quantification:
if Flame_Main_QUANTIFYANNOTATED != {}:
Flame_Main_OUTPUT = open("%s.QuantAnnotated.tsv" %args.OUTPUT, "w+")
Flame_Main_OUTPUT.write("Count" + "\t" +
"Length of Isoform" + "\t" +
"Number of Exons" + "\t" +
"Isoform Permutation" + "\n")
for k, v in Flame_Main_QUANTIFYANNOTATED.items():
#-----------The Function for Summing the Total Length of the Splice Variant-----------#
Flame_Main_READTOTALLENGTH = []
for Flame_Main_ANNOTATEDEXON in k.split(","):
for Flame_Main_COUNT in Flame_Main_REFERENCELIST:
if Flame_Main_ANNOTATEDEXON == Flame_Main_COUNT[0]:
Flame_Main_READTOTALLENGTH.append(int(Flame_Main_COUNT[3]))
elif Flame_Main_ANNOTATEDEXON != Flame_Main_COUNT[0]:
pass
#-----------The Function for Summing the Total Length of the Splice Variant-----------#
Flame_Main_OUTPUT.write(str(v) + "\t" +
str(sum(Flame_Main_READTOTALLENGTH)) + "\t" +
str(len(Flame_Main_READTOTALLENGTH)) + "\t" +
str(k) + "\n")
Flame_Main_OUTPUT.close()
else:
pass
print("-----------\tInitiate Quantification of each Splice Permutaiton, Incongruent\t-----------")
#Command itself:
Flame_Main_QUANTIFYINCONGRUENT = FF.QUANTIFYFUNC(Flame_Main_TRANSLATEINCONGRUENT)
##Print out INCONGRUENT quantification:
if Flame_Main_QUANTIFYINCONGRUENT != {}:
Flame_Main_OUTPUT = open("%s.QuantIncongruent.tsv" %args.OUTPUT, "w+")
Flame_Main_OUTPUT.write("Count" + "\t" +
"Isoform" + "\n")
for k, v in Flame_Main_QUANTIFYINCONGRUENT.items():
Flame_Main_OUTPUT.write(str(v) + "\t" +
str(k) + "\n")
Flame_Main_OUTPUT.close()
else:
pass
#Create an option choice of one wants to produce a Adjacency Matrix:
print("-----------\tInitiate Creation of Empty Adjacency Matrix\t\t\t-----------")
Flame_Main_ADJMTX2 = FF.EMPTYADJMTXFUNC(Flame_Main_SPLICECANDIDATES)
print("-----------\tInitiate Creation of Adjacency Matrix, Incongruent\t\t-----------")
Flame_Main_ADJMTX2 = FF.INCONGRUENTADJMTXFUNC(Flame_Main_POTENTIALS,
Flame_Main_SPLICECANDIDATES,
Flame_Main_ADJMTX2,
Flame_Main_RANGESIZE)
##Print out Adjacency Matrix, Incongruent:
###Print the Column- and Rowheaders.
Flame_Main_OUTPUT = open("%s.AdjacencyIncongruent.tsv" %args.OUTPUT, "w+")
for Flame_Main_COUNT in Flame_Main_ADJMTX2:
if Flame_Main_Counter4 <= (len(Flame_Main_SPLICECANDIDATES)-1):
Flame_Main_OUTPUT.write("\t" + str(Flame_Main_SPLICECANDIDATES[Flame_Main_Counter4][0]))
Flame_Main_Counter4 += 1
elif Flame_Main_Counter4 > (len(Flame_Main_SPLICECANDIDATES)-1):
Flame_Main_OUTPUT.write("\t" + "Below 1%")
Flame_Main_Counter4 += 1
Flame_Main_OUTPUT.write("\n")
###Filling the Adjacency Matrix itself.
for Flame_Main_COUNT1 in Flame_Main_ADJMTX2:
if Flame_Main_Counter5 <= (len(Flame_Main_SPLICECANDIDATES)-1):
Flame_Main_OUTPUT.write(str(Flame_Main_SPLICECANDIDATES[Flame_Main_Counter5][0]) + "\t")
for Flame_Main_COUNT2 in Flame_Main_COUNT1:
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT2) + "\t")
Flame_Main_Counter5 += 1
Flame_Main_OUTPUT.write("\n")
elif Flame_Main_Counter5 == (len(Flame_Main_SPLICECANDIDATES)):
Flame_Main_OUTPUT.write("Below 1%" + "\t")
for Flame_Main_COUNT2 in Flame_Main_COUNT1:
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT2) + "\t")
Flame_Main_OUTPUT.write("\n")
Flame_Main_OUTPUT.close()
##The Verbose Output Option:
###Print out the raw potential ranges:
if args.VERBOSE:
Flame_Main_OUTPUT = open("%s.RawRanges.txt" %args.OUTPUT, "w+")
for Flame_Main_COUNT in Flame_Main_POTENTIALS:
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT) + "\n")
Flame_Main_OUTPUT.close()
### Print out the Reference
if args.VERBOSE:
Flame_Main_OUTPUT = open("%s.Reference.txt" %args.OUTPUT, "w+")
Flame_Main_OUTPUT.write("Exon Name" + "\t" +
"Chromosome" + "\t" +
"Exon Start Site" + "\t" +
"Exon Length" + "\t" +
"Exon Stop Site" + "\n")
for Flame_Main_COUNT in Flame_Main_REFERENCELIST:
Flame_Main_OUTPUT.write(str(Flame_Main_COUNT[0]) + "\t" +
str(Flame_Main_COUNT[1]) + "\t" +
str(Flame_Main_COUNT[2]) + "\t" +
str(Flame_Main_COUNT[3]) + "\t" +
str(Flame_Main_COUNT[4]) + "\n")
Flame_Main_OUTPUT.close()
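# --- Illustrative sketch (not part of the original script) ---
# A hedged example invocation in single-gene mode; the file and gene names are
# placeholders only:
#   python FLAME.py -I reads.bed -GTF annotation.gtf -G MYGENE -O results --range 20 --min 10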
| 2.984375
| 3
|
termpixels/unix_keys.py
|
loganzartman/termpixels
| 17
|
12783411
|
<filename>termpixels/unix_keys.py
import re
from copy import copy
from termpixels.terminfo import Terminfo
from termpixels.keys import Key, Mouse
class KeyParser:
def __init__(self):
self.pattern_key_pairs = {}
def register_key(self, pattern, key):
self.pattern_key_pairs[pattern] = key
def parse(self, group):
matches = []
for pattern, key in self.pattern_key_pairs.items():
if group.startswith(pattern):
matches.append((pattern, copy(key)))
return matches
class SgrMouseParser:
# https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Extended-coordinates
def __init__(self):
self.regex = re.compile(r"\x1b\[(?:\<|M)(\d+);(\d+);(\d+)(m|M)")
def parse(self, group):
match = self.regex.match(group)
if match is None:
return []
pressed = match.group(4) == "M"
button = int(match.group(1))
x = int(match.group(2)) - 1
y = int(match.group(3)) - 1
mouse = Mouse(x, y, **SgrMouseParser.decode_button(button, pressed))
return [(match.group(0), mouse)]
@staticmethod
def decode_button(btn, pressed):
MASK_MOVED = 0b100000
MASK_BUTTON = 0b11
MASK_WHEEL = 0b1000000
action = None
# detect action
if btn & MASK_MOVED:
action = "moved"
elif pressed:
action = "down"
else:
action = "up"
# detect button
left = False
middle = False
right = False
scroll = 0
if btn & MASK_WHEEL:
if btn & MASK_BUTTON == 0:
scroll = -1
else:
scroll = 1
else:
code = btn & MASK_BUTTON
if code == 0:
left = True
elif code == 1:
middle = True
elif code == 2:
right = True
return {
"action": action,
"left": left,
"middle": middle,
"right": right,
"scroll": scroll
}
class X10MouseParser:
# https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking
def __init__(self):
self.regex = re.compile(r"\x1b\[M(.)(.)(.)")
def parse(self, group):
match = self.regex.match(group)
if match is None:
return []
event = ord(match.group(1)) - 32
x = ord(match.group(2)) - 32 - 1
y = ord(match.group(3)) - 32 - 1
mouse = Mouse(x, y, **X10MouseParser.decode_event(event))
return [(match.group(0), mouse)]
@staticmethod
def decode_event(event_code):
""" decode an xterm mouse event character not shifted by 32 """
button_code = event_code & 0b11
moved = event_code & 0b100000
released = button_code == 3
left = False
middle = False
right = False
scroll = 0
if moved:
action = "moved"
elif released:
action = "up"
else:
action = "down"
if event_code & 0b1000000:
# wheel event
if not moved:
if button_code == 0:
scroll = -1
elif button_code == 1:
scroll = 1
else:
# button event
if button_code == 0:
left = True
elif button_code == 1:
middle = True
elif button_code == 2:
right = True
else:
# button up is ambiguous in X10 mouse encoding
left = True
middle = True
right = True
mod_code = (event_code >> 2) & 0b111
# TODO: implement modifiers
if mod_code & 0b001:
pass # shift
if mod_code & 0b010:
pass # meta
if mod_code & 0b100:
pass # control
return {
"action": action,
"left": left,
"middle": middle,
"right": right,
"scroll": scroll
}
def make_key_parser(ti):
parser = KeyParser()
# special keys
names = {
"kbs": "backspace",
"kcbt": "backtab",
"khome": "home",
"kend": "end",
"kich1": "insert",
"kdch1": "delete",
"kpp": "pageup",
"knp": "pagedown",
"kcub1": "left",
"kcuf1": "right",
"kcuu1": "up",
"kcud1": "down"
}
for code, name in names.items():
seq = ti.parameterize(code)
if seq:
parser.register_key(seq.decode("ascii"), Key(name=name))
# terminfo files seem to have bad backspace (kbs) values; just register both
parser.register_key(chr(8), Key(name="backspace", char="\b"))
parser.register_key(chr(127), Key(name="backspace", char="\b"))
parser.register_key("\t", Key(name="tab", char="\t"))
# function keys
for i in range(1, 64):
seq = ti.string("kf{}".format(i))
if seq:
parser.register_key(seq.decode("ascii"), Key(name="f{}".format(i)))
# must be last
parser.register_key("\x1b", Key(name="escape"))
return parser
def make_parsers(ti):
return (
make_key_parser(ti),
X10MouseParser(),
SgrMouseParser()
)
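# --- Illustrative sketch (not part of the original module) ---
# Two hedged examples of how SgrMouseParser.decode_button() interprets raw SGR
# button codes (values chosen only for illustration):
#   SgrMouseParser.decode_button(0, pressed=True)
#       -> {"action": "down", "left": True, "middle": False, "right": False, "scroll": 0}
#   SgrMouseParser.decode_button(65, pressed=True)    # wheel bit (64) + button code 1
#       -> {"action": "down", "left": False, "middle": False, "right": False, "scroll": 1}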
| 2.828125
| 3
|
qs/rpcserver.py
|
pediapress/qserve
| 0
|
12783412
|
<filename>qs/rpcserver.py
#! /usr/bin/env python
from __future__ import print_function
import traceback
from builtins import object
from builtins import str
from past.builtins import basestring
try:
import simplejson as json
except ImportError:
import json
from gevent import pool, server as gserver, Greenlet, getcurrent, queue, spawn, GreenletExit
def key2str(kwargs):
r = {}
for k, v in list(kwargs.items()):
r[str(k)] = v
return r
class Dispatcher(object):
def __call__(self, req):
name, kwargs = req
kwargs = key2str(kwargs)
assert isinstance(name, basestring), "bad name argument"
cmd_name = str("rpc_" + name)
m = getattr(self, cmd_name, None)
if not m:
raise RuntimeError("no such method: %r" % (name,))
return m(**kwargs)
class RequestHandler(Dispatcher):
def __init__(self, client=None, client_id=None, **kw):
self.client = client
self.client_id = client_id
super(RequestHandler, self).__init__(**kw)
def shutdown(self):
super(RequestHandler, self).shutdown()
class ClientGreenlet(Greenlet):
client_id = None
status = ""
def __str__(self):
return "<%s: %s>" % (self.client_id, self.status)
def __repr__(self):
return "<Client %s>" % self.client_id
class Server(object):
def __init__(self, port=8080, host="", get_request_handler=None, secret=None, is_allowed=None):
self.port = port
self.host = host
self.secret = secret
self.get_request_handler = get_request_handler
self.pool = pool.Pool(1024, ClientGreenlet)
self.stream_server = gserver.StreamServer(
(host, port), self.handle_client, spawn=self.pool.spawn
)
if hasattr(self.stream_server, "pre_start"):
self.stream_server.pre_start()
else:
self.stream_server.init_socket() # gevent >= 1.0b1
self.client_count = 0
if is_allowed is None:
self.is_allowed = lambda x: True
else:
self.is_allowed = is_allowed
def run_forever(self):
self.stream_server.serve_forever()
def log(self, msg):
print(msg)
def handle_client(self, sock, addr):
if not self.is_allowed(addr[0]):
self.log("+DENY %r" % (addr,))
sock.close()
return
sock_file = None
current = getcurrent()
try:
self.client_count += 1
clientid = "<%s %s:%s>" % (self.client_count, addr[0], addr[1])
current.clientid = clientid
sock_file = sock.makefile()
lineq = queue.Queue()
def readlines():
while 1:
try:
line = sock_file.readline()
except Exception as e:
self.log("error reading socket: {}".format(e))
break
lineq.put(line)
if not line:
break
readgr = spawn(readlines)
readgr.link(lambda _: current.kill())
current.link(lambda _: readgr.kill())
            handle_request = self.get_request_handler(client=(sock, addr), client_id=clientid)  # RequestHandler.__init__ expects client_id
# self.log("+connect: %s" % (clientid, ))
while 1:
current.status = "idle"
line = lineq.get()
if not line:
break
try:
req = json.loads(line)
except ValueError as err:
self.log("+protocol error %s: %s" % (clientid, err))
break
current.status = "dispatching: %s" % line[:-1]
try:
d = handle_request(req)
response = json.dumps(dict(result=d)) + "\n"
except GreenletExit:
raise
except Exception as err:
response = json.dumps(dict(error=str(err))) + "\n"
traceback.print_exc()
current.status = "sending response: %s" % response[:-1]
sock_file.write(response)
sock_file.flush()
except GreenletExit:
raise
except:
traceback.print_exc()
finally:
current.status = "dead"
# self.log("-disconnect: %s" % (clientid,))
sock.close()
if sock_file is not None:
sock_file.close()
handle_request.shutdown()
| 2.234375
| 2
|
Double Pendulum.py
|
NitulKalita/Python-CLI
| 6
|
12783413
|
<reponame>NitulKalita/Python-CLI
import string
from typing import List
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from double_pendulum import DoublePendulum
def random_hex() -> str:
hex_value = "".join(
np.random.choice(
list(string.hexdigits),
6
)
)
return f"#{hex_value}"
def animate(i):
time_template = 'time = %.1fs'
dt = .05
return_arr = []
for double_pendulum, ax_data in pendula_axes:
ax, line, time_text = ax_data
frame_x, frame_y = double_pendulum.get_frame_coordinates(i)
line.set_data(frame_x, frame_y)
time_text.set_text(time_template % (dt*i))
return_arr.extend([
line,
time_text,
])
return return_arr
def create_axes(fig: "matplotlib.figure.Figure", pendula: List["DoublePendulum"]) -> List["matplotlib.axes._subplots.AxesSubplot"]:
axes = []
longest_double_pendulum = max(pendula, key=lambda x: x.max_length)
for i, double_pendulum in enumerate(pendula):
color = random_hex()
ax = _create_individual_axis(longest_double_pendulum=longest_double_pendulum, fig=fig, i=i)
line, = ax.plot([], [], 'o-', lw=2, color=color)
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
axes.append((ax, line, time_text))
return axes
def _create_individual_axis(longest_double_pendulum: "DoublePendulum", fig: "matplotlib.figure.Figure", i: int) -> "matplotlib.axes._subplots.AxesSubplot":
ax = fig.add_subplot(
111,
autoscale_on=False,
xlim=(
-longest_double_pendulum.max_length,
longest_double_pendulum.max_length
),
ylim=(
-longest_double_pendulum.max_length,
longest_double_pendulum.max_length
),
)
ax.set_aspect('equal')
ax.grid()
return ax
if __name__ == "__main__":
fig = plt.figure()
pendula = DoublePendulum.create_multiple_double_pendula(num_pendula=20)
axes = create_axes(fig=fig, pendula=pendula)
pendula_axes = list(zip(pendula, axes))
ani = animation.FuncAnimation(
fig,
animate,
np.arange(1, len(pendula[0].y)),
interval=25,
blit=True,
)
plt.show()
| 3.3125
| 3
|
phoenix/monitor/views/__init__.py
|
TeriForey/fawkes
| 7
|
12783414
|
from pyramid.events import subscriber
from phoenix.events import JobFinished, JobStarted
import logging
LOGGER = logging.getLogger("PHOENIX")
@subscriber(JobStarted)
def notify_job_started(event):
event.request.session.flash(
'<h4><img src="/static/phoenix/img/ajax-loader.gif"></img> Job Created. Please wait ...</h4>', queue='success')
@subscriber(JobFinished)
def notify_job_finished(event):
if event.succeeded():
LOGGER.info("job %s succeded.", event.job.get('title'))
# event.request.session.flash("Job <b>{0}</b> succeded.".format(event.job.get('title')), queue='success')
else:
LOGGER.warn("job %s failed.", event.job.get('title'))
# logger.warn("status = %s", event.job.get('status'))
# event.request.session.flash("Job <b>{0}</b> failed.".format(event.job.get('title')), queue='danger')
| 2.28125
| 2
|
src/classify.py
|
ybayle/ISM2017
| 7
|
12783415
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
#
# Author <NAME>
# E-mail <EMAIL>
# License MIT
# Created 03/11/2016
# Updated 11/12/2016
# Version 1.0.0
#
"""
Description of classify.py
======================
save train & test in files
read train & test
for list of classifier
train/test
gather results prec/rec/f
print best clf and results
:Example:
source activate py27
ipython
run classify.py
Only for 100 percent precision
run classify.py --train /media/sf_DATA/Datasets/Simbals/yann/train.csv --test /media/sf_DATA/Datasets/Simbals/yann/test.csv
notes
RandomForest complexity
https://www.quora.com/What-is-in-general-time-complexity-of-random-forest-What-are-the-important-parameters-that-affect-this-complexity
n instances and m attributes
computational cost of building a tree is O(mn log n).
RandomForest done in 135939ms (3mn) for 13 attributes and 192 instances
mn log n = 13*192*math.log(192) = 13122 ( 135939ms)
mn log n = 39*186*math.log(186) = 37907 (~ms)
To know the element available
    print(clf.get_params().keys())
..todo::
Add
AdaBoostClassifier
BaggingClassifier
BernoulliNB
CalibratedClassifierCV
DPGMM
http://scikit-learn.org/stable/modules/generated/sklearn.mixture.DPGMM.html
Deprecated since version 0.18: This class will be removed in 0.20.
Use sklearn.mixture.BayesianGaussianMixture with parameter
weight_concentration_prior_type='dirichlet_process' instead.
DecisionTreeClassifier
ExtraTreeClassifier
ExtraTreesClassifier
GMM
GaussianNB
GradientBoostingClassifier
KNeighborsClassifier
LDA
LabelPropagation
LabelSpreading
LinearDiscriminantAnalysis
LogisticRegression
LogisticRegressionCV
MultinomialNB
NuSVC
QDA
QuadraticDiscriminantAnalysis
RandomForestClassifier
SGDClassifier
SVC
VBGMM
_ConstantPredictor
"""
import os
import sys
import time
import json
import utils
import joblib
import argparse
import webbrowser
import multiprocessing
import numpy as np
import matplotlib.pyplot as plt
from statistics import stdev
from functools import partial
from sklearn import metrics
from sklearn.cross_validation import KFold, cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, ExtraTreesClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis, LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score
from sklearn.utils.testing import all_estimators
from sklearn import linear_model
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve, average_precision_score
def list_clf():
"""
..todo::
Do the same for:
class_weight
predict
predict_log_proba
"""
estimators = all_estimators()
for name, class_ in estimators:
if hasattr(class_, 'predict_proba'):
print(name)
def plot_clf(indir="res/"):
indir = utils.abs_path_dir(indir) + "/"
algos = []
measure = []
with open(indir + "global.csv", "r") as filep:
for line in filep:
line = line.split(",")
algos.append(line[0])
measure.append(tuple(map(float, line[1:4])))
n_groups = 3
fig, ax = plt.subplots(figsize=(10, 6))
index = np.arange(n_groups)
bar_width = 0.2
opacity = 0.4
error_config = {'ecolor': '0.3'}
color = utils.rand_color(len(algos))
rects = {}
offset = 0.15
for ind, algo in enumerate(algos):
print(ind)
print(tuple(measure[ind]))
rects[ind] = plt.bar(index + bar_width*ind + offset, tuple(measure[ind]), bar_width,
alpha=opacity,
color=color[ind],
label=algo)
plt.ylabel('Scores (in %)')
plt.xticks(index + bar_width*ind + offset, ('Precision', 'Recall', 'F-Measure'))
plt.legend()
plt.ylim(0, 1)
# spines & axis
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
art = []
lgd = ax.legend(loc=9, bbox_to_anchor=(1.1, 1.), frameon=False)
# lgd = pylab.legend(loc=9, bbox_to_anchor=(0.5, -0.1), ncol=2)
art.append(lgd)
# ax.legend()
plt.tight_layout()
img_name = "global.png"
plt.savefig(img_name, dpi=200, additional_artists=art, bbox_inches="tight")
# webbrowser.open(img_name)
# plt.show()
def read_file(filename):
"""Description of read_file
train/test example line:
filename,feat1,feat2,...,featn,tag
"""
filename = utils.abs_path_file(filename)
groundtruths = []
features = []
with open(filename, "r") as filep:
for row in filep:
line = row.split(",")
groundtruths.append(line[-1][:-1])
features.append([float(i) for i in line[1:-1]])
return features, groundtruths
def read_preds(filename):
"""Description of read_file
ex file:
ISRC,tag
"""
filename = utils.abs_path_file(filename)
isrcs = {}
with open(filename, "r") as filep:
for row in filep:
line = row.split(",")
# print(line)
isrcs[line[0]] = float(line[1])
# isrcs[line[0]] = 1.0-float(line[1])
return isrcs
def read_item_tag(filename):
"""Description of read_file
example line:
filename,tag
"""
filename = utils.abs_path_file(filename)
groundtruths = {}
with open(filename, "r") as filep:
for row in filep:
line = row.split(",")
groundtruths[line[0]] = line[1][:-1]
return groundtruths
def precision_100percent(train, test):
"""Description of precision_100percent
..todo::
1 Find best clf with default param
2 vary param of best clf and find best param
3 use best param and best clf to find recall for 100 percent precision
"""
utils.print_success("Find Recall for best Precision for each tag")
train = utils.abs_path_file(train)
test = utils.abs_path_file(test)
train_features, train_groundtruths = read_file(train)
test_features, test_groundtruths = read_file(test)
classifiers = {
# "RandomForest": RandomForestClassifier(),#n_estimators=5
"DecisionTree":DecisionTreeClassifier()#,#max_depth=10
# "SVM":SVC(kernel="linear", C=0.0205),
# "ExtraTreesClassifier":ExtraTreesClassifier(n_estimators=5, criterion="entropy", max_features="log2", max_depth=9),
# "LogisticRegression":LogisticRegression()
}
tags = list(set(test_groundtruths))
nb_tag = len(tags)
step = 0.01
# for index, tag in enumerate(["i"]):
for index, tag in enumerate(tags):
utils.print_success("Tag " + tag)
max_precision = 0
max_recall = 0
max_f_measure = 0
max_clf = ""
max_weight = 0
for key in classifiers:
clf = classifiers[key]
# for weight in np.arange(0., 0.01, 0.000001):
# for weight in np.arange(step, 1-step, step):
for weight in np.arange(0.0, 1.0, step):
print("Classifier " + key + " & Weight " + str(weight))
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
clf.set_params(class_weight={"i":weight, "s":1-weight})
clf.fit(train_features, train_groundtruths)
predictions = clf.predict(test_features)
precision = precision_score(test_groundtruths, predictions, average=None)[index]
if precision >= max_precision:
recall = recall_score(test_groundtruths, predictions, average=None)[index]
# if recall > max_recall:
max_precision = precision
max_recall = recall
max_f_measure = f1_score(test_groundtruths, predictions, average=None)[index]
max_weight = weight
max_clf = key
sys.stdout.write("\033[K")
utils.print_info("\tClassifier " + str(max_clf))
utils.print_info("\tPrecision " + str(max_precision))
utils.print_info("\tRecall " + str(max_recall))
utils.print_info("\tF-Measure " + str(max_f_measure))
utils.print_info("\tWeight " + str(max_weight))
def train_test(train, test, res_dir="res/", disp=True, outfilename=None):
"""Description of compare
compare multiple classifier and display the best one
"""
utils.print_success("Comparison of differents classifiers")
if train is not None and test is not None:
train_features = []
test_features = []
train_groundtruths = []
test_groundtruths = []
for elem in train:
train_groundtruths.append(elem)
train_features.append(train[elem])
for elem in test:
test_groundtruths.append(elem)
test_features.append(test[elem])
else:
utils.print_error("No valid data provided.")
res_dir = utils.create_dir(res_dir)
classifiers = {
# "RandomForest": RandomForestClassifier(n_estimators=5),
"KNeighbors":KNeighborsClassifier(1),
# "GaussianProcess":GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
# "DecisionTree":DecisionTreeClassifier(max_depth=5),
# "MLP":MLPClassifier(),
# "AdaBoost":AdaBoostClassifier(),
# "GaussianNB":GaussianNB(),
# "QDA":QuadraticDiscriminantAnalysis(),
# "SVM":SVC(kernel="linear", C=0.025),
# "GradientBoosting":GradientBoostingClassifier(),
# "ExtraTrees":ExtraTreesClassifier(),
# "LogisticRegression":LogisticRegression(),
# "LinearDiscriminantAnalysis":LinearDiscriminantAnalysis()
}
for key in classifiers:
utils.print_success(key)
clf = classifiers[key]
utils.print_info("\tFit")
clf.fit(train_features, train_groundtruths)
utils.print_info("\tPredict")
predictions = clf.predict(test_features)
print("Precision weighted\t" + str(precision_score(test_groundtruths, predictions, average='weighted')))
print("Recall weighted\t" + str(recall_score(test_groundtruths, predictions, average='weighted')))
print("F1 weighted\t" + str(f1_score(test_groundtruths, predictions, average='weighted')))
# print("Precision weighted\t" + str(precision_score(test_groundtruths, predictions, average=None)))
# print("Recall weighted\t" + str(recall_score(test_groundtruths, predictions, average=None)))
# print("f1 weighted\t" + str(f1_score(test_groundtruths, predictions, average=None)))
def classify(train=None, test=None, data=None, res_dir="res/", disp=True, outfilename=None):
"""Description of compare
compare multiple classifier and display the best one
"""
utils.print_success("Comparison of differents classifiers")
if data is not None:
train_features = data["train_features"]
train_groundtruths = data["train_groundtruths"]
test_features = data["test_features"]
test_groundtruths = data["test_groundtruths"]
else:
train = utils.abs_path_file(train)
test = utils.abs_path_file(test)
train_features, train_groundtruths = read_file(train)
test_features, test_groundtruths = read_file(test)
if not utils.create_dir(res_dir):
res_dir = utils.abs_path_dir(res_dir)
classifiers = {
"RandomForest": RandomForestClassifier(n_jobs=-1)
# "RandomForest": RandomForestClassifier(n_estimators=5),
# "KNeighbors":KNeighborsClassifier(3),
# "GaussianProcess":GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
# "DecisionTree":DecisionTreeClassifier(max_depth=5),
# "MLP":MLPClassifier(),
# "AdaBoost":AdaBoostClassifier(),
# "GaussianNB":GaussianNB(),
# "QDA":QuadraticDiscriminantAnalysis(),
# "SVM":SVC(kernel="linear", C=0.025),
# "GradientBoosting":GradientBoostingClassifier(),
# "ExtraTrees":ExtraTreesClassifier(),
# "LogisticRegression":LogisticRegression(),
# "LinearDiscriminantAnalysis":LinearDiscriminantAnalysis()
}
for key in classifiers:
utils.print_success(key)
clf = classifiers[key]
utils.print_info("\tFit")
clf.fit(train_features, train_groundtruths)
utils.print_info("\tPredict")
predictions = clf.predict(test_features)
if outfilename is not None:
with open(outfilename, "w") as filep:
for gt, pred in zip(test_groundtruths, predictions):
filep.write(gt + "," + pred + "\n")
# Global
data = [key]
data.append(str(precision_score(test_groundtruths, predictions, average='weighted')))
data.append(str(recall_score(test_groundtruths, predictions, average='weighted')))
data.append(str(f1_score(test_groundtruths, predictions, average='weighted')))
data = ",".join(data)
if disp:
print(data)
else:
with open(res_dir + "global.csv", "a") as filep:
filep.write(data + ",\n")
# Local
for index, tag in enumerate(list(set(train_groundtruths))):
precision = precision_score(test_groundtruths, predictions, average=None)
recall = recall_score(test_groundtruths, predictions, average=None)
f1 = f1_score(test_groundtruths, predictions, average=None)
line = key + "," + str(precision[index]) + "," + str(recall[index]) + "," + str(f1[index])
if disp:
print(line)
else:
with open(res_dir + "tag_" + tag + ".csv", "a") as filep:
filep.write(line + ",\n")
return predictions
def read_train_files(indir, separator=" "):
"""Description of read_train_files
Gather local features and GT from every individual train songs
"""
utils.print_success("Reading multiple train files")
indir = utils.abs_path_dir(indir) + "/"
groundtruths = []
features = []
included_extenstions = ["csv"]
filenames = [fn for fn in os.listdir(indir)
if any(fn.endswith(ext) for ext in included_extenstions)]
for index, filename in enumerate(filenames):
print(str(index + 1) + "/" + str(len(filenames)) + " " + filename)
sys.stdout.write("\033[F") # Cursor up one line
sys.stdout.write("\033[K") # Clear line
with open(indir + filename, "r") as filep:
for row in filep:
line = row.split(separator)
features.append([float(i) for i in line[:-1]])
groundtruths.append(line[-1][:-1])
sys.stdout.write("\033[K") # Clear line
return features, groundtruths
def read_train_file(filename):
"""
Read ONE train file
"""
groundtruths = []
features = []
filename = utils.abs_path_file(filename)
with open(filename, "r") as filep:
for line in filep:
line = line.split(",")
groundtruths.append(line[-1][:-1])
features.append(line[1:-1])
return features, groundtruths
def create_model(clf_name, features, groundtruths, outdir, classifiers):
begin = int(round(time.time() * 1000))
utils.print_success("Starting " + clf_name)
clf_dir = outdir + clf_name + "/"
utils.create_dir(clf_dir)
clf = classifiers[clf_name]
clf.fit(features, groundtruths)
joblib.dump(clf, clf_dir + clf_name + ".pkl")
utils.print_info(clf_name + " done in " + str(int(round(time.time() * 1000)) - begin) + "ms")
def create_models(outdir, train_features=None, train_groundtruths=None, train_file=None, train_dir=None, separator=" ", classifiers=None):
"""Description of create_models
Generate models for train data for different clf
In order to test later
..notes::
train_file must be formatted like:
item_name_1,feat1,feat2,...,featN,tag_or_class
item_name_2,feat1,feat2,...,featN,tag_or_class
...
item_name_N,feat1,feat2,...,featN,tag_or_class
..todo::
Manage when Provide train feat and gts or train_file
Find why commented clf cannot be used
pour train dir = /media/sf_github/yann/train/
20h04m49s Creating models
20h04m49s Reading multiple train files
20h05m04s Starting SVM
20h05m07s Starting RandomForest
20h05m11s Starting GradientBoosting
20h05m16s Starting DecisionTree
20h05m22s Starting ExtraTrees
20h05m27s Starting AdaBoost
20h05m34s Starting KNeighbors
20h05m50s KNeighbors done in 60836ms
20h06m18s ExtraTrees done in 89147ms
20h06m29s DecisionTree done in 100211ms
20h07m05s RandomForest done in 135939ms
20h08m56s AdaBoost done in 246550ms
20h13m40s GradientBoosting done in 530909ms
00h43m29s SVM done in 16719954ms
"""
utils.print_success("Creating models")
outdir = utils.abs_path_dir(outdir) + "/"
if train_file is not None:
features, groundtruths = read_train_file(train_file)
elif train_dir is not None:
features, groundtruths = read_train_files(train_dir, separator=separator)
else:
utils.print_warning("TODO Manage train feat and gts")
if classifiers is None:
classifiers = {
"RandomForest": RandomForestClassifier(),
"LogisticRegression":LogisticRegression(),
"KNeighbors":KNeighborsClassifier(),
"DecisionTree":DecisionTreeClassifier(),
"AdaBoost":AdaBoostClassifier(),
"GradientBoosting":GradientBoostingClassifier(),
"ExtraTrees":ExtraTreesClassifier(),
"SVM":SVC(kernel="linear", C=0.025, probability=True)
# "GaussianProcess":GaussianProcessClassifier(),
# "MLP":MLPClassifier(),
# "GaussianNB":GaussianNB(),
# "QDA":QuadraticDiscriminantAnalysis(),
# "LinearDiscriminantAnalysis":LinearDiscriminantAnalysis()
}
else:
if "RandomForest" in classifiers:
clf_name = "RandomForest"
begin = int(round(time.time() * 1000))
utils.print_success("Starting " + clf_name)
clf_dir = outdir + clf_name + "/"
utils.create_dir(clf_dir)
clf = RandomForestClassifier(n_jobs=-1)
# clf = RandomForestClassifier(verbose=100)
clf.fit(features, groundtruths)
joblib.dump(clf, clf_dir + clf_name + ".pkl")
utils.print_info(clf_name + " done in " + str(int(round(time.time() * 1000)) - begin) + "ms")
# # Parallel computing
# clf = []
# for key in classifiers:
# clf.append(key)
# partial_create_model = partial(create_model, features=features, groundtruths=groundtruths, outdir=outdir, classifiers=classifiers)
# # pool = multiprocessing.Pool(4)
# pool = multiprocessing.Pool(len(classifiers))
# pool.map(partial_create_model, clf) #make our results with a map call
# pool.close() #we are not adding any more processes
# pool.join() #tell it to wait until all threads are done before going on
def read_test_file(filename):
"""
Read ONE test file with content like:
feat1 feat2 ... featN
feat1 feat2 ... featN
...
feat1 feat2 ... featN
"""
features = []
filename = utils.abs_path_file(filename)
with open(filename, "r") as filep:
for line in filep:
line = line.split(" ")
line[-1] = line[-1][:-1]
feat = []
for tmp_feat in line:
feat.append(float(tmp_feat))
features.append(feat)
return features
def column(matrix, i):
return [row[i] for row in matrix]
def test_models(models_dir, test_dir, out_dir):
models_dir = utils.abs_path_dir(models_dir) + "/"
test_dir = utils.abs_path_dir(test_dir) + "/"
utils.create_dir(out_dir)
test_files = os.listdir(test_dir)
models = os.listdir(models_dir)
for model in models:
utils.print_success(model)
pred_dir = out_dir + model + "/"
utils.create_dir(pred_dir)
clf = joblib.load(models_dir + model + "/" + model + ".pkl")
for index, test_file in enumerate(test_files):
print(str(index) + "\t" + test_file)
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
test_features = read_test_file(test_dir + test_file)
predictions = clf.predict_proba(test_features)
with open(pred_dir + test_file, "w") as filep:
for pred in predictions:
filep.write(str(pred[0]) + "\n")
sys.stdout.write("\033[K")
def test_model(model, models_dir, test_dir, out_dir, test_files=None, test_file=None):
"""Description of test_model
Use one model previously fitted in order to predict_proba() or predict()
the tag for a bunch of test_files
..todo::
To enhance computation time: only compute file which are in groundtruths
if file already computed, do not recompute
"""
begin = int(round(time.time() * 1000))
utils.print_success("Testing " + model)
pred_dir = out_dir + model
clf = joblib.load(models_dir + model + "/" + model + ".pkl")
if test_files is not None:
pred_dir = pred_dir + "/"
utils.create_dir(pred_dir)
for index, test_file in enumerate(test_files):
# Check if isrc is in groundtruths to speed up computation time
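            # NOTE: this relies on a `groundtruths` mapping (ISRC -> tag) existing at
            # module level; it is not defined inside this function.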
if test_file[:12] in groundtruths:
test_features = read_test_file(test_dir + test_file)
try:
predictions = clf.predict_proba(test_features)
except AttributeError:
utils.print_warning("predict_proba does not exists for " + model + "\nRegular predict function is used.")
predictions = clf.predict(test_features)
with open(pred_dir + test_file, "w") as filep:
for pred in predictions:
filep.write(str(pred[0]) + "\n")
elif test_file is not None:
pred_dir = pred_dir + "_"
test_features = []
filename = []
with open(test_file, "r") as filep:
for index, line in enumerate(filep):
line = line.split(",")
# print(str(index) + " " + line[0])
test_features.append(line[1:-1])
filename.append(line[0])
try:
predictions = clf.predict_proba(test_features)
with open(pred_dir + "predict_proba.csv", "a") as filep2:
for filen, pred in zip(filename, predictions):
filep2.write(filen + "," + str(pred[0]) + "\n")
except:
pass
predictions = clf.predict(test_features)
with open(pred_dir + "predict.csv", "a") as filep2:
for filen, pred in zip(filename, predictions):
filep2.write(filen + "," + str(pred[0]) + "\n")
else:
utils.print_error("Error in arg for test_model() function")
utils.print_info(model + " done in " + str(int(round(time.time() * 1000)) - begin) + "ms")
def test_models_parallel(models_dir, out_dir, test_dir=None, test_file=None):
"""Description of test_models_parallel
17h16m12s DecisionTree done in 16135373ms
17h25m08s GradientBoosting done in 16671109ms
18h59m05s RandomForest done in 22307811ms
18h59m07s AdaBoost done in 22310633ms
19h18m12s ExtraTrees done in 23455779ms
"""
models_dir = utils.abs_path_dir(models_dir) + "/"
models = os.listdir(models_dir)
utils.create_dir(out_dir)
if test_dir is not None:
test_dir = utils.abs_path_dir(test_dir) + "/"
test_files = os.listdir(test_dir)
test_file = None
elif test_file is not None:
test_files = None
else:
utils.print_warning("TODO Error in arg for test_models_parallel() function")
partial_test_model = partial(test_model, models_dir=models_dir, test_dir=test_dir, out_dir=out_dir, test_files=test_files, test_file=test_file)
pool = multiprocessing.Pool(len(models))
pool.map(partial_test_model, models) #make our results with a map call
pool.close() #we are not adding any more processes
pool.join() #tell it to wait until all threads are done before going on
def cross_validation(train_filename, n_folds, outfilename):
filename = utils.abs_path_file(train_filename)
features = []
groundtruths = []
with open(filename, "r") as filep:
for line in filep:
line = line.split(",")
features.append([float(x) for x in line[1:-1]])
groundtruths.append(line[-1][:-1])
features = np.array(features)
groundtruths = np.array(groundtruths)
# Init
# if os.path.exists(outfilename):
try:
with open(outfilename, "r") as filep:
data = json.load(filep)
except:
data = {}
# else:
# data = {}
algo_name = "Method 1"
data[algo_name] = {}
data[algo_name]["uneven"] = {}
data[algo_name]["balanced"] = {}
for distribution in data[algo_name]:
data[algo_name][distribution]["precision"] = {}
data[algo_name][distribution]["recall"] = {}
data[algo_name][distribution]["f1"] = {}
for tmp in data[algo_name][distribution]:
data[algo_name][distribution][tmp]["instru"] = []
data[algo_name][distribution][tmp]["song"] = []
skf = StratifiedKFold(n_splits=n_folds)
for i in range(0, 10):
utils.print_warning("TODO for i in range")
song_precis = []
song_recall = []
song_fmeasu = []
inst_precis = []
inst_recall = []
inst_fmeasu = []
cur_fold = 0
for train, test in skf.split(features, groundtruths):
cur_fold += 1
utils.print_success("Iteration " + str(i) + "\tFold " + str(cur_fold))
dataset = {}
dataset["train_features"] = features[train]
dataset["train_groundtruths"] = groundtruths[train]
dataset["test_features"] = features[test]
dataset["test_groundtruths"] = groundtruths[test]
predictions = classify(data=dataset)
song_precis.append(precision_score(dataset["test_groundtruths"], predictions, average=None)[1])
song_recall.append(recall_score(dataset["test_groundtruths"], predictions, average=None)[1])
song_fmeasu.append(f1_score(dataset["test_groundtruths"], predictions, average=None)[1])
inst_precis.append(precision_score(dataset["test_groundtruths"], predictions, average=None)[0])
inst_recall.append(recall_score(dataset["test_groundtruths"], predictions, average=None)[0])
inst_fmeasu.append(f1_score(dataset["test_groundtruths"], predictions, average=None)[0])
song_precis = sum(song_precis) / float(len(song_precis))
song_recall = sum(song_recall) / float(len(song_recall))
song_fmeasu = sum(song_fmeasu) / float(len(song_fmeasu))
inst_precis = sum(inst_precis) / float(len(inst_precis))
inst_recall = sum(inst_recall) / float(len(inst_recall))
inst_fmeasu = sum(inst_fmeasu) / float(len(inst_fmeasu))
# Song
data[algo_name]["balanced"]["precision"]["song"].append(song_precis)
data[algo_name]["balanced"]["recall"]["song"].append(song_recall)
data[algo_name]["balanced"]["f1"]["song"].append(song_fmeasu)
# Instru
data[algo_name]["balanced"]["precision"]["instru"].append(inst_precis)
data[algo_name]["balanced"]["recall"]["instru"].append(inst_recall)
data[algo_name]["balanced"]["f1"]["instru"].append(inst_fmeasu)
with open(outfilename, "w") as outfile:
json.dump(data, outfile, indent=2)
def split(features, groundtruths, n_split):
"""Description of split
1 tmp array containing all item for each tag
2 random split of array for each tag
..todo::
manage possible errors
randomize split selection
"""
if n_split == 1:
return features, groundtruths
tags = list(set(groundtruths))
new_index = {}
for tag in tags:
new_index[tag] = []
for index, gt in enumerate(groundtruths):
new_index[gt].append(index)
new_feats = []
new_gts = []
for i in range(0, n_split):
indexes = []
for tag in tags:
            ref = len(new_index[tag]) // n_split  # integer division keeps slice bounds as ints
            indexes.append(new_index[tag][ref*i:ref*(i+1)])
"""
..todo:: manage multiple tags!
"""
indexes = indexes[0] + indexes[1]
# print(features[:5])
# print(len(indexes))
# print(len(indexes[0]))
# print(len(indexes[1]))
# sys.exit()
indexes.sort()
new_gts.append([groundtruths[j] for j in indexes])
new_feats.append([features[j] for j in indexes])
return new_feats, new_gts
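# Illustrative only: with groundtruths ["s", "s", "i", "i"] and n_split=2, each of
# the two chunks returned by split() holds one "s" and one "i" item, so the class
# balance is preserved in every chunk (assuming exactly two tags, per the todo above).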
def increasing_test(groundtruths_file, predictions_file, metric, tag):
gts = read_item_tag(groundtruths_file)
preds = read_item_tag(predictions_file)
test_groundtruths = []
predictions = []
for isrc in preds:
if isrc in gts:
test_groundtruths.append(gts[isrc])
predictions.append(preds[isrc])
res = []
if "accuracy" in metric:
res.append(accuracy_score(test_groundtruths, predictions))
elif "precision" in metric:
res.append(precision_score(test_groundtruths, predictions, average=None)[tag])
elif "recall" in metric:
res.append(recall_score(test_groundtruths, predictions, average=None)[tag])
elif "f1_score" in metric:
res.append(f1_score(test_groundtruths, predictions, average=None)[tag])
else:
utils.print_error("classify.py line 735 metric argument error")
# print("Accuracy : " + str(accuracy_score(test_groundtruths, predictions)))
# print("Precision: " + str(precision_score(test_groundtruths, predictions, average=None)))
# print("Recall : " + str(recall_score(test_groundtruths, predictions, average=None)))
# print("F-score : " + str(f1_score(test_groundtruths, predictions, average=None)))
n_splits = 10
# for n_split in range(2, n_splits+1):
for n_split in [2, 10, 100]:
print("\t" + str(n_split))
feats_array, gts_array = split(predictions, test_groundtruths, n_split)
tmp_acc = []
for feats, gts in zip(feats_array, gts_array):
if "accuracy" in metric:
cur_acc = accuracy_score(gts, feats)
elif "precision" in metric:
cur_acc = precision_score(gts, feats, average=None)[tag]
elif "recall" in metric:
cur_acc = recall_score(gts, feats, average=None)[tag]
elif "f1_score" in metric:
cur_acc = f1_score(gts, feats, average=None)[tag]
tmp_acc.append(cur_acc)
print("\t\t" + str(stdev(tmp_acc)))
accuracy = sum(tmp_acc) / float(len(tmp_acc))
res.append(accuracy)
return res
def growing_testset(train_filename, test_filename, clf, clf_name=None):
"""Description of growing_testset
1 Generate accuracy graph for global
2 Create precision / recall / f-measure figures for each tag
..todo::
intermediate file which stores predictions for each ISRC
param for number of steps
repet N times
division problem ! it does N N/2 ... N/10 but we want :
1*N/10 2*N/10 ... 10*N/10
"""
train_features, train_groundtruths = read_file(train_filename)
test_features, test_groundtruths = read_file(test_filename)
if clf_name is not None and "RANSAC" in clf_name:
train_groundtruths = [True if i =="s" else False for i in train_groundtruths]
test_groundtruths = [True if i =="s" else False for i in test_groundtruths]
clf.fit(train_features, train_groundtruths)
if clf_name is not None and "RANSAC" in clf_name:
preds_float = clf.predict(test_features)
predictions = [True if i > 0.5 else False for i in preds_float]
else:
predictions = clf.predict(test_features)
test_acc = []
# test_acc.append(accuracy_score(test_groundtruths, predictions))
test_acc.append(precision_score(test_groundtruths, predictions, average=None)[0])
print("Accuracy : " + str(test_acc))
print("Precision: " + str(precision_score(test_groundtruths, predictions, average=None)))
print("Recall : " + str(recall_score(test_groundtruths, predictions, average=None)))
print("F-score : " + str(f1_score(test_groundtruths, predictions, average=None)))
n_splits = 10
for n_split in range(2, n_splits+1):
print(n_split)
feats_array, gts_array = split(test_features, test_groundtruths, n_split)
tmp_acc = []
for feats, gts in zip(feats_array, gts_array):
if clf_name is not None and "RANSAC" in clf_name:
preds_float = clf.predict(feats)
predictions = [True if i > 0.5 else False for i in preds_float]
else:
predictions = clf.predict(feats)
# cur_acc = accuracy_score(gts, predictions)
cur_acc = precision_score(gts, predictions, average=None)[0]
tmp_acc.append(cur_acc)
print("\t" + str(cur_acc))
accuracy = sum(tmp_acc) / float(len(tmp_acc))
test_acc.append(accuracy)
return test_acc
def plot_roc(indir, gts_file, outdir):
groundtruths = read_item_tag(gts_file)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--', label="Random (0.5)")
indir = utils.abs_path_dir(indir)
for item in os.listdir(indir):
if ".csv" in item:
isrcs = read_preds(indir + "/" + item)
test_groundtruths = []
predictions = []
for isrc in isrcs:
if isrc in groundtruths:
test_groundtruths.append(groundtruths[isrc])
predictions.append(isrcs[isrc])
test_groundtruths = [tag=="s" for tag in test_groundtruths]
fpr_rf, tpr_rf, _ = roc_curve(test_groundtruths, predictions)
label = item[:-4] + " (" + str(round(roc_auc_score(test_groundtruths, predictions), 3)) + ")"
color = ""
if "VQMM" in item:
color = "ro"
elif "SVMBFF" in item:
color = "g-"
elif "GA" in item:
color = "b:"
plt.plot(fpr_rf, tpr_rf, color, label=label)
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
# plt.title('ROC curve for Algo (AUC)')
plt.legend(loc='best')
outdir = utils.abs_path_dir(outdir)
roc_fn = outdir + "Figure_3_ROC.png"
plt.savefig(roc_fn, dpi=200, bbox_inches="tight")
plt.savefig(outdir + "Figure_3_ROC.eps")
# plt.show()
plt.close()
utils.print_success("ROC curve successfully created in " + roc_fn)
def plot_precision_recall(indir, gts_file, outdir):
groundtruths = read_item_tag(gts_file)
plt.figure(1)
indir = utils.abs_path_dir(indir)
for item in os.listdir(indir):
if ".csv" in item:
isrcs = read_preds(indir + "/" + item)
test_groundtruths = []
predictions = []
for isrc in isrcs:
if isrc in groundtruths:
test_groundtruths.append(groundtruths[isrc])
predictions.append(isrcs[isrc])
test_groundtruths = [tag=="s" for tag in test_groundtruths]
precision, recall, _ = precision_recall_curve(test_groundtruths, predictions)
plt.plot(recall, precision, label=item[:-4] + " (" + str(round(average_precision_score(test_groundtruths, predictions), 3)) + ")")
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([-0.05, 1.05])
plt.title('Precision-Recall curve for Algo (AUC)')
plt.legend(loc='best')
plt.savefig(outdir + "precision_recall.png", dpi=200, bbox_inches="tight")
# plt.show()
plt.close()
utils.print_success("Precision-Recall curve created in " + outdir)
if __name__ == "__main__":
PARSER = argparse.ArgumentParser(description="Compare classifiers")
PARSER.add_argument(
"--train",
help="path to train file",
type=str,
default="data/proba_hist_train.csv",
metavar="train")
PARSER.add_argument(
"--test",
help="path to test file",
type=str,
default="data/proba_hist_test.csv",
metavar="test")
PARSER.add_argument(
"-o",
"--outdir",
help="path to output directory",
type=str,
default="res/",
metavar="outdir")
plot_roc("roc_curve/")
# plot_precision_recall("/media/sf_github/classifiers/roc_curve/")
# # models_dir = "models_paral/"
# # utils.create_dir(models_dir)
# # train_file_1 = "/media/sf_DATA/Datasets/Simbals/yann/train.csv"
# # train_dir_1 = "/media/sf_github/yann/train/"
# # create_models(train_file=train_file_1)
# # create_models(outdir=models_dir, train_dir=train_dir_1)
# # test_models_parallel(models_dir, "/media/sf_DATA/Datasets/Simbals/yaafe/results/processed/", "/media/sf_DATA/Datasets/Simbals/yaafe/proba_preds/")
# # classify(PARSER.parse_args().train, PARSER.parse_args().test, PARSER.parse_args().outdir)
# # precision_100percent(PARSER.parse_args().train, PARSER.parse_args().test)
# # plot_clf()
# """
# Samedi 26 Novembre 2016 test finaux pour mon algo
# demandé par <NAME> Matthias
# """
# train_file = "/media/sf_github/yann/2_local_predictions/method_3_trainset_normalized.txt"
# models_dir = "final_models/"
# utils.create_dir(models_dir)
# # create_models(outdir=models_dir, train_file=train_file)
# out_dir = "/media/sf_DATA/Datasets/Simbals/yann/algo_final/"
# utils.create_dir(out_dir)
# test_file="/media/sf_github/yann/2_local_predictions/method_3_testset_normalized_with_tag.txt"
# # test_models_parallel(
# # models_dir=models_dir,
# # test_file=test_file,
# # out_dir=out_dir)
# test_features = []
# isrc_order = []
# utils.print_info("Loading clf")
# clf = joblib.load("/media/sf_github/classifiers/final_modelsRandomForest/RandomForest.pkl")
# with open(test_file, "r") as filep:
# for index, line in enumerate(filep):
# line = line.split(",")
# utils.print_info(str(index) + "\t" + line[0])
# test_features.append(line[1:-1])
# isrc_order.append(line[0])
# utils.print_info("Predict_proba")
# predictions = clf.predict(test_features)
# # predictions = clf.predict_proba(test_features)
# utils.print_info("Writing results")
# with open("/media/sf_DATA/Datasets/Simbals/yann/algo_final/RF.txt" , "w") as filep2:
# for index, pred in enumerate(predictions):
# filep2.write(isrc_order[index] + "," + str(pred[0]) + "\n")
# utils.print_info("Done")
# test_groundtruths = {}
# with open("/media/sf_github/repro/groundtruths.csv", "r") as filep:
# for row in filep:
# line = row.split(",")
# test_groundtruths[line[0]] = line[1][:-1]
# for i in np.arange(0.1, 1.0, 0.1):
# outfile = open("results/Bayle2_"+str(i)+".csv", "w")
# utils.print_progress_start(str(i))
# with open("/media/sf_DATA/Datasets/Simbals/yann/algo_final/RFproba.txt", "r") as filep:
# for line in filep:
# line = line.split(",")
# if line[0] in test_groundtruths:
# if float(line[-1][:-1]) > i:
# prediction = "i"
# else:
# prediction = "s"
# outfile.write(line[0] + "," + prediction + "\n")
# utils.print_progress_end()
# outfile.close()
# # groundtruths = []
# # predictions = []
# outfile = open("results/Bayle.csv", "w")
# with open("/media/sf_DATA/Datasets/Simbals/yann/algo_final/RF.txt", "r") as filep:
# for line in filep:
# line = line.split(",")
# if line[0] in test_groundtruths:
# outfile.write(line[0] + "," + line[-1][:-1] + "\n")
# # groundtruths.append(test_groundtruths[line[0]])
# # predictions.append(line[-1][:-1])
# outfile.close()
# # utils.scores("bayle", predictions, groundtruths)
| 2.5
| 2
|
tools/check_python_format.py
|
cockroachzl/recommenders-addons
| 584
|
12783416
|
<filename>tools/check_python_format.py
#!/usr/bin/env python
from subprocess import check_call, CalledProcessError
def check_bash_call(string):
check_call(["bash", "-c", string])
def _run_format():
files_changed = False
try:
check_bash_call(
"find . -name '*.py' -print0 | xargs -0 yapf --style=./.yapf -dr")
except CalledProcessError:
check_bash_call(
"find . -name '*.py' -print0 | xargs -0 yapf --style=./.yapf -ir")
files_changed = True
if files_changed:
print("Some files have changed.")
print("Please use 'yapf --style=google -ri ./**/*.py' before commit.")
else:
print("No formatting needed.")
if files_changed:
exit(1)
def run_format():
try:
_run_format()
except CalledProcessError as error:
print("Yapf check returned exit code", error.returncode)
exit(error.returncode)
if __name__ == "__main__":
run_format()
| 2.984375
| 3
|
drone_awe/params/Validation/FreeflyAlta8/drone.py
|
rymanderson/Drone-Models
| 2
|
12783417
|
params = {
'wingtype': 'rotary',
'rotorquantity': 8,
'diagonal': 1.325,
'batterycells': 6,
'batteryvoltage': 22.2,
'batterycapacity': 10000,
'batterytype': 'LiPo',
'numbatteries': 2,
'numbatteriesconnection': 'parallel',
'max_takeoffweight': 18.1,
'takeoffweight': 6.2,
'max_payload': 9.1,
'specific_power': 145,
'props': '18x6_Folding',
'motor_max_power_continuous': 350,
'motor_max_power_peak': 950,
'temperaturemin': -20,
'temperaturemax': 45,
'thrust_ratio_at_max_takeoffweight': '1.85:1',
'rotordiameter': 0.4572,
'payload': 0.0
}
| 1.203125
| 1
|
src/richard/playlists/urls.py
|
pyvideo/richard
| 51
|
12783418
|
# richard -- video index system
# Copyright (C) 2012, 2013, 2014, 2015 richard contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import patterns, url
urlpatterns = patterns(
'richard.playlists.views',
# playlists
url(r'^playlist/$',
'playlist_list', name='playlists-playlist-list'),
url(r'playlist/delete/?$',
'playlist_delete', name='playlists-playlist-delete'),
url(r'playlist/remove-video/?$',
'playlist_remove_video', name='playlists-playlist-remove-video'),
url(r'playlist/(?P<playlist_id>[0-9]+)/?$',
'playlist', name='playlists-playlist'),
)
| 1.796875
| 2
|
lib/UITableDelegate.py
|
apimetre/MetreAppUI_v0.25
| 0
|
12783419
|
<gh_stars>0
# Python imports
import os
import numpy as np
import datetime as datetime
import time
import json
from pytz import timezone
# Pythonista imports
import ui
class TData (ui.ListDataSource):
def __init__(self, scale, items=None):
ui.ListDataSource.__init__(self, items)
self.xscale = scale
def tableview_cell_for_row(self, tableview, section, row):
cell = ui.TableViewCell()
cell.text_label.text = str(self.items[row])
scaled_size = round(self.xscale, 1) *1.5 + 12
cell.text_label.font = ("Helvetica", scaled_size)
cell.text_label.alignment = ui.ALIGN_CENTER
return cell
class ResultsTable(object):
def __init__(self, subview_, table_, xscale, yscale, cwd):
self.subview = subview_
self.table = table_
self.xscale = xscale
self.yscale = yscale
self.cwd = cwd
self.log_src = (self.cwd + '/log/log_003.json')
with open(self.log_src) as json_file:
self.log = json.load(json_file)
if self.xscale > 2:
self.spacer = ' '
else:
self.spacer = ' '
with open(self.log_src) as json_file:
self.log = json.load(json_file)
self.etime = self.log['Etime']
self.sorted_etime = sorted(list(self.etime))
self.dt_etime = []
for val in self.etime:
tval = datetime.datetime.fromtimestamp(int(val))
self.dt_etime.append(tval)
self.acetone = self.log['Acetone']
############### This is for displaying '< 2' for acetone values < 2 ##############
self.acetone_str = []
for val in self.acetone:
if float(val) < 2:
self.acetone_str.append("< 2")
else:
self.acetone_str.append(str(round(val,1)))
###################################################################################
new_sorted_etime = sorted(list(self.etime)) # This is the sorted version of self.log['Etime']
new_sorted_dt = sorted(self.dt_etime)
self.rev_sort_etime = list(reversed(new_sorted_etime))
dt_list = []
orig_dt_list = []
for i in new_sorted_dt:
dt_list.append(i.strftime("%b %d, %Y, %I:%M %p"))
for i in self.dt_etime:
orig_dt_list.append(i.strftime("%b %d, %Y, %I:%M %p"))
results = []
self.ref_list_inv = []
for i in dt_list:
results.append(i + self.spacer + self.acetone_str[np.where(np.array(orig_dt_list) == i)[0][0]] + ' ppm ' + np.array(self.log['Key'])[np.where(np.array(orig_dt_list) == i)[0][0]])
self.ref_list_inv.append(np.where(np.array(orig_dt_list) == i)[0][0])
self.ref_list = list(reversed(self.ref_list_inv))
self.table.data_source = TData(self.xscale, reversed(results))
self.table.delegate.action = self.write_notes
def update_table(self):
self.table.reload()
with open(self.log_src) as json_file:
self.log = json.load(json_file)
self.etime = self.log['Etime']
self.dt_etime = []
for val in self.etime:
tval = datetime.datetime.fromtimestamp(int(val))
self.dt_etime.append(tval)
self.acetone = self.log['Acetone']
############### This is for displaying '< 2' for acetone values < 2 ##############
self.acetone_str = []
for val in self.acetone:
if float(val) < 2:
self.acetone_str.append("< 2")
else:
self.acetone_str.append(str(round(val,1)))
###################################################################################
new_sorted_etime = sorted(list(self.etime)) # This is the sorted version of self.log['Etime']
new_sorted_dt = sorted(self.dt_etime)
self.rev_sort_etime = list(reversed(new_sorted_etime))
dt_list = []
orig_dt_list = []
for i in new_sorted_dt:
dt_list.append(i.strftime("%b %d, %Y, %I:%M %p"))
for i in self.dt_etime:
orig_dt_list.append(i.strftime("%b %d, %Y, %I:%M %p"))
results = []
self.ref_list_inv = []
for i in dt_list:
results.append(i + self.spacer + self.acetone_str[np.where(np.array(orig_dt_list) == i)[0][0]] + ' ppm ' + np.array(self.log['Key'])[np.where(np.array(orig_dt_list) == i)[0][0]])
self.ref_list_inv.append(np.where(np.array(orig_dt_list) == i)[0][0])
self.ref_list = reversed(self.ref_list_inv)
self.table.data_source = TData(self.xscale, reversed(results))
def write_notes(self, sender):
with open(self.log_src) as json_file:
self.log = json.load(json_file)
self.row_ix = sender.selected_row
self.log_entry = self.log['Notes'][np.where(np.array(self.etime) == self.rev_sort_etime[self.row_ix])[0][0]]#self.log['Notes'][self.row_ix]
self.tdialog = ui.load_view('tabledialog')
self.tdialog.name = self.table.data_source.items[sender.selected_row]
self.tdialog.frame = (0,0,600,150)
update_button = self.tdialog['update']
replace_button = self.tdialog['replace']
self.tdialog['test_notes'].text = self.log_entry
update_button.action = self.update_log_notes
replace_button.action = self.replace_log_notes
self.tdialog.frame = (0, 0, 600, 150)
self.tdialog.present('Sheet')
def update_log_notes(self, sender):
self.update_table()
current_entry = self.log_entry
entry_to_add = self.tdialog['text_entry'].text
try:
if entry_to_add[0].isupper():
try:
if current_entry[-1] != '.':
spacer = '. '
else:
spacer = ' '
except:
spacer = ''
elif entry_to_add[0].isdigit():
try:
if current_entry[-1] != '.':
spacer = '. '
else:
spacer = ', '
except:
spacer = ''
else:
try:
if current_entry[-1] != ',':
spacer = ', '
else:
spacer = ' '
except:
spacer = ''
new_entry = self.log_entry + spacer + entry_to_add
self.log['Notes'][np.where(np.array(self.etime) == self.rev_sort_etime[self.row_ix])[0][0]] = new_entry
self.log['Key'][np.where(np.array(self.etime) == self.rev_sort_etime[self.row_ix])[0][0]] = " *"
with open(self.log_src, "w") as outfile:
json.dump(self.log, outfile)
self.tdialog['test_notes'].text = self.log['Notes'][np.where(np.array(self.etime) == self.rev_sort_etime[self.row_ix])[0][0]]
self.tdialog['text_entry'].text = ''
except:
self.tdialog['text_entry'].text = ''
self.tdialog['text_entry'].end_editing()
self.update_table()
self.table.delegate.action = self.write_notes
def replace_log_notes(self, sender):
self.update_table()
current_entry = self.log_entry
entry_to_add = self.tdialog['text_entry'].text
try:
self.log['Notes'][np.where(np.array(self.etime) == self.rev_sort_etime[self.row_ix])[0][0]] = entry_to_add
if entry_to_add != '':
self.log['Key'][np.where(np.array(self.etime) == self.rev_sort_etime[self.row_ix])[0][0]] = " *"
else:
self.log['Key'][np.where(np.array(self.etime) == self.rev_sort_etime[self.row_ix])[0][0]] = ''
with open(self.log_src, "w") as outfile:
json.dump(self.log, outfile)
self.tdialog['test_notes'].text = self.log['Notes'][np.where(np.array(self.etime) == self.rev_sort_etime[self.row_ix])[0][0]]
self.tdialog['text_entry'].text = ''
except:
self.tdialog['text_entry'].text = ''
self.tdialog['text_entry'].end_editing()
self.update_table()
self.table.delegate.action = self.write_notes
| 2.5
| 2
|
scripts/venv/lib/python2.7/site-packages/cogent/align/pycompare.py
|
sauloal/cnidaria
| 3
|
12783420
|
#!/usr/bin/env python
# Very slow. See compare.pyx
from __future__ import division
import cogent.util.progress_display as UI
from cogent.util.modules import importVersionedModule, ExpectedImportError
__author__ = "<NAME>"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def py_segments_from_diagonal(seq1, seq2, window, threshold, min_gap_length,
diagonal):
d_segments = []
was_high = False
scores = [0] * window
score = 0
(i_lo, i_hi) = max(0, -diagonal), min(len(seq1), len(seq2)-diagonal)
for i in range(i_lo, i_hi):
j = i + diagonal
k = i % window
score -= scores[k]
scores[k] = seq1[i] == seq2[j]
score += scores[k]
if score >= threshold:
if not was_high:
start = max(i_lo, i - window)
if d_segments and start-d_segments[-1][1] < min_gap_length:
(start, jumped_end) = d_segments.pop()
was_high = True
else:
if was_high:
d_segments.append((start, i))
was_high = False
if was_high:
d_segments.append((start, i_hi))
return d_segments
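# Illustrative only: with seq1 = seq2 = "ACGT", window=2, threshold=2 and
# diagonal=0, every length-2 window matches, so py_segments_from_diagonal
# returns a single segment [(0, 4)] covering the whole main diagonal.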
try:
_compare = importVersionedModule('_compare', globals(),
(1, 3), "slow Python dotplot")
segments_from_diagonal = _compare.segments_from_diagonal
except ExpectedImportError:
segments_from_diagonal = py_segments_from_diagonal
@UI.display_wrap
def dotplot(seq1, seq2, window, threshold, min_gap_length=0, band=None, ui=None):
"""A list of line segments covering the window-mers with identical matches > threshold
Gaps of size less than min_gap will be hidden, which saves on line segments.
if 'band' is not None then it limits the searched area
"""
def one_diagonal(dia):
segs = segments_from_diagonal(seq1, seq2, window, threshold,
min_gap_length, dia)
return [((start, start+dia), (end, end+dia)) for (start, end) in segs]
if band is None:
band = max(len(seq1), len(seq2))
diagonals = range(-min(len(seq1), band), min(len(seq2), band)+1)
result = []
for diag_segments in ui.imap(one_diagonal, diagonals, noun='offset'):
result.extend(diag_segments)
return result
| 2.203125
| 2
|
KeylessTranspositionCipher.py
|
roysaurabh1308/Cryptographic-Algorithms
| 0
|
12783421
|
<gh_stars>0
# Keyless Transposition Cipher
def encrypt(plain):
i = 0
j = 1
cipher = ""
cipher1 = ""
cipher2 = ""
while(i < len(plain)):
cipher1 += plain[i]
i += 2
while(j < len(plain)):
cipher2 += plain[j]
j += 2
pos = len(cipher1)
cipher = cipher1 + cipher2
return(cipher, pos)
def decrypt(cipher, pos):
i = 0
j = pos
k = j
plain = ""
while(i < k and j < len(cipher)):
plain += cipher[i] + cipher[j]
i += 1
j += 1
if(i < k):
plain += cipher[i]
return plain
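# Illustrative round trip: encrypt("hello") collects the even-index characters
# "hlo" followed by the odd-index characters "el", giving ("hloel", 3);
# decrypt("hloel", 3) interleaves the two halves back into "hello".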
plain = input("Enter the plain text: ")
cipher, pos = encrypt(plain)
print("After encryption: ", cipher)
plain = decrypt(cipher, pos)
print("After decryption: ", plain)
| 4.125
| 4
|
examples/csj/s0/csj_tools/wn.2.prep.text.py
|
treeaaa/wenet
| 1,166
|
12783422
|
import os
import sys
# train test1 test2 test3
def readtst(tstfn):
outlist = list()
with open(tstfn) as br:
for aline in br.readlines():
aline = aline.strip()
outlist.append(aline)
return outlist
def split_train_tests_xml(xmlpath, test1fn, test2fn, test3fn):
test1list = readtst(test1fn)
test2list = readtst(test2fn)
test3list = readtst(test3fn)
outtrainlist = list() # full path ".xml.simp" files
outt1list = list() # test 1, full path ".xml.simp" files
outt2list = list()
outt3list = list()
for afile in os.listdir(xmlpath):
if not afile.endswith('.xml.simp'):
continue
afile2 = xmlpath + '/' + afile
aid = afile.split('.')[0]
if aid in test1list:
outt1list.append(afile2)
elif aid in test2list:
outt2list.append(afile2)
elif aid in test3list:
outt3list.append(afile2)
else:
outtrainlist.append(afile2)
return outtrainlist, outt1list, outt2list, outt3list
def all_wavs(wavpath):
wavlist = list()
for afile in os.listdir(wavpath):
if not afile.endswith('.wav'):
continue
afile2 = wavpath + '/' + afile
wavlist.append(afile2)
return wavlist
def gen_text(xmllist, outpath):
# id \t text
# e.g., /workspace/asr/wenet/examples/csj/s0/data/xml/S11M1689.xml.simp
# ID = S11M1689_stime_etime
outtxtfn = os.path.join(outpath, 'text')
with open(outtxtfn, 'w') as bw:
for xmlfn in xmllist:
aid = xmlfn.split('/')[-1]
aid2 = aid.split('.')[0]
with open(xmlfn) as br:
for aline in br.readlines():
aline = aline.strip()
# stime \t etime \t text1 \t text2 \t text3 \t text4 \t text5
cols = aline.split('\t')
# TODO different between "< 7" and "< 4"? strange
# -> use "< 4", DO NOT use "< 7" !
if len(cols) < 4:
continue
stime = cols[0]
etime = cols[1]
atxt = cols[3].replace(' ', '')
afullid = '{}_{}_{}'.format(aid2, stime, etime)
aoutline = '{}\t{}\n'.format(afullid, atxt)
bw.write(aoutline)
def parse_xml_set(xmllist):
outset = set()
for xml in xmllist:
aid = xml.split('/')[-1]
aid2 = aid.split('.')[0]
outset.add(aid2)
return outset
def gen_wav_scp(xmllist, wavlist, outpath):
# xmlset = pure id set, alike 'S04F1228'
# can be from train, test1, test2, or test3
xmlset = parse_xml_set(xmllist)
outwavscpfn = os.path.join(outpath, 'wav.scp')
with open(outwavscpfn, 'w') as bw:
for wav in wavlist:
# wav is alike "/workspace/asr/wenet/examples/csj/s0/data
# /wav/S04F1228.wav_00458.875_00459.209.wav"
aid = wav.split('/')[-1]
cols = aid.split('_')
aid2 = cols[0].split('.')[0]
if aid2 not in xmlset:
continue
stime = cols[1]
etime = cols[2].replace('.wav', '')
afullid = '{}_{}_{}'.format(aid2, stime, etime)
wavabspath = os.path.abspath(wav)
aoutline = '{}\t{}\n'.format(afullid, wavabspath)
bw.write(aoutline)
def prep_text_wavscp(
xmlpath, wavpath, test1fn, test2fn, test3fn,
outtrainpath, out1path, out2path, out3path):
trainlist, t1list, t2list, t3list = split_train_tests_xml(
xmlpath,
test1fn,
test2fn,
test3fn)
wavlist = all_wavs(wavpath)
gen_text(trainlist, outtrainpath)
gen_text(t1list, out1path)
gen_text(t2list, out2path)
gen_text(t3list, out3path)
gen_wav_scp(trainlist, wavlist, outtrainpath)
gen_wav_scp(t1list, wavlist, out1path)
gen_wav_scp(t2list, wavlist, out2path)
gen_wav_scp(t3list, wavlist, out3path)
if __name__ == '__main__':
if len(sys.argv) < 10:
print(
"Usage: {}".format(sys.argv[0]) + "<xmlpath> " +
"<wavpath> <test1fn> <test2fn> <test3fn> " +
"<outtrainpath> <out1path> <out2path> <out3path>")
exit(1)
xmlpath = sys.argv[1]
wavpath = sys.argv[2]
test1fn = sys.argv[3]
test2fn = sys.argv[4]
test3fn = sys.argv[5]
outtrainpath = sys.argv[6]
out1path = sys.argv[7]
out2path = sys.argv[8]
out3path = sys.argv[9]
prep_text_wavscp(xmlpath, wavpath, test1fn,
test2fn, test3fn, outtrainpath,
out1path, out2path, out3path)
| 2.65625
| 3
|
Python/smallest-integer-divisible-by-k.py
|
RideGreg/LeetCode
| 1
|
12783423
|
# Time: O(k)
# Space: O(1)
# 1015
# Given a positive integer K, you need to find the smallest positive integer N such that
# N is divisible by K, and N only contains the digit 1.
#
# Return the length of N. If there is no such N, return -1.
# 1 <= K <= 10^5
class Solution(object):
def smallestRepunitDivByK(self, K):
"""
:type K: int
:rtype: int
"""
        # by observation: N only contains the digit 1, so it ends in 1 and can never be divisible by 2 or 5
if K % 2 == 0 or K % 5 == 0:
return -1
# Solution: for K not a multiple of 2 or 5, at least one from the K integers (1, 11, 111,
# ... 11..11 (K-length) will be divisible by K.
# let f(N) is a N-length integer only containing digit 1
# if there is no N in range [1..K] s.t. f(N) % K = 0
# then for the K integers f(1), f(2),... f(K),
# => there must be K remainders of f(N) % K in range [1..K-1] excluding 0
# => due to pigeonhole principle, there must be at least 2 same remainders
# => there must be some x, y in range [1..K] and x > y s.t. f(x) % K = f(y) % K
# => (f(x) - f(y)) % K = 0
# => (f(x-y) * 10^y) % K = 0
# => due to (x-y) in range [1..K] and f(x-y) % K != 0
# => 10^y % K = 0
# => K % 2 = 0 or K % 5 = 0
# => -><-
# it proves that there must be some N in range (1..K) s.t. f(N) % K = 0
# In fact, current remainder determines the next remainder, due to next_mod = (mod*10+1) % K
# so if a duplicate mod is found, it starts a loop. E.g.
# 1 % 6 = 1
# 11 % 6 = 5
# 111 % 6 = 3
# 1111 % 6 = 1
# 11111 % 6 = 5
# 111111 % 6 = 3
r = 0
for N in range(1, K+1):
            r = (r*10+1) % K  # reducing modulo K keeps the value small without affecting the result
if not r:
return N
assert(False)
return -1 # never reach
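# Minimal brute-force sketch (illustrative, not part of the original solution): verify the
# pigeonhole argument above for small K by computing the repunit length directly.
def _brute_force_check(limit=200):
    sol = Solution()
    for K in range(1, limit + 1):
        expected = -1
        if K % 2 != 0 and K % 5 != 0:
            n, repunit = 1, 1
            while repunit % K:
                repunit = repunit * 10 + 1
                n += 1
            expected = n
        assert sol.smallestRepunitDivByK(K) == expected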
| 3.5625
| 4
|
src/sima/riflex/generatortorquefault.py
|
SINTEF/simapy
| 0
|
12783424
|
# Generated with GeneratorTorqueFault
#
from enum import Enum
from enum import auto
class GeneratorTorqueFault(Enum):
""""""
NONE = auto()
LOSS = auto()
BACKUP = auto()
def label(self):
if self == GeneratorTorqueFault.NONE:
return "No generator torque fault"
if self == GeneratorTorqueFault.LOSS:
return "Total loss of generator torque"
if self == GeneratorTorqueFault.BACKUP:
return "Backup power - torque follows scaled torque control"
| 3.078125
| 3
|
pynnet/wrapper.py
|
walkerning/kaldi-fix
| 0
|
12783425
|
import os
def test_net(dir_net='exp/dnn4_pretrain-dbn_dnn/final.nnet'):
flag = os.system('./local/nnet/test_wer.sh %s >/dev/null 2>&1 '%dir_net)#
assert flag == 0
os.system('bash show_dnn test > res.log')
content = open('res.log').read()
res = float(content.split()[1])
return res
def finetune_net(dir_net = 'exp/dnn4_pretrain-dbn_dnn/nnet.init',
exp_dir='exp/dnn4_pretrain-dbn_dnn', iters=16, lr=0.002,
momentum=0, l2_penalty=0, halve_every_k=2):
flag = os.system('./finetune_dnn.sh --dir %s --nnet-init %s --iters %d --learning-rate %f --momentum %f --l2-penalty %f --halve-every-k %d'%(exp_dir, dir_net, iters, lr, momentum, l2_penalty, halve_every_k))
return flag
| 1.90625
| 2
|
training/twop/onevone.py
|
NoMoor/83Plus
| 0
|
12783426
|
<filename>training/twop/onevone.py
from math import pi
from twop.defending import Defending
onevone_exercises = [
Defending("Optimizer Testing", car_y=1000, car_spin=-pi / 2),
]
| 1.671875
| 2
|
Android/NDK/android-ndk-r20b-win/prebuilt/windows-x86_64/lib/python2.7/plat-win32/STDDEF.py
|
X018/CCTOOL
| 0
|
12783427
|
# Generated by h2py from /buildbot/src/googleplex-android/ndk-release-r20/prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8/bin/../lib/gcc/x86_64-w64-mingw32/4.8.3/include/stddef.h
__WCHAR_TYPE__ = int
NULL = 0
| 0.933594
| 1
|
qap-lp/plots.py
|
j-kota/LP-QAP
| 0
|
12783428
|
<filename>qap-lp/plots.py
import numpy as np
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.spatial import ConvexHull
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
if __name__ == '__main__':
Noise = np.array([0.0, 0.005, 0.01, 0.015, 0.02, 0.025,0.030,0.040, 0.050])
SDP_ErdosRenyi_means = [1, 1, 1, 1, 1 ,0.98, 1, 0.96, 0.94]
SDP_ErdosRenyi_stds = [0, 0, 0, 0, 0, 0.03, 0, 0.06, 0.08]
LowRank_ErdosRenyi_means = [1, 0.86, 0.84, 0.76, 0.66, 0.66, 0.5,
0.35, 0.30]
LowRank_ErdosRenyi_stds = [0, 0.15, 0.15, 0.15, 0.25, 0.25, 0.3,
0.3, 0.3]
SDP_Regular_means = [0.08, 0.06, 0.04, 0.03, 0.02, 0.03, 0.05, 0.02,0.03]
SDP_Regular_stds = [0.08, 0.04,0.04,0.02,0.02,0.02,0.02,0.02,0.01]
LowRank_Regular_means = [1, 0.83, 0.65, 0.4, 0.42,0.3,0.34,0.14,0.14]
LowRank_Regular_stds = [0,0.25,0.25,0.25,0.25,0.25,0.25,0.15,0.20]
filename = 'results.npz'
Names = ['ErdosRenyi Graph Model', 'Random Regular Graph Model']
Models = ['ErdosRenyi', 'Regular']
Comparatives_ErdosRenyi = [SDP_ErdosRenyi_means, SDP_ErdosRenyi_stds,
LowRank_ErdosRenyi_means,
LowRank_ErdosRenyi_stds]
Comparatives_Regular = [SDP_Regular_means, SDP_Regular_stds,
LowRank_Regular_means,
LowRank_Regular_stds]
Comparatives = {'ErdosRenyi':Comparatives_ErdosRenyi,
'Regular':Comparatives_Regular}
main_path = '/home/anowak/tmp/'
for j, model in enumerate(Models):
Dire = ['/home/anowak/tmp/QAP0.000' + model + '_3/',
'/home/anowak/tmp/QAP0.005' + model + '_3/',
'/home/anowak/tmp/QAP0.010' + model + '_3/',
'/home/anowak/tmp/QAP0.015' + model + '_3/',
'/home/anowak/tmp/QAP0.020' + model + '_3/',
'/home/anowak/tmp/QAP0.025' + model + '_3/',
'/home/anowak/tmp/QAP0.030' + model + '_3/',
'/home/anowak/tmp/QAP0.040' + model + '_3/',
'/home/anowak/tmp/QAP0.050' + model + '_3/',]
ErrorBars = []
AccuracyMean = []
H = []
L = []
for k, dire in enumerate(Dire):
path = os.path.join(dire, filename)
npz = np.load(path)
accuracy = npz['accuracy_train']
# print(accuracy.shape)
accuracy_mean = []
for i in range(accuracy.shape[0]-100):
std = accuracy[i:i+100].std()
accuracy_mean.append(accuracy[i:i+100].mean())
ErrorBars.append(std)
AccuracyMean.append(accuracy_mean[-1])
# print(accuracy_mean)
plt.figure(0)
plt.clf()
plt.plot(accuracy_mean, c='r')
plt.savefig(dire + 'accuracy_mean.png')
ErrorBars = np.array(ErrorBars)
AccuracyMean = np.array(AccuracyMean)
fig = plt.figure(1)
plt.clf()
# plt.subplot(1, 2, j + 1)
# SDP
SDP = Comparatives[model][:2]
LowRank = Comparatives[model][2:]
print(len(SDP[0]), len(SDP[1]))
plt.errorbar(Noise, SDP[0], yerr=[SDP[1], SDP[1]],
fmt='-o', c='b', label='SDP')
plt.errorbar(Noise, LowRank[0], yerr=[LowRank[1], LowRank[1]],
fmt='-o', c='g', label='LowRankAlign(k=4)')
plt.errorbar(Noise, AccuracyMean, yerr=[ErrorBars, ErrorBars],
fmt='-o', c='r', label='GNN')
# h1, l1 = fig.get_legend_handles_labels()
plt.xlabel('Noise')
plt.ylabel('Recovery Rate')
plt.title(Names[j], fontsize=25)
# l = [lsdp, llrk, lres]
# names ['SDP', 'LowRank', 'GNN']
if j == 0:
plt.legend(loc='lower left', prop={'size':12})
plt.savefig(main_path + model + '.eps', format='eps')
# plt.tight_layout(pad=0.4, w_pad=2.0, h_pad=1.0)
# fig.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),
# fancybox=True, shadow=True, ncol=5)
# plt.savefig(main_path + 'QAP_results.png')
| 1.78125
| 2
|
examples/xrd/o2t_simulation_nimnsb_inp.py
|
LukeSkywalker92/heuslertools
| 0
|
12783429
|
from heuslertools.xrd import O2TSimulation
from heuslertools.xrd.materials import NiMnSb, InP
import xrayutilities as xu
import numpy as np
import matplotlib.pyplot as plt
##### LAYERSTACK #####
sub = xu.simpack.Layer(InP, np.inf)
lay1 = xu.simpack.Layer(NiMnSb, 400, relaxation=0.0)
layerstack = xu.simpack.PseudomorphicStack001('NiMnSb on InP', sub, lay1)
print(layerstack)
xrd = O2TSimulation(layerstack)
om, intensity = xrd.simulate_o2t(0, 0, 2, 0.4)
plt.figure()
plt.semilogy(om, intensity)
plt.show()
| 2.125
| 2
|
reminder.py
|
NikitaMikhailov/bot_herobot
| 0
|
12783430
|
<reponame>NikitaMikhailov/bot_herobot<filename>reminder.py
#!/usr/bin/env python3
from vk_api import VkUpload
from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType
import datetime, requests, vk_api, calendar
from vk_api.utils import get_random_id
# handle rolling over to tomorrow on the last day of the month !!done
# validate that the given date is correct !!done
# fix letter case in the reminder text !!done
# allow specifying the time with a dot and without minutes
# add reminders a year ahead !!done and for a specific year
# allow writing the month as a word
f=open('/root/bot_herobot_chat/token.txt','r')
token=f.read()
f.close()
session = requests.Session()
vk_session = vk_api.VkApi(token=token)
longpoll = VkBotLongPoll(vk_session, '178949259')
vk = vk_session.get_api()
upload = VkUpload(vk_session)  # for uploading images
# file_zametki = open("/root/bot_herobot_ls/resurses/zametki.txt", "w", encoding="utf8")
# file_zametki.close()
format_command = "Формат команды:\n'напомни мне\n+\nутром/днем/вечером\nв hh:mm\nзавтра в hh:mm\nзавтра утром/днем/вечером\nday.month\nday.month в hh:mm\n+\nтекст напоминания.'\nПросмотреть список напоминаний: 'напомни мне все'"
uncorrect_comand = "Команда напоминания некорректна"
def sent_message(text, user_id):
vk.messages.send(
user_id=user_id,
random_id=get_random_id(),
message=text
)
def refactor_time_start(time_start):
tm_st = time_start.split(':')
if len(tm_st) < 2:
return time_start.split('.')
else:
return tm_st
def correct_date(date_start):
if int(date_start.split('.')[1]) < 1 or int(date_start.split('.')[1]) > 12 or int(
date_start.split('.')[0]) < 1 or int(date_start.split('.')[0]) > \
calendar.monthrange(datetime.datetime.now().year, int(date_start.split('.')[1]))[1]:
text = 'Дата задана некорректно'
sent_message(text, event.obj.peer_id)
return False
if int(date_start.split('.')[1]) <= datetime.datetime.now().month and int(
date_start.split('.')[0]) < datetime.datetime.now().day or int(
date_start.split('.')[1]) < datetime.datetime.now().month:
text = 'Указанная дата меньше текущей, напоминание сработает только на следующий год'
sent_message(text, event.obj.peer_id)
return True
return True
def correct_time(time_start, today_flag):
time_start = refactor_time_start(time_start)
if len(time_start)>2:
text = 'Время задано некорректно'
sent_message(text, event.obj.peer_id)
return False
if int(time_start[0]) < 0 or int(time_start[0]) > 23 or int(
time_start[1]) < 0 or int(time_start[1]) > 59:
text = 'Время задано некорректно'
sent_message(text, event.obj.peer_id)
return False
if today_flag is True and datetime.time(int(time_start[0]),
int(time_start[1])) <= datetime.time(
datetime.datetime.now().hour, datetime.datetime.now().minute):
text = 'Указанное время меньше текущего'
sent_message(text, event.obj.peer_id)
return False
return True
for event in longpoll.listen():
if event.type == VkBotEventType.MESSAGE_NEW and event.obj.text and event.from_user:
input_text = event.obj.text
event.obj.text = event.obj.text.lower()
if event.obj.text[:11:] == "напомни мне":
try:
if event.obj.text == "напомни мне":
text = "Я могу помочь тебе не забывать важную информацию"
sent_message(text, event.obj.peer_id)
sent_message(format_command, event.obj.peer_id)
elif event.obj.text == "напомни мне все" or event.obj.text == 'напомни мне всё':
f = open('/root/bot_herobot_ls/resurses/zametki.txt', encoding='utf8')
text = 'Список твоих напоминаний\n'
for line in f:
zametka = line.split('***#***')
if zametka != ['\n'] and zametka[5] == str(event.obj.peer_id):
text += zametka[1]+'.'+zametka[0]+' на '+zametka[2]+':'+zametka[3]+' с текстом "' + zametka[4].capitalize()+'"\n'
if text == 'Список твоих напоминаний\n':
text = 'У тебя нет напоминаний'
sent_message(text, event.obj.peer_id)
f.close()
elif event.obj.text[11:14:] == " в ":
time_start = event.obj.text.split(' ')[3]
if correct_time(time_start, True) is True:
date_start = [datetime.datetime.now().month, datetime.datetime.now().day]
time_start = refactor_time_start(time_start)
file_zametki = open("/root/bot_herobot_ls/resurses/zametki.txt", "a", encoding="utf8")
file_zametki.write(str(date_start[0]) + '***#***' + str(date_start[1]) + '***#***' + time_start[
0] + '***#***' + time_start[1] + '***#***' + input_text[event.obj.text.find(' ', 16,
-1) + 1::] + '***#***' + str(
event.obj.peer_id) + '***#***' + '\n')
file_zametki.close()
text = "Напоминание с текстом: '" + input_text[
event.obj.text.find(' ', 16, -1) + 1::] + "' в " + \
time_start[0] + ':' + time_start[1] + " на сегодня создано."
sent_message(text, event.obj.peer_id)
elif event.obj.text[11:11 + len(" завтра в "):] == " завтра в ":
time_start = event.obj.text.split(' ')[4]
if correct_time(time_start, False) is True:
date_start = [datetime.datetime.now().month, int(datetime.datetime.now().day) + 1]
if date_start[1] > calendar.monthrange(datetime.datetime.now().year, int(date_start[0]))[1]:
date_start[0] += 1
date_start[1] = 1
if date_start[0] > 12:
date_start[0] = 1
time_start = refactor_time_start(time_start)
file_zametki = open("/root/bot_herobot_ls/resurses/zametki.txt", "a", encoding="utf8")
file_zametki.write(str(date_start[0]) + '***#***' + str(date_start[1]) + '***#***' + time_start[
0] + '***#***' + time_start[1] + '***#***' + input_text[event.obj.text.find(' ',
11 + len(
" завтра в ") + 2,
-1) + 1::] + '***#***' + str(
event.obj.peer_id) + '***#***' + '\n')
file_zametki.close()
text = "Напоминание с текстом: '" + input_text[event.obj.text.find(' ',
11 + len(
" завтра в ") + 2,
-1) + 1::] + "' в " + \
time_start[0] + ':' + time_start[1] + " на завтра создано."
sent_message(text, event.obj.peer_id)
elif event.obj.text[11:11 + len(" завтра утром "):] == " завтра утром ":
time_start = "9:00"
if correct_time(time_start, False) is True:
date_start = [datetime.datetime.now().month, int(datetime.datetime.now().day) + 1]
if date_start[1] > calendar.monthrange(datetime.datetime.now().year, int(date_start[0]))[1]:
date_start[0] += 1
date_start[1] = 1
if date_start[0] > 12:
date_start[0] = 1
time_start = time_start.split(':')
file_zametki = open("/root/bot_herobot_ls/resurses/zametki.txt", "a", encoding="utf8")
file_zametki.write(str(date_start[0]) + '***#***' + str(date_start[1]) + '***#***' + time_start[
0] + '***#***' + time_start[1] + '***#***' + input_text[event.obj.text.find(' ',
11 + len(
" завтра утром ") - 2,
-1) + 1::] + '***#***' + str(
event.obj.peer_id) + '***#***' + '\n')
file_zametki.close()
text = "Напоминание с текстом: '" + input_text[event.obj.text.find(' ',
11 + len(
" завтра утром ") - 2,
-1) + 1::] + "' в " + \
time_start[0] + ':' + time_start[1] + " на завтра создано."
sent_message(text, event.obj.peer_id)
elif event.obj.text[11:11 + len(" завтра днем "):] == " завтра днем " or event.obj.text[11:11 + len(
" завтра днём "):] == " завтра днём ":
time_start = "13:00"
if correct_time(time_start, False) is True:
date_start = [datetime.datetime.now().month, int(datetime.datetime.now().day) + 1]
if date_start[1] > calendar.monthrange(datetime.datetime.now().year, int(date_start[0]))[1]:
date_start[0] += 1
date_start[1] = 1
if date_start[0] > 12:
date_start[0] = 1
time_start = time_start.split(':')
file_zametki = open("/root/bot_herobot_ls/resurses/zametki.txt", "a", encoding="utf8")
file_zametki.write(str(date_start[0]) + '***#***' + str(date_start[1]) + '***#***' + time_start[
0] + '***#***' + time_start[1] + '***#***' + input_text[event.obj.text.find(' ',
11 + len(
" завтра днем ") - 2,
-1) + 1::] + '***#***' + str(
event.obj.peer_id) + '***#***' + '\n')
file_zametki.close()
text = "Напоминание с текстом: '" + input_text[event.obj.text.find(' ',
11 + len(
" завтра днем ") - 2,
-1) + 1::] + "' в " + \
time_start[0] + ':' + time_start[1] + " на завтра создано."
sent_message(text, event.obj.peer_id)
elif event.obj.text[11:11 + len(" завтра вечером "):] == " завтра вечером ":
time_start = "18:00"
if correct_time(time_start, False) is True:
date_start = [datetime.datetime.now().month, int(datetime.datetime.now().day) + 1]
if date_start[1] > calendar.monthrange(datetime.datetime.now().year, int(date_start[0]))[1]:
date_start[0] += 1
date_start[1] = 1
if date_start[0] > 12:
date_start[0] = 1
time_start = time_start.split(':')
file_zametki = open("/root/bot_herobot_ls/resurses/zametki.txt", "a", encoding="utf8")
file_zametki.write(str(date_start[0]) + '***#***' + str(date_start[1]) + '***#***' + time_start[
0] + '***#***' + time_start[1] + '***#***' + input_text[event.obj.text.find(' ',
11 + len(
" завтра вечером ") - 2,
-1) + 1::] + '***#***' + str(
event.obj.peer_id) + '***#***' + '\n')
file_zametki.close()
text = "Напоминание с текстом: '" + input_text[event.obj.text.find(' ',
11 + len(
" завтра вечером ") - 2,
-1) + 1::] + "' в " + \
time_start[0] + ':' + time_start[1] + " на завтра создано."
sent_message(text, event.obj.peer_id)
elif event.obj.text[11:11 + len(" утром "):] == " утром ":
time_start = "9:00"
if correct_time(time_start, True) is True:
date_start = [datetime.datetime.now().month, datetime.datetime.now().day]
time_start = time_start.split(':')
file_zametki = open("/root/bot_herobot_ls/resurses/zametki.txt", "a", encoding="utf8")
file_zametki.write(str(date_start[0]) + '***#***' + str(date_start[1]) + '***#***' + time_start[
0] + '***#***' + time_start[1] + '***#***' + input_text[event.obj.text.find(' ',
11 + len(
" утром ") - 2,
-1) + 1::] + '***#***' + str(
event.obj.peer_id) + '***#***' + '\n')
file_zametki.close()
text = "Напоминание с текстом: '" + input_text[event.obj.text.find(' ',
11 + len(
" утром ") - 2,
-1) + 1::] + "' в " + \
time_start[0] + ':' + time_start[1] + " на сегодня создано."
sent_message(text, event.obj.peer_id)
elif event.obj.text[11:11 + len(" днем "):] == " днем " or event.obj.text[
11:11 + len(" днём "):] == " днём ":
time_start = "13:00"
if correct_time(time_start, True) is True:
date_start = [datetime.datetime.now().month, datetime.datetime.now().day]
time_start = time_start.split(':')
file_zametki = open("/root/bot_herobot_ls/resurses/zametki.txt", "a", encoding="utf8")
file_zametki.write(str(date_start[0]) + '***#***' + str(date_start[1]) + '***#***' + time_start[
0] + '***#***' + time_start[1] + '***#***' + input_text[event.obj.text.find(' ',
11 + len(
" днем ") - 2,
-1) + 1::] + '***#***' + str(
event.obj.peer_id) + '***#***' + '\n')
file_zametki.close()
text = "Напоминание с текстом: '" + input_text[event.obj.text.find(' ',
11 + len(
" днем ") - 2,
-1) + 1::] + "' в " + \
time_start[0] + ':' + time_start[1] + " на сегодня создано."
sent_message(text, event.obj.peer_id)
elif event.obj.text[11:11 + len(" вечером "):] == " вечером ":
time_start = "18:00"
if correct_time(time_start, True) is True:
date_start = [datetime.datetime.now().month, datetime.datetime.now().day]
time_start = time_start.split(':')
file_zametki = open("/root/bot_herobot_ls/resurses/zametki.txt", "a", encoding="utf8")
file_zametki.write(str(date_start[0]) + '***#***' + str(date_start[1]) + '***#***' + time_start[
0] + '***#***' + time_start[1] + '***#***' + input_text[event.obj.text.find(' ',
11 + len(
" вечером ") - 2,
-1) + 1::] + '***#***' + str(
event.obj.peer_id) + '***#***' + '\n')
file_zametki.close()
text = "Напоминание с текстом: '" + input_text[event.obj.text.find(' ',
11 + len(
" вечером ") - 2,
-1) + 1::] + "' в " + \
time_start[0] + ':' + time_start[1] + " на сегодня создано."
sent_message(text, event.obj.peer_id)
elif event.obj.text.split(' ')[2][0].isdigit():
date_start = event.obj.text.split(' ')[2]
if len(event.obj.text.split(' ')) < 5 or event.obj.text.split(' ')[4][0].isdigit() is False:
event.obj.text = event.obj.text.split(' ')
event.obj.text.insert(3, 'в')
event.obj.text.insert(4, '06:00')
input_text = input_text.split(' ')
input_text.insert(3, 'в')
input_text.insert(4, '06:00')
input_text = ' '.join(input_text)
event.obj.text = ' '.join(event.obj.text)
time_start = event.obj.text.split(' ')[4]
if correct_time(time_start, False) is True:
if correct_date(date_start) is True:
if int(date_start.split('.')[0]) == datetime.datetime.now().day and int(
date_start.split('.')[
1]) == datetime.datetime.now().month:
if correct_time(time_start, True) is False:
continue
else:
date_start = date_start.split('.')
time_start = refactor_time_start(time_start)
file_zametki = open("/root/bot_herobot_ls/resurses/zametki.txt", "a",
encoding="utf8")
file_zametki.write(
str(date_start[1]) + '***#***' + str(date_start[0]) + '***#***' + time_start[
0] + '***#***' + time_start[1] + '***#***' + input_text[
event.obj.text.find(' ',
event.obj.text.find(
' в ') + 3,
-1) + 1::] + '***#***' + str(
event.obj.peer_id) + '***#***' + '\n')
file_zametki.close()
text = "Напоминание с текстом: '" + input_text[event.obj.text.find(' ',
event.obj.text.find(
' в ') + 3,
-1) + 1::] + "' в " + \
time_start[0] + ':' + time_start[1] + " на " + str(
date_start[0]) + '.' + str(
date_start[1]) + " создано."
sent_message(text, event.obj.peer_id)
else:
date_start = date_start.split('.')
time_start = refactor_time_start(time_start)
file_zametki = open("/root/bot_herobot_ls/resurses/zametki.txt", "a", encoding="utf8")
file_zametki.write(
str(date_start[1]) + '***#***' + str(date_start[0]) + '***#***' + time_start[
0] + '***#***' + time_start[1] + '***#***' + input_text[event.obj.text.find(' ',
event.obj.text.find(
' в ') + 3,
-1) + 1::] + '***#***' + str(
event.obj.peer_id) + '***#***' + '\n')
file_zametki.close()
text = "Напоминание с текстом: '" + input_text[event.obj.text.find(' ',
event.obj.text.find(
' в ') + 3,
-1) + 1::] + "' в " + \
time_start[0] + ':' + time_start[1] + " на " + str(date_start[0]) + '.' + str(
date_start[1]) + " создано."
sent_message(text, event.obj.peer_id)
else:
sent_message(uncorrect_comand, event.obj.peer_id)
sent_message(format_command, event.obj.peer_id)
except:
sent_message(uncorrect_comand, event.obj.peer_id)
sent_message(format_command, event.obj.peer_id)
| 2.046875
| 2
|
dungeonlevelfactory.py
|
co/TheLastRogue
| 8
|
12783431
|
from dungeonfeature import new_stairs_up
import dungeonfeature
import terrain
import tile
from dungeonlevel import DungeonLevel
def get_empty_tile_matrix(width, height):
return [[tile.Tile()
for x in range(width)]
for y in range(height)]
def unknown_level_map(width, height, depth):
tile_matrix = get_empty_tile_matrix(width, height)
dungeon_level = DungeonLevel(tile_matrix, depth)
for x in range(width):
for y in range(height):
tile_matrix[y][x] = tile.unknown_tile
return dungeon_level
def dungeon_level_from_lines(lines):
terrain_matrix = terrain_matrix_from_lines(lines)
dungeon_level = DungeonLevel(terrain_matrix, 1)
set_terrain_from_lines(dungeon_level, lines)
return dungeon_level
def dungeon_level_from_file(file_name):
lines = read_file(file_name)
return dungeon_level_from_lines(lines)
def terrain_matrix_from_lines(lines):
width = len(lines[0])
height = len(lines)
terrain_matrix = get_empty_tile_matrix(width, height)
return terrain_matrix
def set_terrain_from_lines(dungeon_level, lines):
for x in range(dungeon_level.width):
for y in range(dungeon_level.height):
features = char_to_terrain_and_features(lines[y][x])
for f in features:
f.mover.replace_move((x, y), dungeon_level)
def char_to_terrain_and_features(c):
if c == '#':
return [terrain.Wall()]
elif c == '+':
return [terrain.Door()]
elif c == '~':
return [terrain.Water()]
elif c == 'g':
return [terrain.GlassWall()]
elif c == '_':
return [terrain.Chasm()]
elif c == '>':
return [terrain.Floor(), new_stairs_up()]
elif c == 'p':
return [terrain.Floor(), dungeonfeature.new_plant()]
else:
return [terrain.Floor()]
def read_file(file_name):
f = open(file_name, "r")
data = f.readlines()
data = [line.strip() for line in data]
f.close()
return data
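# Illustrative sketch (not part of the original module): the character mapping above turns
# an ASCII map into terrain, e.g. a walled room with floor tiles and stairs up ('>').
# example_lines = [
#     "#####",
#     "#..>#",
#     "#####",
# ]
# level = dungeon_level_from_lines(example_lines)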
| 3.015625
| 3
|
spam_filter/__main__.py
|
vjaos/SpamFilter
| 0
|
12783432
|
import numpy as np
import pandas as pd
from .nlp_utils.classifier import NaiveBayesClassifier
from .nlp_utils.tokenizer import NGramTokenizer
DATASET_PATH = 'spam_filter/data/spam.csv'
def preprocess_data():
dataset = pd.read_csv(DATASET_PATH, encoding='latin-1')
dataset.rename(columns={'v1': 'labels', 'v2': 'message'}, inplace=True)
dataset['label'] = dataset['labels'].map({'ham': 0, 'spam': 1})
dataset.drop(['labels'], axis=1, inplace=True)
train_indices, test_indices = [], []
for i in range(dataset.shape[0]):
if np.random.uniform(0, 1) < 0.75:
train_indices += [i]
else:
test_indices += [i]
train_dataset = dataset.loc[train_indices]
test_dataset = dataset.loc[test_indices]
train_dataset.reset_index(inplace=True)
train_dataset.drop(['index'], axis=1, inplace=True)
test_dataset.reset_index(inplace=True)
test_dataset.drop(['index'], axis=1, inplace=True)
return train_dataset, test_dataset
def metrics(labels, predictions):
true_pos, true_neg, false_pos, false_neg = 0, 0, 0, 0
for i in range(len(labels)):
true_pos += int(labels[i] == 1 and predictions[i] == 1)
true_neg += int(labels[i] == 0 and predictions[i] == 0)
false_pos += int(labels[i] == 0 and predictions[i] == 1)
false_neg += int(labels[i] == 1 and predictions[i] == 0)
precision = true_pos / (true_pos + false_pos)
recall = true_pos / (true_pos + false_neg)
f_score = 2 * precision * recall / (precision + recall)
accuracy = (true_pos + true_neg) / (true_pos + true_neg + false_pos + false_neg)
print("Precision: ", precision)
print("Recall: ", recall)
print("F-score: ", f_score)
print("Accuracy: ", accuracy)
if __name__ == '__main__':
train_dataset, test_dataset = preprocess_data()
classifier = NaiveBayesClassifier()
classifier.train(train_dataset)
prediction_list = classifier.predict(test_dataset['message'])
metrics(test_dataset['label'], prediction_list)
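    # Tiny worked example (illustrative): with labels [1, 0, 1, 0] and predictions
    # [1, 0, 0, 0] the counts are TP=1, TN=2, FP=0, FN=1, giving precision 1.0,
    # recall 0.5, F-score ~0.667 and accuracy 0.75.
    # metrics([1, 0, 1, 0], [1, 0, 0, 0])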
| 2.640625
| 3
|
Script/Tool/md2pdf.py
|
dtcxzyw/NCEEHelper
| 1
|
12783433
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import pypandoc
import os
print("Pandoc", pypandoc.get_pandoc_version())
base = "../../Note/"
for r, ds, fs in os.walk(base):
for f in fs:
if f.endswith(".md"):
src = r+"/"+f
dst = src.replace(".md", ".pdf")
print(src, "->", dst)
print(pypandoc.convert_file(
src, "pdf", outputfile=dst, format="gfm", encoding="utf-8",
extra_args=["-V", "CJKmainfont=Microsoft YaHei", "--pdf-engine=xelatex"]))
| 2.828125
| 3
|
test/conftest.py
|
onefinestay/pylytics
| 5
|
12783434
|
from mysql.connector.errors import OperationalError
import pytest
from test.helpers import db_fixture, execute
@pytest.fixture(scope="session")
def warehouse():
return db_fixture("test_warehouse")
@pytest.fixture
def empty_warehouse(warehouse):
cursor = warehouse.cursor()
cursor.execute("SHOW TABLES")
tables = [_[0] for _ in cursor]
cursor.close()
execute(warehouse, "SET foreign_key_checks = 0")
for table in tables:
try:
execute(warehouse, "DROP TABLE {}".format(table))
except OperationalError:
execute(warehouse, "DROP VIEW {}".format(table))
execute(warehouse, "SET foreign_key_checks = 1")
warehouse.commit()
return warehouse
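# Example test sketch (hypothetical, not part of the original conftest) showing how the
# fixture above would be consumed:
# def test_warehouse_starts_empty(empty_warehouse):
#     cursor = empty_warehouse.cursor()
#     cursor.execute("SHOW TABLES")
#     assert list(cursor) == []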
| 2.1875
| 2
|
clickbay/urls.py
|
kilonzijnr/Click-Bay
| 0
|
12783435
|
from . import views
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . views import *
# Application Views
urlpatterns = [
path('', views.user_login, name='login'),
path('logout/', views.user_logout, name='logout'),
path('signup/', views.user_signup, name='signup'),
path('profile', profile, name='profile'),
path('homepage', homepage, name='homepage'),
path('profile/update/', views.update_profile, name='update_profile'),
path('user/<int:id>/', views.user_profile, name='user_profile'),
path('like/<int:id>/', views.like_image, name='like_image'),
path('comment/add', views.save_comment, name='add_comment'),
path('search/', views.search_images, name='search_images'),
path('upload/add/', views.save_image, name='save.image'),
path('picture/<int:id>/', views.image_comments, name='single_image'),
path('follow/<int:pk>',views.FollowView,name="follow")
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| 1.960938
| 2
|
python/examples/barcode_scanner.py
|
VNOpenAI/daisykit
| 13
|
12783436
|
import cv2
import json
from daisykit.utils import get_asset_file
from daisykit import BarcodeScannerFlow
config = {
"try_harder": True,
"try_rotate": True
}
barcode_scanner_flow = BarcodeScannerFlow(json.dumps(config))
# Open video stream from webcam
vid = cv2.VideoCapture(0)
while(True):
# Capture the video frame
ret, frame = vid.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
result = barcode_scanner_flow.Process(frame, draw=True)
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# Display the resulting frame
cv2.imshow('frame', frame)
# The 'q' button is set as the
# quitting button you may use any
# desired button of your choice
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# After the loop release the cap object
vid.release()
# Destroy all the windows
cv2.destroyAllWindows()
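# Single-image variant (sketch, assuming the same Process signature as above):
# img = cv2.cvtColor(cv2.imread("barcode.jpg"), cv2.COLOR_BGR2RGB)
# result = barcode_scanner_flow.Process(img, draw=True)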
| 3.140625
| 3
|
texaslan/users/forms.py
|
hsmeans/texaslan.org
| 2
|
12783437
|
from django import forms
from django.contrib.auth import get_user_model
from django.template.defaultfilters import filesizeformat
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import Group
from os import path
from .models import User
from texaslan.applications.models import Application
class UserSignupForm(forms.ModelForm):
class Meta:
model = User
fields = ['full_name', 'nick_name', 'graduation_date', 'concentration', 'gender']
widgets = {
'full_name': forms.TextInput(attrs={'placeholder': 'Full Name'}),
'nick_name': forms.TextInput(attrs={'placeholder': 'Nick Name'}),
'graduation_date': forms.TextInput(attrs={'placeholder': 'Graduation Date'}),
}
def signup(self, request, user):
user.username = self.cleaned_data['username']
user.full_name = self.cleaned_data['full_name']
user.nick_name = self.cleaned_data['nick_name']
user.graduation_date = self.cleaned_data['graduation_date']
user.save()
open_rush_group = Group.objects.get(name="Open Rushie")
open_rush_group.user_set.add(user)
open_rush_group.save()
(application, created) = Application.objects.get_or_create(applicant_user__pk=user.pk)
application.applicant_user = user
application.save()
class UserUpdateForm(forms.ModelForm):
class Meta:
model = User
fields = ['full_name', 'nick_name', 'graduation_date', 'concentration', 'gender']
| 2.21875
| 2
|
source/tui.py
|
krustowski/textovka-tui
| 0
|
12783438
|
<filename>source/tui.py<gh_stars>0
#!/usr/bin/env python3
# encoding: utf-8
__author__ = "krustowski"
__email__ = "<EMAIL>"
__license__ = "MIT"
__date__ = "Sunday, Apr 5, 2020"
__version__ = "1.0"
try:
import npyscreen
except:
print("Run './setup.py install' first...")
exit()
from source.api import Api
import time
api = Api()
class msgBox(npyscreen.BoxTitle):
_contained_widget = npyscreen.MultiLineEdit
class textovkaForm(npyscreen.Form):
def afterEditing(self):
# send an action
ping = int(time.time() * 1000)
a = api.sendAction(self.actions.get_selected_objects()[0])
pong = int(time.time() * 1000) - ping
# get inventary
inventary = a.player["inventary"] if a.player["inventary"] != None else []
# get room actions
actions = []#[""] bug
actions.extend(a.room["actions"] if a.room["actions"] != None else [])
actions.extend(["go-north", "go-south", "go-east", "go-west"])
# update the form
self.name = "textovka (api: " + a.api["version"] + ") (room: " + a.player["room"] + ") (ping: " + str(pong) + " ms)"
self.hp.value = a.player["hp"]
self.inventary.values = inventary
self.message.value = a.message
self.message.rely = 7 + (len(inventary) - 1 if len(inventary) > 0 else 0)
#self.actions.max_height = len(actions)
self.actions.values = actions
self.actions.value = [0]
self.actions.rely = 14 + (len(inventary) - 1 if len(inventary) > 0 else 0)# + a.message.count("\n")
# shall we continue?
if a.player["game_ended"]:
self.actions.editable = False
self.actions.hidden = True
if api.player["hp"] <= 0:
self.actions.editable = False
self.actions.hidden = True
"""
if a.player["game_ended"]:
self.parentApp.setNextForm(None)
else:
self.parentApp.setNextForm("MAIN")
"""
def create(self):
# get inventary
inventary = api.player["inventary"] if api.player["inventary"] != None else []
# get room actions
actions = []#[""] bug
actions.extend(api.room["actions"] if api.room["actions"] != None else [])
actions.extend(["go-north", "go-south", "go-east", "go-west"])
# form objects init
self.nickname = self.add(npyscreen.TitleText, name = "nickname", editable = False, value = api.player["nickname"])
self.hp = self.add(npyscreen.TitleSlider, out_of = 100.0, label = True, name = "hp", editable = False, value = api.player["hp"])
self.inventary = self.add(npyscreen.TitleSelectOne, scroll_exit = True, name = "inventary", values = inventary, rely = 5, editable = False)
#self.message = self.add(npyscreen.MultiLineEdit, name = "message", value = api.message, editable = False, relx = 18, rely = 8 + (len(inventary) - 1 if len(inventary) > 0 else 0))
self.message = self.add(msgBox, name = "message", value = api.message, editable = False, relx = 3, rely = 7 + (len(inventary) - 1 if len(inventary) > 0 else 0), max_height = 6, color = "CONTROL")
self.actions = self.add(npyscreen.TitleSelectOne, scroll_exit = True, max_height = 7, max_width = 0, name = "actions", values = actions, value = [0,], rely = 14 + (len(inventary) - 1 if len(inventary) > 0 else 0))# + api.message.count("\n"))
# hide the actions, if the game is over
if api.player["game_ended"]:
self.actions.editable = False
self.actions.hidden = True
# ...or the player is dead
if api.player["hp"] <= 0:
self.actions.editable = False
self.actions.hidden = True
class textovkaTUI(npyscreen.NPSAppManaged):
def onStart(self):
self.addForm("MAIN", textovkaForm, name = "textovka (api: " + api.api["version"] + ") (room: " + api.player["room"] + ")")
| 2.234375
| 2
|
one/utils/environment/idp.py
|
DNXLabs/one
| 5
|
12783439
|
import click
from PyInquirer import prompt
from one.utils.environment.common import get_credentials_file, get_config_file, get_idp_file, write_config
from one.utils.prompt import style
from one.docker.image import Image
from one.docker.container import Container
from one.__init__ import CLI_ROOT
from one.prompt.idp import PROVIDER_QUESTIONS, GSUITE_QUESTIONS, AZURE_QUESTIONS, OKTA_QUESTIONS
from one.prompt.auth import AWS_ACCESS_KEY_QUESTIONS
image = Image()
container = Container()
def configure_idp():
provider_answer = prompt(PROVIDER_QUESTIONS, style=style)
if not provider_answer:
raise SystemExit
if provider_answer['provider'] == 'Google G Suite SSO':
configure_gsuite()
elif provider_answer['provider'] == 'Microsoft Azure SSO':
configure_azure()
elif provider_answer['provider'] == 'Okta SSO':
configure_okta()
elif provider_answer['provider'] == 'AWS SSO':
configure_aws_sso()
elif provider_answer['provider'] == 'AWS IAM user':
configure_iam_user()
else:
raise SystemExit
def configure_gsuite():
answers = prompt(GSUITE_QUESTIONS, style=style)
if not bool(answers):
raise SystemExit
idp_file = get_idp_file()
idp_file['gsuite'] = {
'google_idp_id': answers['GOOGLE_IDP_ID'],
'google_sp_id': answers['GOOGLE_SP_ID']
}
write_config(idp_file, '/idp')
click.echo('\n')
def configure_azure():
answers = prompt(AZURE_QUESTIONS, style=style)
if not bool(answers):
raise SystemExit
idp_file = get_idp_file()
idp_file['azure'] = {
'AZURE_TENANT_ID': answers['AZURE_TENANT_ID'],
'AZURE_APP_ID_URI': answers['AZURE_APP_ID_URI']
}
write_config(idp_file, '/idp')
click.echo('\n')
def configure_okta():
answers = prompt(OKTA_QUESTIONS, style=style)
if not bool(answers):
raise SystemExit
idp_file = get_idp_file()
idp_file['okta'] = {
'okta_org': answers['OKTA_ORG'],
'okta_aws_app_url': answers['OKTA_AWS_APP_URL'],
'okta_aws_default_region': answers['OKTA_AWS_DEFAULT_REGION']
}
write_config(idp_file, '/idp')
click.echo('\n')
def configure_aws_sso():
auth_image = image.get_image('aws_v2')
work_volume = CLI_ROOT + ':/work'
env_sso = {}
env_sso['AWS_CONFIG_FILE'] = '/work/config'
container.create(
image=auth_image,
command='configure sso',
volumes=[work_volume],
environment=env_sso
)
click.echo('\n')
def configure_iam_user():
aws_auth_answer = prompt(AWS_ACCESS_KEY_QUESTIONS, style=style)
if not aws_auth_answer:
raise SystemExit
credentials_file = get_credentials_file()
credentials_file[aws_auth_answer['PROFILE']] = {
'AWS_ACCESS_KEY_ID': aws_auth_answer['AWS_ACCESS_KEY_ID'],
'AWS_SECRET_ACCESS_KEY': aws_auth_answer['AWS_SECRET_ACCESS_KEY']
}
config_file = get_config_file()
config_file['profile ' + aws_auth_answer['PROFILE']] = {
'REGION': aws_auth_answer['REGION']
}
write_config(credentials_file, '/credentials')
write_config(config_file, '/config')
click.echo('\n')
| 2.109375
| 2
|
dustbin/tests.py
|
chiro2001/cumcm-a
| 0
|
12783440
|
import matplotlib.pyplot as plt
import pandas as pd
from utils import *
# coordinates and IDs of the main cable nodes
data1 = pd.read_csv("data/附件1.csv", encoding='ANSI')
# print('main cable node coordinates and IDs:\n', data1)
nodes_data = {}
for d in data1.itertuples():
nodes_data[d[1]] = {
# 'position': tuple(d[2:]),
'position_raw': np.array(d[2:]),
'position': np.array(d[2:]),
        # extension amount, i.e. all the variables we need to solve for
'expand': 0
}
# coordinates of the actuator's lower end point (ground anchor),
# coordinates of its upper end point (top) in the reference state,
# and the main cable node ID each actuator corresponds to
data2 = pd.read_csv("data/附件2.csv", encoding='ANSI')
# print('data2:\n', data2)
for d in data2.itertuples():
nodes_data[d[1]]['actuactor_head'] = np.array(d[2:5])
nodes_data[d[1]]['actuactor_base'] = np.array(d[5:8])
# print(nodes_data)
triangles_data = []
# main cable node IDs for each reflector panel
data3 = pd.read_csv("data/附件3.csv", encoding='ANSI')
# print('data3:\n', data3)
for d in data3.itertuples():
triangles_data.append(tuple(d[1:]))
# print(triangles_data)
# plot the current node configuration
def draw_points(points: np.ndarray = None, nodes_data_: dict = nodes_data):
ax = plt.axes(projection='3d')
plt.xlim(-300, 300)
plt.ylim(-300, 300)
ax.set_zlim(-400, -100)
if points is None:
points = to_points(nodes_data_=nodes_data_)
points2 = to_points(nodes_data_=nodes_data_, dict_key='actuactor_head')
points3 = to_points(nodes_data_=nodes_data_, dict_key='actuactor_base')
m = get_rotation_matrix(np.pi / 6, np.pi / 4, np.pi / 12)
# m = get_rotation_matrix(0, 0, np.pi / 12)
# m = get_rotation_matrix(a, b, c)
# points = points * m
# np.zeros((3, 3)) * np.zeros((100, 3)).T
    # matrix multiplication uses np.dot...
points = np.dot(points, m)
ax.scatter3D(points.T[0], points.T[1], points.T[2], c="g", marker='.')
ax.scatter3D(points2.T[0], points2.T[1], points2.T[2], c="c", marker='.')
ax.scatter3D(points3.T[0], points3.T[1], points3.T[2], c='m', marker='.')
plt.show()
# compute the main cable node positions under the current extension values
def update_expand(nodes_data_: dict = nodes_data):
for name in nodes_data_:
node = nodes_data_[name]
        # use the actuator base coordinate and the original node position to get the direction vector for the extension
n = get_unit_vector(node['position_raw'], node['actuactor_base'])
        # update position
node['position'] = node['position_raw'] + n * node['expand']
# convert node data into a coordinate array
def to_points(nodes_data_: dict = nodes_data, dict_key: str = 'position') -> np.ndarray:
points = []
for name in nodes_data_:
node = nodes_data_[name]
points.append(node[dict_key])
return np.array(points)
def do_rotation(alpha: float, beta: float) -> np.ndarray:
m = get_rotation_matrix(0, alpha, beta)
for i in range(0, 8):
# c -> beta
# b -> alpha ?
draw_points(b=0, c=0, a=i * np.pi / 8)
# import random
# for name in nodes_data:
# nodes_data[name]['expand'] = 20 * random.random()
# update_expand()
# draw_points()
# print(plane_symmetry_point([1, 1, 1, 0], [2, 2, 2]))
| 2.984375
| 3
|
ads/routes.py
|
sinisaos/starlette-piccolo-rental
| 3
|
12783441
|
<gh_stars>1-10
from starlette.routing import Route, Router
from ads.endpoints import (ad, ad_create, ad_delete, ad_edit, ad_images,
ads_list, edit_upload, filter_search, image_delete,
image_edit, maps, review_create, review_delete,
review_edit, search, upload)
ads_routes = Router(
[
Route(
"/", endpoint=ads_list, methods=["GET", "POST"], name="ads_list"
),
Route(
"/{id:int}/{slug:str}",
endpoint=ad,
methods=["GET", "POST"],
name="ad",
),
Route(
"/create",
endpoint=ad_create,
methods=["GET", "POST"],
name="ad_create",
),
Route(
"/edit/{id:int}",
endpoint=ad_edit,
methods=["GET", "POST"],
name="ad_edit",
),
Route(
"/delete/{id:int}",
endpoint=ad_delete,
methods=["GET", "POST"],
name="ad_delete",
),
Route(
"/images",
endpoint=ad_images,
methods=["GET", "POST"],
name="ad_images",
),
Route("/upload", endpoint=upload, methods=["POST"], name="upload"),
Route(
"/image-edit/{id:int}",
endpoint=image_edit,
methods=["GET", "POST"],
name="image_edit",
),
Route(
"/edit-upload/{aid:int}",
endpoint=edit_upload,
methods=["POST"],
name="edit_upload",
),
Route(
"/image-delete/{id:int}",
endpoint=image_delete,
methods=["POST"],
name="image_delete",
),
Route(
"/review-create",
endpoint=review_create,
methods=["GET", "POST"],
name="review_create",
),
Route(
"/review-edit/{id:int}",
endpoint=review_edit,
methods=["GET", "POST"],
name="review_edit",
),
Route(
"/review-delete/{id:int}",
endpoint=review_delete,
methods=["GET", "POST"],
name="review_delete",
),
Route("/search", endpoint=search, methods=["GET"], name="search"),
Route(
"/filter-search",
endpoint=filter_search,
methods=["GET"],
name="filter_search",
),
Route("/map/{city:str}", endpoint=maps, methods=["GET"], name="maps"),
]
)
| 1.929688
| 2
|
__init__.py
|
deckvig/calibre-douban
| 0
|
12783442
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2011, Kov<NAME> <<EMAIL>>; 2011, <NAME> <<EMAIL>>'
__docformat__ = 'restructuredtext en'
import time
try:
from queue import Empty, Queue
except ImportError:
from Queue import Empty, Queue
from calibre.ebooks.metadata import check_isbn
from calibre.ebooks.metadata.sources.base import Option, Source
from calibre.ebooks.metadata.book.base import Metadata
from calibre import as_unicode
NAMESPACES = {
'openSearch': 'http://a9.com/-/spec/opensearchrss/1.0/',
'atom': 'http://www.w3.org/2005/Atom',
'db': 'https://www.douban.com/xmlns/',
'gd': 'http://schemas.google.com/g/2005'
}
def get_details(browser, url, timeout): # {{{
try:
if Douban.DOUBAN_API_KEY:
url = url + "?apikey=" + Douban.DOUBAN_API_KEY
raw = browser.open_novisit(url, timeout=timeout).read()
except Exception as e:
gc = getattr(e, 'getcode', lambda: -1)
if gc() != 403:
raise
# Douban is throttling us, wait a little
time.sleep(2)
raw = browser.open_novisit(url, timeout=timeout).read()
return raw
# }}}
class Douban(Source):
name = 'Douban Books'
author = '<NAME>, xcffl, jnozsc, deckvig'
version = (3, 2, 0)
minimum_calibre_version = (5, 0, 0)
description = _(
'Downloads metadata and covers from Douban.com. '
'Useful only for Chinese language books.'
)
capabilities = frozenset(['identify', 'cover'])
touched_fields = frozenset([
'title', 'authors', 'tags', 'pubdate', 'comments', 'publisher',
'identifier:isbn', 'rating', 'identifier:douban'
]) # language currently disabled
supports_gzip_transfer_encoding = True
cached_cover_url_is_reliable = True
DOUBAN_API_KEY = '054022eaeae0b00e0fc068c0c0a2102a'
DOUBAN_API_URL = 'https://api.douban.com/v2/book/search'
DOUBAN_BOOK_URL = 'https://book.douban.com/subject/%s/'
options = (
Option(
'include_subtitle_in_title', 'bool', True,
_('Include subtitle in book title:'),
_('Whether to append subtitle in the book title.')
),
Option(
'douban_api_domain', 'string', "https://api.douban.com",
_('simple boot douban api address:'),
_('simple boot douban api server address.')
),
)
def save_settings(self, *args, **kwargs):
Source.save_settings(self, *args, **kwargs)
@property
def douban_api_domain(self):
return self.prefs['douban_api_domain']
def to_metadata(self, browser, log, entry_, timeout): # {{{
from calibre.utils.date import parse_date, utcnow
douban_id = entry_.get('id')
title = entry_.get('title')
description = entry_.get('summary')
        # subtitle = entry_.get('subtitle')  # TODO: std metadata doesn't have this field
publisher = entry_.get('publisher')
        isbn = entry_.get('isbn13')  # ISBN10 is obsolete, use ISBN13
pubdate = entry_.get('pubdate')
authors = entry_.get('author')
book_tags = entry_.get('tags')
rating = entry_.get('rating')
cover_url = entry_.get('image')
series = entry_.get('series')
if not authors:
authors = [_('Unknown')]
if not douban_id or not title:
# Silently discard this entry
return None
mi = Metadata(title, authors)
mi.identifiers = {'douban': douban_id}
mi.publisher = publisher
mi.comments = description
# mi.subtitle = subtitle
# ISBN
isbns = []
if isinstance(isbn, (type(''), bytes)):
if check_isbn(isbn):
isbns.append(isbn)
else:
for x in isbn:
if check_isbn(x):
isbns.append(x)
if isbns:
mi.isbn = sorted(isbns, key=len)[-1]
mi.all_isbns = isbns
# Tags
mi.tags = [tag['name'] for tag in book_tags]
# pubdate
if pubdate:
try:
default = utcnow().replace(day=15)
mi.pubdate = parse_date(pubdate, assume_utc=True, default=default)
except:
log.error('Failed to parse pubdate %r' % pubdate)
# Ratings
if rating:
try:
mi.rating = float(rating['average']) / 2.0
except:
log.exception('Failed to parse rating')
mi.rating = 0
# Cover
mi.has_douban_cover = None
u = cover_url
if u:
# If URL contains "book-default", the book doesn't have a cover
if u.find('book-default') == -1:
mi.has_douban_cover = u
# Series
if series:
mi.series = series['title']
return mi
# }}}
def get_book_url(self, identifiers): # {{{
db = identifiers.get('douban', None)
if db is not None:
return ('douban', db, self.DOUBAN_BOOK_URL % db)
# }}}
def create_query(self, log, title=None, authors=None, identifiers={}): # {{{
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
SEARCH_URL = self.douban_api_domain + '/v2/book/search?count=10&'
ISBN_URL = self.douban_api_domain + '/v2/book/isbn/'
SUBJECT_URL = self.douban_api_domain + '/v2/book/'
q = ''
t = None
isbn = check_isbn(identifiers.get('isbn', None))
subject = identifiers.get('douban', None)
if isbn is not None:
q = isbn
t = 'isbn'
elif subject is not None:
q = subject
t = 'subject'
elif title or authors:
def build_term(prefix, parts):
return ' '.join(x for x in parts)
title_tokens = list(self.get_title_tokens(title))
if title_tokens:
q += build_term('title', title_tokens)
author_tokens = list(
self.get_author_tokens(authors, only_first_author=True)
)
if author_tokens:
q += ((' ' if q != '' else '') + build_term('author', author_tokens))
t = 'search'
q = q.strip()
if not q:
return None
url = None
if t == "isbn":
url = ISBN_URL + q
elif t == 'subject':
url = SUBJECT_URL + q
else:
url = SEARCH_URL + urlencode({
'q': q,
})
if self.DOUBAN_API_KEY and self.DOUBAN_API_KEY != '':
if t == "isbn" or t == "subject":
url = url + "?apikey=" + self.DOUBAN_API_KEY
else:
url = url + "&apikey=" + self.DOUBAN_API_KEY
return url
# }}}
def download_cover(
self,
log,
result_queue,
abort, # {{{
title=None,
authors=None,
identifiers={},
timeout=30,
get_best_cover=False
):
cached_url = self.get_cached_cover_url(identifiers)
if cached_url is None:
log.info('No cached cover found, running identify')
rq = Queue()
self.identify(
log,
rq,
abort,
title=title,
authors=authors,
identifiers=identifiers
)
if abort.is_set():
return
results = []
while True:
try:
results.append(rq.get_nowait())
except Empty:
break
results.sort(
key=self.identify_results_keygen(
title=title, authors=authors, identifiers=identifiers
)
)
for mi in results:
cached_url = self.get_cached_cover_url(mi.identifiers)
if cached_url is not None:
break
if cached_url is None:
log.info('No cover found')
return
if abort.is_set():
return
br = self.browser
log('Downloading cover from:', cached_url)
try:
cdata = br.open_novisit(cached_url, timeout=timeout).read()
if cdata:
result_queue.put((self, cdata))
except:
log.exception('Failed to download cover from:', cached_url)
# }}}
def get_cached_cover_url(self, identifiers): # {{{
url = None
db = identifiers.get('douban', None)
if db is None:
isbn = identifiers.get('isbn', None)
if isbn is not None:
db = self.cached_isbn_to_identifier(isbn)
if db is not None:
url = self.cached_identifier_to_cover_url(db)
return url
# }}}
def get_all_details(
self,
br,
log,
entries,
abort, # {{{
result_queue,
timeout
):
for relevance, i in enumerate(entries):
try:
ans = self.to_metadata(br, log, i, timeout)
if isinstance(ans, Metadata):
ans.source_relevance = relevance
db = ans.identifiers['douban']
for isbn in getattr(ans, 'all_isbns', []):
self.cache_isbn_to_identifier(isbn, db)
if ans.has_douban_cover:
self.cache_identifier_to_cover_url(db, ans.has_douban_cover)
self.clean_downloaded_metadata(ans)
result_queue.put(ans)
except:
log.exception('Failed to get metadata for identify entry:', i)
if abort.is_set():
break
# }}}
def identify(
self,
log,
result_queue,
abort,
title=None,
authors=None, # {{{
identifiers={},
timeout=30
):
import json
query = self.create_query(
log, title=title, authors=authors, identifiers=identifiers
)
if not query:
log.error('Insufficient metadata to construct query')
return
br = self.browser
try:
raw = br.open_novisit(query, timeout=timeout).read()
except Exception as e:
log.exception('Failed to make identify query: %r' % query)
return as_unicode(e)
try:
j = json.loads(raw)
except Exception as e:
log.exception('Failed to parse identify results')
return as_unicode(e)
if 'books' in j:
entries = j['books']
else:
entries = []
entries.append(j)
if not entries and identifiers and title and authors and \
not abort.is_set():
return self.identify(
log,
result_queue,
abort,
title=title,
authors=authors,
timeout=timeout
)
# There is no point running these queries in threads as douban
# throttles requests returning 403 Forbidden errors
self.get_all_details(br, log, entries, abort, result_queue, timeout)
return None
# }}}
if __name__ == '__main__': # tests {{{
# To run these test use: calibre-debug -e src/calibre/ebooks/metadata/sources/douban.py
from calibre.ebooks.metadata.sources.test import (
test_identify_plugin, title_test, authors_test
)
test_identify_plugin(
Douban.name, [
({
'identifiers': {
'isbn': '9787536692930'
},
'title': '三体',
'authors': ['刘慈欣']
}, [title_test('三体', exact=True),
authors_test(['刘慈欣'])]),
({
'title': 'Linux内核修炼之道',
'authors': ['任桥伟']
}, [title_test('Linux内核修炼之道', exact=False)]),
]
)
# }}}
| 2.15625
| 2
|
squeezenet_test.py
|
smbadiwe/SqueezeNetWithCIFAR10
| 0
|
12783443
|
<filename>squeezenet_test.py
import matplotlib.pyplot as plot
import pickle
from funcy import last, partial
from operator import getitem
def plot_history(history):
legends = ["train loss", "test loss", "train accuracy", "test accuracy"]
i = 0
def plot_values_collection(title, values_collection):
plot.clf()
plot.title(title)
for values in values_collection:
plot.plot(values, label=legends.pop(0))
plot.legend()
plot.ylabel(title.split(' ')[0])
plot.xlabel("Epochs")
plot.show()
plot_values_collection('Loss', map(partial(getitem, history), ('loss', 'val_loss')))
plot_values_collection('Accuracy', map(partial(getitem, history), ('acc', 'val_acc')))
def main():
with open('./results/history.pickle', 'rb') as f:
history = pickle.load(f)
print(last(history['acc']))
print(last(history['val_acc']))
print(last(history['loss']))
print(last(history['val_loss']))
plot_history(history)
if __name__ == '__main__':
main()
| 2.96875
| 3
|
exercise 6.5.py
|
tuyanyang/python_exercise
| 0
|
12783444
|
str = 'X-DSPAM-Confidence: 0.8475'
colpos = str.find(':')
number = float(str[colpos+1:])
print(number)
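# Equivalent one-liner (sketch): split on the colon instead of slicing by position.
# print(float(str.split(':')[1]))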
| 2.90625
| 3
|
Build_Web_With_Flask/Building web applications with Flask_Code/chapter03/chapter03/ex01.py
|
abacuspix/NFV_project
| 0
|
12783445
|
# coding:utf-8
with open('parent.html', 'w') as file:
file.write("""
{% block template %}parent.html{% endblock %}
===========
I am a powerful psychic and will tell you your past
{#- "past" is the block identifier #}
{% block past %}
You had pimples by the age of 12.
{%- endblock %}
Tremble before my power!!!
""".strip())
with open('child.html', 'w') as file:
file.write("""
{% extends "parent.html" %}
{# overwriting the block called template from parent.html #}
{% block template %}child.html{% endblock %}
{#- overwriting the block called past from parent.html #}
{% block past %}
You've bought a ebook recently.
{%- endblock %}
""".strip())
from jinja2 import Environment, FileSystemLoader
env = Environment()
env.loader = FileSystemLoader('.')
tmpl = env.get_template('parent.html')
print tmpl.render()
print ""
tmpl = env.get_template('child.html')
print tmpl.render()
| 3.203125
| 3
|
.tox/scenario/lib/python2.7/site-packages/barbicanclient/tests/test_cas.py
|
bdrich/neutron-lbaas
| 0
|
12783446
|
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_utils import timeutils
from barbicanclient.tests import test_client
from barbicanclient import cas
class CAData(object):
def __init__(self):
self.name = u'Test CA'
self.description = u'Test CA description'
self.plugin_name = u'Test CA Plugin'
self.plugin_ca_id = 'plugin_uuid'
now = timeutils.utcnow()
self.expiration = str(now)
self.created = str(now)
self.meta = [
{'name': self.name},
{'description': self.description}
]
self.ca_dict = {'meta': self.meta,
'status': u'ACTIVE',
'plugin_name': self.plugin_name,
'plugin_ca_id': self.plugin_ca_id,
'created': self.created}
def get_dict(self, ca_ref=None):
ca = self.ca_dict
if ca_ref:
ca['ca_ref'] = ca_ref
return ca
class WhenTestingCAs(test_client.BaseEntityResource):
def setUp(self):
self._setUp('cas')
self.ca = CAData()
self.manager = self.client.cas
def test_should_get_lazy(self):
data = self.ca.get_dict(self.entity_href)
m = self.responses.get(self.entity_href, json=data)
ca = self.manager.get(ca_ref=self.entity_href)
self.assertIsInstance(ca, cas.CA)
self.assertEqual(self.entity_href, ca._ca_ref)
# Verify GET wasn't called yet
self.assertFalse(m.called)
# Check an attribute to trigger lazy-load
self.assertEqual(self.ca.plugin_ca_id, ca.plugin_ca_id)
# Verify the correct URL was used to make the GET call
self.assertEqual(self.entity_href, m.last_request.url)
def test_should_get_lazy_in_meta(self):
data = self.ca.get_dict(self.entity_href)
m = self.responses.get(self.entity_href, json=data)
ca = self.manager.get(ca_ref=self.entity_href)
self.assertIsInstance(ca, cas.CA)
self.assertEqual(self.entity_href, ca._ca_ref)
# Verify GET wasn't called yet
self.assertFalse(m.called)
# Check an attribute in meta to trigger lazy-load
self.assertEqual(self.ca.name, ca.name)
# Verify the correct URL was used to make the GET call
self.assertEqual(self.entity_href, m.last_request.url)
def test_should_get_list(self):
ca_resp = self.entity_href
data = {"cas": [ca_resp for v in range(3)]}
m = self.responses.get(self.entity_base, json=data)
ca_list = self.manager.list(limit=10, offset=5)
self.assertTrue(len(ca_list) == 3)
self.assertIsInstance(ca_list[0], cas.CA)
self.assertEqual(self.entity_href, ca_list[0].ca_ref)
# Verify the correct URL was used to make the call.
self.assertEqual(self.entity_base,
m.last_request.url.split('?')[0])
# Verify that correct information was sent in the call.
self.assertEqual(['10'], m.last_request.qs['limit'])
self.assertEqual(['5'], m.last_request.qs['offset'])
def test_should_fail_get_invalid_ca(self):
self.assertRaises(ValueError, self.manager.get,
**{'ca_ref': '12345'})
| 1.796875
| 2
|
config/arch/arm/devices_cortex_m4/mbedos_config.py
|
Microchip-MPLAB-Harmony/mbed_os_rtos
| 0
|
12783447
|
# coding: utf-8
"""*****************************************************************************
* Copyright (C) 2020 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
############################################################################
############### Cortex-M4 Architecture specific configuration ##############
############################################################################
#CPU Clock Frequency
mbedosSym_CpuClockHz.setDependencies(mbedosCpuClockHz, ["core.CPU_CLOCK_FREQUENCY"])
mbedosSym_CpuClockHz.setDefaultValue(int(Database.getSymbolValue("core", "CPU_CLOCK_FREQUENCY")))
#Set HEAP size to 20480
dummyDict = {}
dummyDict = Database.sendMessage("core", "HEAP_SIZE", {"heap_size" : 20480})
#Setup SysTick, PendSV and SVCall Interrupt Priorities.
#SysTick must be highest priority
SysTickInterruptHandlerIndex = Interrupt.getInterruptIndex("SysTick")
SysTickInterruptPri = "NVIC_"+ str(SysTickInterruptHandlerIndex) +"_0_PRIORITY"
SysTickInterruptPriLock = "NVIC_"+ str(SysTickInterruptHandlerIndex) +"_0_PRIORITY_LOCK"
if (Database.getSymbolValue("core", SysTickInterruptPri) != "1"):
Database.clearSymbolValue("core", SysTickInterruptPri)
Database.setSymbolValue("core", SysTickInterruptPri, "1")
if (Database.getSymbolValue("core", SysTickInterruptPriLock) == False):
Database.clearSymbolValue("core", SysTickInterruptPriLock)
Database.setSymbolValue("core", SysTickInterruptPriLock, True)
#SVCall must be lowest priority
SVCallInterruptHandlerIndex = Interrupt.getInterruptIndex("SVCall")
SVCallInterruptPri = "NVIC_"+ str(SVCallInterruptHandlerIndex) +"_0_PRIORITY"
SVCallInterruptPriLock = "NVIC_"+ str(SVCallInterruptHandlerIndex) +"_0_PRIORITY_LOCK"
if (Database.getSymbolValue("core", SVCallInterruptPri) != "7"):
Database.clearSymbolValue("core", SVCallInterruptPri)
Database.setSymbolValue("core", SVCallInterruptPri, "7")
if (Database.getSymbolValue("core", SVCallInterruptPriLock) == False):
Database.clearSymbolValue("core", SVCallInterruptPriLock)
Database.setSymbolValue("core", SVCallInterruptPriLock, True)
#PendSV must be lowest priority
PendSVInterruptHandlerIndex = Interrupt.getInterruptIndex("PendSV")
PendSVInterruptPri = "NVIC_"+ str(PendSVInterruptHandlerIndex) +"_0_PRIORITY"
PendSVInterruptPriLock = "NVIC_"+ str(PendSVInterruptHandlerIndex) +"_0_PRIORITY_LOCK"
if (Database.getSymbolValue("core", PendSVInterruptPri) != "7"):
Database.clearSymbolValue("core", PendSVInterruptPri)
Database.setSymbolValue("core", PendSVInterruptPri, "7")
if (Database.getSymbolValue("core", PendSVInterruptPriLock) == False):
Database.clearSymbolValue("core", PendSVInterruptPriLock)
Database.setSymbolValue("core", PendSVInterruptPriLock, True)
# Update C32 Include directories path
mbedosxc32PreprocessroMacroSym = thirdPartyMbedOS.createSettingSymbol("MBED_OS_XC32_PREPROC_MARCOS", None)
mbedosxc32PreprocessroMacroSym.setCategory("C32")
mbedosxc32PreprocessroMacroSym.setKey("preprocessor-macros")
mbedosxc32PreprocessroMacroSym.setValue("__CORTEX_M4;TARGET_M4;TARGET_CORTEX_M;TARGET_LIKE_MBED;__MBED__=1")
mbedosxc32cppPreprocessroMacroSym = thirdPartyMbedOS.createSettingSymbol("MBED_OS_XC32CPP_PREPROC_MARCOS", None)
mbedosxc32cppPreprocessroMacroSym.setCategory("C32CPP")
mbedosxc32cppPreprocessroMacroSym.setKey("preprocessor-macros")
mbedosxc32cppPreprocessroMacroSym.setValue("__CORTEX_M4;TARGET_M4;TARGET_CORTEX_M;TARGET_LIKE_MBED;__MBED__=1")
mbedosxc32asPreprocessroMacroSym = thirdPartyMbedOS.createSettingSymbol("MBED_OS_XC32AS_PREPROC_MARCOS", None)
mbedosxc32asPreprocessroMacroSym.setCategory("C32-AS")
mbedosxc32asPreprocessroMacroSym.setKey("preprocessor-macros")
mbedosxc32asPreprocessroMacroSym.setValue("__CORTEX_M4;TARGET_M4;TARGET_CORTEX_M;TARGET_LIKE_MBED;__MBED__=1")
mbedosOsXc32IncludeSettingSym = thirdPartyMbedOS.createSettingSymbol("MBED_OS_XC32_SETTING_INCLUDE_HEADER", None)
mbedosOsXc32IncludeSettingSym.setCategory("C32")
mbedosOsXc32IncludeSettingSym.setKey("appendMe")
mbedosOsXc32IncludeSettingSym.setValue("-include ../src/config/" + configName + "/mbedos_config/mbed_config.h")
mbedosOsXc32IncludeSettingSym.setAppend(True, " ")
mbedosOsXc32cppIncludeSettingSym = thirdPartyMbedOS.createSettingSymbol("MBED_OS_XC32CPP_SETTING_INCLUDE_HEADER", None)
mbedosOsXc32cppIncludeSettingSym.setCategory("C32CPP")
mbedosOsXc32cppIncludeSettingSym.setKey("appendMe")
mbedosOsXc32cppIncludeSettingSym.setValue("-include ../src/config/" + configName + "/mbedos_config/mbed_config.h")
mbedosOsXc32cppIncludeSettingSym.setAppend(True, " ")
mbedosOsXc32asIncludeSettingSym = thirdPartyMbedOS.createSettingSymbol("MBED_OS_XC32AS_SETTING_INCLUDE_HEADER", None)
mbedosOsXc32asIncludeSettingSym.setCategory("C32-AS")
mbedosOsXc32asIncludeSettingSym.setKey("appendMe")
mbedosOsXc32asIncludeSettingSym.setValue("-include ../src/config/" + configName + "/mbedos_config/mbed_config.h")
mbedosOsXc32asIncludeSettingSym.setAppend(True, " ")
mbedosIncludePath = "../src/config/" + configName + "/mbedos_config;../src/third_party/rtos/mbed-os;\
../src/third_party/rtos/mbed-os/rtos/source;../src/third_party/rtos/mbed-os/rtos/include;../src/third_party/rtos/mbed-os/rtos/include/rtos;../src/third_party/rtos/mbed-os/rtos/include/rtos/internal;\
../src/third_party/rtos/mbed-os/events/include;../src/third_party/rtos/mbed-os/events/include/events;../src/third_party/rtos/mbed-os/events/include/events/internal;\
../src/third_party/rtos/mbed-os/platform;../src/third_party/rtos/mbed-os/platform/include;../src/third_party/rtos/mbed-os/platform/include/platform;../src/third_party/rtos/mbed-os/platform/include/platform/internal;\
../src/third_party/rtos/mbed-os/platform/cxxsupport;../src/third_party/rtos/mbed-os/platform/source;\
../src/third_party/rtos/mbed-os/cmsis/CMSIS_5/CMSIS/RTOS2/Include;\
../src/third_party/rtos/mbed-os/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Config;../src/third_party/rtos/mbed-os/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include;\
../src/third_party/rtos/mbed-os/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include1;../src/third_party/rtos/mbed-os/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source;\
../src/third_party/rtos/mbed-os/cmsis/device;../src/third_party/rtos/mbed-os/cmsis/device/RTE/include;../src/third_party/rtos/mbed-os/cmsis/device/rtos/include;\
../src/third_party/rtos/mbed-os/targets;../src/third_party/rtos/mbed-os/drivers/include/drivers;../src/third_party/rtos/mbed-os/drivers/include;\
../src/third_party/rtos/mbed-os/hal/include/hal;../src/third_party/rtos/mbed-os/hal/include;"
AddMbedOSSingleFile(thirdPartyMbedOS, "templates/mbed_os/TARGET_RTOS_M4_M7/", "irq_cm4f.S", "../../third_party/rtos/mbed-os/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_RTOS_M4_M7", "mbed-os/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_RTOS_M4_M7")
mbedosOsXc32SettingSym = thirdPartyMbedOS.createSettingSymbol("MBED_OS_XC32_INCLUDE_DIRS", None)
mbedosOsXc32SettingSym.setCategory("C32")
mbedosOsXc32SettingSym.setKey("extra-include-directories")
mbedosOsXc32SettingSym.setValue(mbedosIncludePath)
mbedosOsXc32SettingSym.setAppend(True, ";")
mbedosOsXc32cppSettingSym = thirdPartyMbedOS.createSettingSymbol("MBED_OS_XC32CPP_INCLUDE_DIRS", None)
mbedosOsXc32cppSettingSym.setCategory("C32CPP")
mbedosOsXc32cppSettingSym.setKey("extra-include-directories")
mbedosOsXc32cppSettingSym.setValue(mbedosIncludePath)
mbedosOsXc32cppSettingSym.setAppend(True, ";")
mbedosIncDirForAsm = thirdPartyMbedOS.createSettingSymbol("MBED_OS_XC32_AS_INCLUDE_DIRS", None)
mbedosIncDirForAsm.setCategory("C32-AS")
mbedosIncDirForAsm.setKey("extra-include-directories-for-assembler")
mbedosIncDirForAsm.setValue(mbedosIncludePath)
mbedosIncDirForAsm.setAppend(True, ";")
mbedosIncDirForPre = thirdPartyMbedOS.createSettingSymbol("MBED_OS_XC32_AS_INCLUDE_PRE_PROC_DIRS", None)
mbedosIncDirForPre.setCategory("C32-AS")
mbedosIncDirForPre.setKey("extra-include-directories-for-preprocessor")
mbedosIncDirForPre.setValue(mbedosIncludePath)
mbedosIncDirForPre.setAppend(True, ";")
| 1.03125
| 1
|
computer_vision/06_translating_images.py
|
KECB/learn
| 2
|
12783448
|
import numpy as np
import cv2
# Translation is the shifting of an object's location. If you know the shift in
# the (x, y) direction, say (t_x, t_y), you can create the transformation
# matrix M as follows:
#
# M = | 1 0 t_x |
# | 0 1 t_y |
#
# You'll need to make it into a NumPy array of type np.float32 and pass it to
# the cv2.warpAffine() function.
img = cv2.imread('images/saturn.png', 0)
rows, cols = img.shape
translate_x = -150
translate_y = 50
M = np.float32([[1, 0, translate_x], [0, 1, translate_y]])
img_translated = cv2.warpAffine(img, M, (cols, rows))
cv2.imshow('Translated Image', img_translated)
cv2.waitKey(0)
cv2.destroyAllWindows()
# WARNING: Third argument of the cv2.warpAffine() function is the size of the
# output image, which should be in the form of (width, height).
# Remember width = number of columns, and height = number of rows.
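# As an illustrative addition (not part of the original example), the same
# transform can be applied with the border fill made explicit: the third
# argument of cv2.warpAffine() is still the output size (width, height) =
# (cols, rows), and borderValue fills the pixels uncovered by the shift
# (white here, since the image is grayscale).
img_translated_white = cv2.warpAffine(img, M, (cols, rows), borderValue=255)
cv2.imshow('Translated Image, White Border', img_translated_white)
cv2.waitKey(0)
cv2.destroyAllWindows()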
| 3.921875
| 4
|
DSA/stack/stack_using_linked_list.py
|
RohanMiraje/DSAwithPython
| 2
|
12783449
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
class Stack:
"""ADT - abstract data type- just
LIFO - operations performed from top only
O(1) operations - push, pop
push- insert at beg in list
pop- delete at beg in list
applications : fun calls/ recursion
undo in editors, balanced parentheses
"""
def __init__(self):
self.top = None
def push(self, data):
print("pushed:{}".format(data))
new_top = Node(data)
new_top.next = self.top
self.top = new_top
def pop(self):
if self.is_empty():
print("stack is empty, pop operation aborted")
return
popped_item = self.top.data
temp = self.top.next # to update top
del self.top
self.top = temp
print("popped:{}".format(popped_item))
return popped_item
def _top(self):
if self.is_empty():
print("stack is empty, get top operation aborted")
return
return self.top.data
def is_empty(self):
return not self.top
def print_stack(self):
temp = self.top
if self.is_empty():
print("stack is empty, print stack operation aborted")
return
print("printing stack")
while temp:
print(temp.data, end=" ")
temp = temp.next
print("\n")
def get_top(self):
return self._top()
if __name__ == '__main__':
stack = Stack()
# print(stack.is_empty())
# print(stack.get_top())
stack.push(1)
stack.push(2)
stack.push(3)
stack.push(4)
stack.push(5)
stack.print_stack()
# print(stack.get_top())
stack.pop()
stack.pop()
stack.pop()
stack.print_stack()
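# A minimal sketch (not part of the original file) of the "balanced
# parentheses" application mentioned in the class docstring, built on the
# Stack defined above. Note that push()/pop() print on every call, so the
# demo output is deliberately chatty.
def is_balanced(expression):
    """Return True if every '(', '[' and '{' in expression is properly closed."""
    pairs = {')': '(', ']': '[', '}': '{'}
    symbols = Stack()
    for char in expression:
        if char in '([{':
            symbols.push(char)
        elif char in pairs:
            if symbols.is_empty() or symbols.pop() != pairs[char]:
                return False
    return symbols.is_empty()
if __name__ == '__main__':
    print(is_balanced("({[]})"))  # expected: True
    print(is_balanced("([)]"))  # expected: False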
| 3.953125
| 4
|
rebelykos/core/teamserver/modules/iam_role_info.py
|
Takahiro-Yoko/rebelykos
| 1
|
12783450
|
import boto3
from rebelykos.core.response import Response as res
from rebelykos.core.teamserver.module import Module
class RLModule(Module):
def __init__(self):
super().__init__()
self.name = "role_info"
        self.description = ("List all role names if RoleName is not"
                            " specified. If RoleName is specified, describe"
                            " that role and list the policies attached to it.")
self.author = "<NAME>"
self.options["RoleName"] = {
"Description": ("Describe role detail and policies"
" attached to this role."),
"Required": False,
"Value": ""
}
def run(self):
client = boto3.client("iam", **self["profile"])
if self["RoleName"]:
role = boto3.resource("iam").Role(self["RoleName"])
yield (res.INFO,
"Showing role arn and assume_role_policy_document.")
yield (
res.RESULT,
{"Arn": role.arn,
"Statement": role.assume_role_policy_document["Statement"]}
)
yield res.INFO, "Listing attached policies."
yield from self._iam_attached_policies(
client,
role.attached_policies.all()
)
yield res.INFO, "Listing inline policies."
yield from self._iam_inline_policies(role.policies.all())
        else:
for result in self._handle_is_truncated(client.list_roles):
if result[0] == res.RESULT:
for role in result[1]["Roles"]:
yield res.RESULT, role["RoleName"]
else:
yield result
yield res.END, "End"
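# An illustrative sketch only (not part of this module): the truncation
# handling wrapped by the inherited _handle_is_truncated helper can be done
# with plain boto3 pagination over list_roles, roughly as below, following
# the IsTruncated/Marker fields of the IAM API response.
def _list_all_role_names(client):
    names, kwargs = [], {}
    while True:
        resp = client.list_roles(**kwargs)
        names.extend(role["RoleName"] for role in resp["Roles"])
        if not resp.get("IsTruncated"):
            return names
        kwargs["Marker"] = resp["Marker"]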
| 2.03125
| 2
|