blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b0f782f3579c4e977d327af73c9b766560f8407c | dde535d17e0431b4936a62313d4ffd3ca572ebed | /espnet/egs/spanish_mailabs/asr1/local/make_data.py | 9876e686018358e8d9cb47c47c642b99924c3f12 | [
"Apache-2.0"
] | permissive | vladk17/Domain-specific-ESPnet | 031e24b39adb3948f9dabf295ab910cab5430f81 | 3e9a782732cbd2fc012958c3904ec0914e32c180 | refs/heads/master | 2022-11-19T04:25:27.779454 | 2020-07-21T19:18:45 | 2020-07-21T19:18:45 | 271,080,652 | 0 | 0 | null | 2020-06-13T06:35:14 | 2020-06-09T18:31:34 | null | UTF-8 | Python | false | false | 756 | py | from pathlib import Path
from dataset_utils.dataset_downloader import download_and_extract_data
from dataset_utils.transformers.spanish_mailabs import MailabsKaldiTransformer
# Source archive for the M-AILABS Spanish (es_ES) speech corpus.
dataset_url = 'http://www.caito.de/data/Training/stt_tts/es_ES.tgz'
dataset_name = 'Mailabs'
# ESPnet example (egs) directory this recipe lives in; raw downloads are
# kept in a `raw_data` subfolder inside it.
eg_dir = Path('/espnet/egs/spanish_mailabs/asr1')
raw_data_folder = Path(eg_dir, 'raw_data')

if __name__ == '__main__':
    # Fetch and unpack the corpus archive into raw_data_folder.
    dataset_location = download_and_extract_data(
        dataset_urls=[dataset_url],
        dataset_name=dataset_name,
        download_folder=raw_data_folder)
    print("Dataset location:", dataset_location)
    # Convert the raw corpus into Kaldi-style data files for ESPnet.
    transformer = MailabsKaldiTransformer()
    transformer.transform(
        raw_data_path=dataset_location,
        espnet_kaldi_eg_directory=eg_dir)
| [
"stanislav.barabanov@getwizer.com"
] | stanislav.barabanov@getwizer.com |
f36a5e25d5acda9976d09de8bc449cfad154bd8b | 0cf59810f62eed9641d4da4c27491e7404a1be4c | /python_sample/for_and_while_chapter/for_range.py | 4b21ccfff46e38257565c9be1b0f1451420c39b9 | [] | no_license | Su-hub9/hello_python | 3c911662815b5c5636c0e3dd97db8bd7a7e759ed | 03798fd53e730e590f7f4179cd9009ce7b551c5f | refs/heads/master | 2023-06-16T11:19:42.789595 | 2021-07-13T10:32:05 | 2021-07-13T10:32:05 | 362,412,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | # for 반복문과 범위를 함께 조합해서 사용합니다.
# Demonstrate for-loops over several range() configurations; a blank line
# is printed after each of the first two runs (the third run's trailing
# blank line is printed by the statement that follows this snippet).
for span in (range(5), range(5, 10)):
    for i in span:
        print(str(i) + "= 반복 변수")
    print()
for i in range(0, 10, 3):
    print(str(i) + "= 반복 변수")
print() | [
"suji.bae@datastreams.co.kr"
] | suji.bae@datastreams.co.kr |
41743fd5652d745c199900d74e67de7a95e7d67c | ac4cff61429388cf4395568fbc6d3d8734ec2740 | /blog1/models.py | 6a53a56b2e3852027b1e054aebdff64b03082698 | [] | no_license | sumitbro/Simple_blog | 8d23ef4d7be642207daea33e60609dcb70a7d20d | 0bb8dd0faf9f3e2f83818c4b055681eee7eee9e1 | refs/heads/master | 2022-11-21T06:42:44.687897 | 2020-07-20T04:47:01 | 2020-07-20T04:47:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.contrib import admin
# Create your models here.
class Blog(models.Model):
    """A blog post; ``writer`` is stored as free text, not a FK to ``User``."""
    title= models.CharField(max_length=100)
    writer=models.CharField(max_length=20)
    content= models.TextField()
    # Callable default: ``timezone.now`` is evaluated when each post is
    # created, not once at import time.
    date_posted= models.DateTimeField(default=timezone.now)
    def __str__(self):
        """Display posts by their title."""
        return self.title
    # NOTE(review): an inner ``class Admin`` is pre-1.0 Django syntax and is
    # ignored by modern Django; these options belong on a ModelAdmin
    # registered in admin.py -- confirm the targeted Django version.
    class Admin:
        list_display = ('title', 'writer', 'date_posted')
        list_filter = ('writer', 'date_posted')
        ordering = ('-date_posted',)
        search_fields = ('title',)
# (stored value, human-readable label) pairs; appears intended as the
# ``choices`` for Feedback.topic below.
TOPIC_CHOICES=(
    ('general', 'general enquiry'),
    ('bug', 'bug report'),
    ('suggestion', 'suggestion'),
)
class Feedback(models.Model):
    """A feedback message sent by a visitor.

    ``topic`` is constrained to the module-level TOPIC_CHOICES.
    """
    # Bug fix: TOPIC_CHOICES was defined directly above this model but never
    # wired to the field, so any free-form string was accepted.  Adding
    # ``choices`` is backward-compatible (column type/length unchanged).
    topic = models.CharField(max_length=50, choices=TOPIC_CHOICES)
    message = models.TextField()
    sender = models.EmailField()
| [
"sahsumit769@gmail.com"
] | sahsumit769@gmail.com |
5e1abe7464b79a58a760973b603aa73a3d1dd39c | f078355df11e6f8927a1217060ce9a14f339521c | /chapter7/algorithm_workbench/exercise3.py | 87b0f554fe80896c94936b663c881651485e0810 | [] | no_license | sairamprogramming/python_book1 | 6c4d63325a8465ad0ca885266ee070910d1edfac | f463c64c14e7a3c1737f2e01858afe8d78f7de33 | refs/heads/master | 2020-05-16T09:27:28.209867 | 2019-05-12T11:26:53 | 2019-05-12T11:26:53 | 182,948,440 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | # Program to sort a list in descending order.
import random
def main():
    """Print 100 random ints in [1, 100], then print them sorted descending."""
    # Build the list with a comprehension instead of an append loop.
    lt = [random.randint(1, 100) for _ in range(100)]
    print(lt)
    # sort(reverse=True) replaces the original sort()-then-reverse() pair;
    # printed output is identical since equal ints are indistinguishable.
    lt.sort(reverse=True)
    print(lt)


main()
| [
"sairamprogramming@gmail.com"
] | sairamprogramming@gmail.com |
f238aea7b20d899f7839e340dcf132a628a232ec | 0d60b870af238bb40764af9a5f8f2749da046ce7 | /Backend/Sistema_antiplagio/wsgi.py | 8de781a82d7c6a18858ddfd845ea0cdbc6c71978 | [] | no_license | luisillo21/SistemaAntiplagio | 5291e398a5e18c6c368f8be84ea9ea86764247a5 | 45b99dc76f7fe1b64e41729db5d66b4eabf8e396 | refs/heads/master | 2023-02-02T07:42:56.601455 | 2020-12-18T20:48:57 | 2020-12-18T20:48:57 | 311,553,490 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | """
WSGI config for Sistema_antiplagio project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Sistema_antiplagio.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) look up.
application = get_wsgi_application()
| [
"luisardilamacias@gmail.com"
] | luisardilamacias@gmail.com |
753b329ca779d81e324f8eb20038fe7e3fcb1596 | 43e900f11e2b230cdc0b2e48007d40294fefd87a | /ReferenceSolution/344.reverse-string.144960300.ac.py | 8b168bf0936591a1d4f6e4c9872e7d7a20112144 | [] | no_license | DarkAlexWang/leetcode | 02f2ed993688c34d3ce8f95d81b3e36a53ca002f | 89142297559af20cf990a8e40975811b4be36955 | refs/heads/master | 2023-01-07T13:01:19.598427 | 2022-12-28T19:00:19 | 2022-12-28T19:00:19 | 232,729,581 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | #
# [344] Reverse String
#
# https://leetcode.com/problems/reverse-string/description/
#
# algorithms
# Easy (60.09%)
# Total Accepted: 223K
# Total Submissions: 371.1K
# Testcase Example: '"hello"'
#
# Write a function that takes a string as input and returns the string
# reversed.
#
#
# Example:
# Given s = "hello", return "olleh".
#
#
class Solution(object):
    def reverseString(self, s):
        """Return the string *s* reversed (e.g. "hello" -> "olleh").

        A negative-step slice replaces the original list-based two-pointer
        swap: same result, one expression, no intermediate list/join.
        """
        return s[::-1]
| [
"wangzhihuan0815@gmail.com"
] | wangzhihuan0815@gmail.com |
34ca6fa674e5f5fb35aa311698c5541dcc4cb985 | 8b277b2f7f9e9c58ad0e7075e9eda41780a216f6 | /networksimulator/__init__.py | 53533636667bd619e21114e1a71184bac3143f78 | [] | no_license | eliascodes/netxsim | a35c98afdfa75117cf3e6cec39499f13a55e4734 | f4357503c40c3a0c060ba82540a38125930d52cd | refs/heads/master | 2016-08-11T08:13:03.649960 | 2016-03-05T16:25:17 | 2016-03-05T16:25:17 | 53,211,304 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | __all__ = ['agents', 'builders', 'environment', 'generators', 'grid', 'logger', 'results', 'simulator']
| [
"elias0789@gmail.com"
] | elias0789@gmail.com |
ff4ea47903526107d8b2849b43ec7a6da5d5f665 | c02b29676321c3d1a638e3a5aab5cc5ac6cae769 | /py/compare_images.py | 1052c774e557b112f46460281cbaf9605468a32b | [] | no_license | estaro/DetectSimilarVideo | baafe5ca1c5a3464a25e66f78c20f0fc87be379a | 8e2e8dd7e6e2406ee35c41af4bc909dcef292883 | refs/heads/master | 2021-05-08T01:41:41.361630 | 2017-11-13T21:51:46 | 2017-11-13T21:51:46 | 107,906,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,593 | py | # -*- coding: utf-8 -*-
"""
compare_image.py
Usage: python compare_image.py {dir1} {dir2}
"""
import configparser
from logging import getLogger, DEBUG, basicConfig
import os
from statistics import mean
import sys
import cv2
# --------------------------------------------------------------------------
# Preprocessing
# --------------------------------------------------------------------------
basicConfig(level=DEBUG)
logger = getLogger(__name__)
logger.setLevel(DEBUG)
# --------------------------------------------------------------------------
# constants
# --------------------------------------------------------------------------
# Number of frames captured per video (files named 0.jpg .. 8.jpg).
# NOTE(review): name keeps the original "CAPUTURE" spelling for
# compatibility with other modules that may reference it.
CAPUTURE_COUNT = 9
# File holding the capture metadata (playback time etc.).
METAFILE = 'meta.dat'
# Tolerated playback-duration difference, in percent.
PERMIT_TIME_DIFF_RATE = 15
# --------------------------------------------------------------------------
# functions
# --------------------------------------------------------------------------
def compare_images(dir1, dir2, output_dir):
    """Compare same-named capture frames from two directories.

    Each directory must contain a ``meta.dat`` written at capture time and
    frames named ``0.jpg`` .. ``8.jpg`` (CAPUTURE_COUNT frames).

    :param dir1: first capture directory
    :param dir2: second capture directory
    :param output_dir: unused; kept for interface compatibility
    :return: result code -- 0 on success, -1 when the playback durations
             clearly differ (videos assumed not similar)
    """
    logger.debug("start compare_images ... ")

    config1 = configparser.ConfigParser()
    config1.read(dir1 + '/' + METAFILE)
    config2 = configparser.ConfigParser()
    config2.read(dir2 + '/' + METAFILE)

    time1 = float(config1['metadata']['time'])
    time2 = float(config2['metadata']['time'])

    # Playback durations differing beyond the tolerance mean the videos
    # cannot be the same.  Bug fixes: return the -1 error code the
    # docstring promises (was an implicit None) and use logger.warning
    # instead of the deprecated logger.warn.
    if abs(time1 - time2) >= time1 * PERMIT_TIME_DIFF_RATE / 100:
        logger.warning("time1:" + str(time1) + ", time2:" + str(time2) +
                       " , diff rate:" + str(time1 * PERMIT_TIME_DIFF_RATE / 100))
        return -1

    hist_scores = []     # per-frame histogram correlations (1.0 == identical)
    feature_scores = []  # per-frame mean AKAZE descriptor distances
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    detector = cv2.AKAZE_create()
    for i in range(CAPUTURE_COUNT):
        file1 = dir1 + "/" + str(i) + ".jpg"
        file2 = dir2 + "/" + str(i) + ".jpg"
        # Skip indices where either frame is missing.
        if not (os.path.exists(file1) and os.path.exists(file2)):
            continue

        img1 = cv2.imread(file1)
        img2 = cv2.imread(file2)

        # Histogram comparison (method 0 == correlation).
        hist1 = cv2.calcHist([img1], [0], None, [256], [0, 256])
        hist2 = cv2.calcHist([img2], [0], None, [256], [0, 256])
        hist_diff = cv2.compareHist(hist1, hist2, 0)
        hist_scores.append(hist_diff)
        logger.debug(hist_diff)

        # AKAZE feature matching on grayscale frames.
        gray1 = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
        gray2 = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
        (kp1, des1) = detector.detectAndCompute(gray1, None)
        (kp2, des2) = detector.detectAndCompute(gray2, None)
        matches = bf.match(des1, des2)
        distances = [m.distance for m in matches]
        if distances:
            feature_scores.append(sum(distances) / len(distances))

    # Bug fix: the original logged the undefined name `compare_result`,
    # raising NameError on every successful comparison.  Log the computed
    # aggregate scores instead (uses `mean` from the module imports).
    if hist_scores:
        logger.debug("mean histogram correlation: %s", mean(hist_scores))
    if feature_scores:
        logger.debug("mean feature distance: %s", mean(feature_scores))

    logger.debug("finish compare_images ... ")
    return 0
# --------------------------------------------------------------------------
# main
# --------------------------------------------------------------------------
# CLI entry point: python compare_images.py <dir1> <dir2> <output_dir>
if __name__ == "__main__":
    compare_images(sys.argv[1], sys.argv[2], sys.argv[3])
| [
"31145745+estaro@users.noreply.github.com"
] | 31145745+estaro@users.noreply.github.com |
8681ebea2bba52e99b7fe743a71c50a6abf1ac51 | 714250bfd38cdab62f19cb2db3e0e83ce1534520 | /Solutions/binning.py | 2332e4b7f87ac3956c0c3e85c7444b8834e716e8 | [] | no_license | r-Iyer/Data-Mining | 515d69a0f7aff8072f461e25ba21464d7d3b906e | 13305b66b9c3bd674699ba36e01562f914f3abf5 | refs/heads/master | 2022-09-17T11:08:12.296620 | 2020-06-03T23:46:56 | 2020-06-03T23:46:56 | 269,266,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 5 14:14:04 2019
@author: user
"""
import numpy as np
import math
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error,r2_score
# Load the two-column CSV (x, y).  NOTE(review): .as_matrix() was removed
# in modern pandas; .values / .to_numpy() is the current equivalent.
d = pd.read_csv('andrew.csv').as_matrix()
#boston_dataset = load_boston()
X = []
X=d[:,0]
Y = []
Y=d[:,1]
X=np.array(X)
Y=np.array(Y)
# Width of each bin on the x axis.
interval=300;
a=0
def sortSecond(val):
    # Key function returning the second element (currently unused).
    return val[1]
print(X)
# ``data`` is a view of d's first column, so data.sort(axis=0) reorders
# that column in place; Y is sorted independently.
# NOTE(review): this decouples the x column from the y column kept in d,
# so the (x, y) pairs appended below mix sorted x with column-1 values in
# their new row positions -- confirm this is intended.
data=d[:,0]
data2=[]
data.sort(axis=0)
Y.sort()
# Count how many interval-wide bins are needed to span the x range.
i=d[0,0]
while(i<d[len(d)-1,0]):
    i=i+interval
    a=a+1
binn=[]
for i in range(a):
    binn.append([])
# Walk the rows in order, dropping each (x, y) pair into its bin.
# NOTE(review): if any x exceeds the last bin edge, j can run past
# len(binn)-1 and raise IndexError -- confirm the input range.
j=0
i=0
while(i<len(d)):
    if(d[i,0]<=(d[0,0]+((j+1)*interval))):
        binn[j].append((d[i,0],d[i,1]))
        i=i+1
    else:
        j=j+1
| [
"rohitstar.iyer@gmail.com"
] | rohitstar.iyer@gmail.com |
84fbce073c9a21f6786912792c3918af99b242ce | 5f7fc00144218e45a4b9d1894cc5a75948b0b124 | /src/app1/admin.py | 2aa6b6283f88478a6de3cd7e31b967bae57c3601 | [] | no_license | Lavanya1211/SeaAnt_1 | a6b77d1ff128869e7256608455ae7ace4c017c65 | 8bb8add1b13399fedd9fb64ca3ed2bd0d2f130de | refs/heads/main | 2023-07-06T06:16:39.746838 | 2021-08-19T08:44:00 | 2021-08-19T08:44:00 | 381,921,989 | 0 | 0 | null | 2021-07-01T06:46:33 | 2021-07-01T05:39:38 | null | UTF-8 | Python | false | false | 165 | py | from django.contrib import admin
from.models import Contact_Us,PhoneBook
# Register your models here.
# Expose the app's models in the Django admin site.
admin.site.register(Contact_Us)
admin.site.register(PhoneBook)
| [
"lavanya.b@seaant.com"
] | lavanya.b@seaant.com |
654d6ea67f271fae463c3629d042464a0cdbadc6 | a1ec21093177517ee7ca9e8fa0c00608e7874038 | /neural/jafeat.py | b1f8ee6a47bb157725ce7e154a9252a393fc2fbe | [] | no_license | ustimenv/ProductAnalysis | b8e8d45a0a7d2da274754a3fe4bcf6a1a9b39216 | ca3cbab2ad56720d3fe8bd1ec6a59f0b5a90da6c | refs/heads/master | 2020-06-23T10:06:25.181550 | 2020-05-26T15:18:56 | 2020-05-26T15:18:56 | 198,592,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,689 | py | from gluoncv.model_zoo import *
from gluoncv.model_zoo.ssd.vgg_atrous import VGGAtrousBase
from mxnet.gluon import nn
# Resnet multilayer feature extractor, does not really work
class JafeatRes(nn.HybridBlock):
    """ResNet-style multi-stage feature extractor.

    Uses BottleneckV2 blocks with the [3, 4, 6, 3] / channel layout of a
    ResNet-50.  Unlike a classifier, ``hybrid_forward`` returns the
    (max-pooled) output of each of the four residual stages, presumably for
    use as a multi-scale detection backbone -- TODO confirm with callers.
    """
    def __init__(self, **kwargs):
        super(JafeatRes, self).__init__(**kwargs)
        self.block = BottleneckV2
        self.layers = [3, 4, 6, 3]
        self.channels = [64, 256, 512, 1024, 2048]
        last_gamma = False
        use_se = False
        norm_kwargs = None
        # One output-channel entry per stage, plus the stem width.
        assert len(self.layers) == len(self.channels) - 1
        with self.name_scope():
            # Stem: BN -> 7x7/2 conv -> BN -> ReLU -> 3x3/2 max-pool
            # (features[0..4]), then one entry per residual stage.
            self.features = nn.HybridSequential(prefix='')
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Conv2D(self.channels[0], 7, 2, 3, use_bias=False))
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(3, 2, 1))
            in_channels = self.channels[0]
            for i, num_layer in enumerate(self.layers):
                # First stage keeps resolution; later stages downsample by 2.
                stride = 1 if i == 0 else 2
                self.features.add(self._make_layer(num_layer, self.channels[i + 1],
                                                   stride, i + 1, in_channels=in_channels,
                                                   last_gamma=last_gamma, use_se=use_se,
                                                   norm_layer=nn.BatchNorm, norm_kwargs=norm_kwargs))
                in_channels = self.channels[i + 1]

    def _make_layer(self, layers, channels, stride, stage_index, in_channels=0, last_gamma=False,
                    use_se=False, norm_layer=nn.BatchNorm, norm_kwargs=None):
        """Build one residual stage: a (possibly) downsampling first block
        followed by ``layers - 1`` stride-1 blocks."""
        layer = nn.HybridSequential(prefix='stage%d_' % stage_index)
        with layer.name_scope():
            # First block changes stride/width when needed.
            layer.add(self.block(channels, stride, channels != in_channels, in_channels=in_channels,
                                 last_gamma=last_gamma, use_se=use_se, prefix='',
                                 norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            for _ in range(layers - 1):
                layer.add(self.block(channels, 1, False, in_channels=channels,
                                     last_gamma=last_gamma, use_se=use_se, prefix='',
                                     norm_layer=norm_layer, norm_kwargs=norm_kwargs))
        return layer

    def hybrid_forward(self, F, x, **kwargs):
        """Return a list with one max-pooled feature map per residual stage."""
        output = []
        # Apply the shared stem (features[0..4]) once.
        for i in range(0, 5):
            x = self.features[i](x)
        # For each of the 4 stages, collect its (pooled) feature map.
        for i in range(5, 9):
            x = self.features[i](x)
            x = F.Pooling(x, pool_type='max', kernel=(2, 2), stride=(2, 2), pooling_convention='full')
            output.append(x)
        return output
class JafeatVgg(VGGAtrousBase):
    """VGG-16 (atrous) SSD-style feature extractor.

    Extends VGGAtrousBase with four "extra" conv pairs and returns six
    feature maps of decreasing resolution from ``hybrid_forward``.
    """
    # VGG-16 layout: conv blocks of [2, 2, 4, 4, 4] layers with these widths.
    layers, channels = ([2, 2, 4, 4, 4], [64, 128, 256, 512, 512])
    def __init__(self, batch_norm=True, **kwargs):
        super(JafeatVgg, self).__init__(self.layers, self.channels, batch_norm, **kwargs)
        # Each tuple is (filters, kernel, stride, padding) for one conv.
        extras = [((256, 1, 1, 0), (512, 3, 2, 1)),
                  ((128, 1, 1, 0), (256, 3, 2, 1)),
                  ((128, 1, 1, 0), (256, 3, 1, 0)),
                  ((128, 1, 1, 0), (256, 3, 1, 0))]
        with self.name_scope():
            self.extras = nn.HybridSequential()
            for i, config in enumerate(extras):
                extra = nn.HybridSequential(prefix='extra%d_' % (i))
                with extra.name_scope():
                    for f, k, s, p in config:
                        extra.add(nn.Conv2D(f, k, s, p, **self.init))
                        if batch_norm:
                            extra.add(nn.BatchNorm())
                        extra.add(nn.Activation('relu'))
                self.extras.add(extra)

    def hybrid_forward(self, F, x, init_scale):
        """Run the VGG stages and extras, collecting intermediate maps.

        Returns [L2-normalized stage-4 map, stage-6 map, 4 extra maps].
        """
        x = F.broadcast_mul(x, init_scale)
        assert len(self.stages) == 6
        outputs = []
        for stage in self.stages[:3]:
            x = stage(x)
            x = F.Pooling(x, pool_type='max', kernel=(2, 2), stride=(2, 2),
                          pooling_convention='full')
        x = self.stages[3](x)
        # First output: stage-4 features after the base class's norm layer.
        norm = self.norm4(x)
        outputs.append(norm)
        x = F.Pooling(x, pool_type='max', kernel=(2, 2), stride=(2, 2),
                      pooling_convention='full')
        x = self.stages[4](x)
        # 3x3/1 pool keeps resolution before the atrous stage.
        x = F.Pooling(x, pool_type='max', kernel=(3, 3), stride=(1, 1), pad=(1, 1),
                      pooling_convention='full')
        x = self.stages[5](x)
        outputs.append(x)
        for extra in self.extras:
            x = extra(x)
            outputs.append(x)
        return outputs
| [
"ustimenkovlad@gmail.com"
] | ustimenkovlad@gmail.com |
50ac96ab80768609f1ec718a8974748b156d9c00 | 49201a80ee7cf00997861a45375de58e18e59a1c | /sentiment_analysis.py | 77111c52a27982579afa3e599e6c5ab882c2b9db | [
"MIT"
] | permissive | Ticunas/Sentiment-Analysis-for-Twitter | 76dba3f755246712a4998ac9cf5454dfc935220d | e5eabb5f1a4228ecbbcbdbb4978801c1093fd6c7 | refs/heads/master | 2020-03-09T00:07:50.839051 | 2018-04-25T03:40:59 | 2018-04-25T03:40:59 | 128,480,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 23 00:38:05 2018
@author: Tadman Reis
"""
from nltk.classify import NaiveBayesClassifier
import pandas as pd
import re
def _word_feats(words):
return dict([(word, True) for word in words])
def _clean_sentence(sentence):
sentence = sentence.lower()
sentence = ' '.join(re.sub("(@[a-zA-Zà-úÀ0-Ú0-9]+)|([^a-zA-Zà-úÀ0-Ú0-9]| (\w+:\/\/\S+))",
" ", sentence).split())
return sentence.split(' ')
def analyse_setence(sentence):
    """Score *sentence* polarity in [-1, 1] with the module classifier.

    NOTE(review): ``_word_feats(word)`` iterates the *characters* of each
    word (``word`` is a string), so classification is effectively
    character-based; behavior preserved as-is from the original.
    """
    _creating_classifier()
    words = _clean_sentence(sentence)
    negatives = 0
    positives = 0
    for word in words:
        label = classifier.classify(_word_feats(word))
        if label == 'neg':
            negatives += 1
        if label == 'pos':
            positives += 1
    return float(positives) / len(words) - float(negatives) / len(words)
def _creating_classifier():
    """Train the module-level Naive Bayes classifier from the CSV lexicon.

    You can use your own dictionary; it must provide the columns
    'Positive', 'Negative' and 'Neutral'.
    """
    global classifier
    lexicon = pd.read_csv('Dictionary_pt - Sheet1.csv')
    # Same training multiset (and ordering) as the original:
    # negatives, then positives, then neutrals.
    train_set = []
    for column, label in (('Negative', 'neg'), ('Positive', 'pos'), ('Neutral', 'neu')):
        train_set.extend((_word_feats(word), label) for word in lexicon[column])
    classifier = NaiveBayesClassifier.train(train_set)
| [
"tadman.freis@hotmail.com"
] | tadman.freis@hotmail.com |
5f1fd406e54591565450a64a7ce942a90dd558e7 | 8fe6993366229375a1f3978be5fda313476648b9 | /.eggs/PyScaffold-2.5.11-py2.7.egg/pyscaffold/contrib/__init__.py | 821046089738a7c3cc122fef219908509f3078f1 | [] | no_license | ArkhipovK/NER-report3 | 9b6fe6981abc884dec6e48831dff70257ba0efae | 150f7543050c73a89dc807fafdf75ded8ace25dd | refs/heads/master | 2020-03-31T12:15:46.727011 | 2019-01-22T11:12:52 | 2019-01-22T11:12:52 | 152,209,204 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,630 | py | # -*- coding: utf-8 -*-
"""
Contribution packages used by PyScaffold
All packages inside ``contrib`` are external packages that come with their
own licences and are not part of the PyScaffold sourcecode itself.
The reason for shipping these dependencies directly is to avoid problems in
the resolution of ``setup_requires`` dependencies that occurred more often
than not, see issues #71 and #72.
All contribution packages were added with the help of ``git subtree`` (git
version 1.7.11 and above)::
git subtree add --prefix pyscaffold/contrib/setuptools_scm --squash \
https://github.com/pypa/setuptools_scm.git v1.10.1
git subtree add --prefix pyscaffold/contrib/pbr --squash \
https://github.com/openstack-dev/pbr.git 1.8.1
Updating works with::
git subtree pull --prefix pyscaffold/contrib/setuptools_scm \
https://github.com/pypa/setuptools_scm.git NEW_TAG --squash
git subtree pull --prefix pyscaffold/contrib/pbr \
https://github.com/openstack-dev/pbr.git NEW_TAG --squash
Using ``subtree`` instead of git's ``submodule`` had several advantages.
.. note::
Updating pbr like described above only works if there was no change in the
pbr directory but in most cases we remove `test-requirements.txt` files
since otherwise Travis complains about them. In order to update it's best
to completely remove `contrib/pbr` first and then use the command above.
"""
from __future__ import division, print_function, absolute_import
import os
import sys
import inspect
from contextlib import contextmanager
from importlib import import_module
# Absolute directory of this file, resolved at import time; used below to
# locate the vendored contrib packages.
__location__ = os.path.join(os.getcwd(), os.path.dirname(
    inspect.getfile(inspect.currentframe())))
@contextmanager
def add_dir_to_syspath(path):
    """Temporarily insert *path* at position 1 of :obj:`sys.path`.

    :param path: path as string
    """
    sys.path.insert(1, path)
    try:
        yield
    finally:
        # Nothing else may have touched slot 1 in the meantime.
        assert sys.path[1] == path
        sys.path.pop(1)
def import_mod(module, path):
    """
    Imports a module from a directory path

    :param module: module name as string
    :param path: path as string; prepended to ``sys.path`` only for the
        duration of the import
    :return: the imported module
    """
    with add_dir_to_syspath(path):
        return import_module(module)
pbr_path = os.path.join(__location__, 'pbr')
scm_path = os.path.join(__location__, 'setuptools_scm')

# Import contribution packages from their vendored directories.
pbr_json = import_mod('pbr.pbr_json', pbr_path)
pbr_core = import_mod('pbr.core', pbr_path)
scm = import_mod('setuptools_scm', scm_path)
scm_integration = import_mod('setuptools_scm.integration', scm_path)
scm_version = import_mod('setuptools_scm.version', scm_path)
scm_git = import_mod('setuptools_scm.git', scm_path)
scm_hg = import_mod('setuptools_scm.hg', scm_path)
scm_hacks = import_mod('setuptools_scm.hacks', scm_path)

# Functions used by integration module: flat aliases so the rest of
# PyScaffold never imports the vendored packages directly.
pbr_read_setup_cfg = pbr_core.pbr
scm_get_version = scm.get_version
scm_find_files = scm_integration.find_files
scm_parse_hg = scm_hg.parse
scm_parse_git = scm_git.parse
scm_parse_archival = scm_hg.parse_archival
scm_parse_pkginfo = scm_hacks.parse_pkginfo
scm_list_files_in_archive = scm_git.list_files_in_archive
SCM_GH_FILES_COMMAND = scm_hg.FILES_COMMAND
scm_guess_next_dev_version = scm_version.guess_next_dev_version
scm_postrelease_version = scm_version.postrelease_version
scm_get_local_node_and_date = scm_version.get_local_node_and_date
scm_get_local_node_and_timestamp = scm_version.get_local_node_and_timestamp
scm_get_local_dirty_tag = scm_version.get_local_dirty_tag
write_pbr_json = pbr_json.write_pbr_json
# True when the installed setuptools is too old for setuptools_scm.
scm_setuptools_too_old = scm_version.VERSION_CLASS is None
| [
"tehbitardcity@gmail.com"
] | tehbitardcity@gmail.com |
77c64fb59368c9657f280b0c7ee631cebf1c2218 | 41de4210af23a8a8a3ca7dd090bb51faecf4a0c8 | /lib/python3.5/site-packages/statsmodels/stats/inter_rater.py | a2ee16b61ac6a112056d7aa8806b4121f13d39f5 | [
"Python-2.0"
] | permissive | randybrown-github/ziplineMacOS | 42a0c2bfca2a54baa03d2803dc41317647811285 | eb5872c0903d653e19f259f0800fb7aecee0ee5c | refs/heads/master | 2022-11-07T15:51:39.808092 | 2020-06-18T20:06:42 | 2020-06-18T20:06:42 | 272,631,387 | 0 | 1 | null | 2022-11-02T03:21:45 | 2020-06-16T06:48:53 | Python | UTF-8 | Python | false | false | 19,346 | py | # -*- coding: utf-8 -*-
"""Inter Rater Agreement
contains
--------
fleiss_kappa
cohens_kappa
aggregate_raters:
helper function to get data into fleiss_kappa format
to_table:
helper function to create contingency table, can be used for cohens_kappa
Created on Thu Dec 06 22:57:56 2012
Author: Josef Perktold
License: BSD-3
References
----------
Wikipedia: kappa's initially based on these two pages
http://en.wikipedia.org/wiki/Fleiss%27_kappa
http://en.wikipedia.org/wiki/Cohen's_kappa
SAS-Manual : formulas for cohens_kappa, especially variances
see also R package irr
TODO
----
standard errors and hypothesis tests for fleiss_kappa
other statistics and tests,
in R package irr, SAS has more
inconsistent internal naming, changed variable names as I added more
functionality
convenience functions to create required data format from raw data
DONE
"""
from __future__ import division
import numpy as np
from scipy import stats #get rid of this? need only norm.sf
class ResultsBunch(dict):
    """Dict subclass whose items are also attributes.

    Subclasses may override ``template`` and ``_initialize`` to control
    the string representation and to compute derived results.
    """

    template = '%r'

    def __init__(self, **kwds):
        super(ResultsBunch, self).__init__(kwds)
        # Share storage between item access and attribute access.
        self.__dict__ = self
        self._initialize()

    def _initialize(self):
        """Hook for subclasses; does nothing here."""
        pass

    def __str__(self):
        return self.template % self
def _int_ifclose(x, dec=1, width=4):
'''helper function for creating result string for int or float
only dec=1 and width=4 is implemented
Parameters
----------
x : int or float
value to format
dec : 1
number of decimals to print if x is not an integer
width : 4
width of string
Returns
-------
xint : int or float
x is converted to int if it is within 1e-14 of an integer
x_string : str
x formatted as string, either '%4d' or '%4.1f'
'''
xint = int(round(x))
if np.max(np.abs(xint - x)) < 1e-14:
return xint, '%4d' % xint
else:
return x, '%4.1f' % x
def aggregate_raters(data, n_cat=None):
    """Convert (subject, rater) category assignments to per-subject counts.

    Produces the input format required by ``fleiss_kappa``.  ``bincount``
    raises if the data cannot be converted to integers.

    Parameters
    ----------
    data : array_like, 2-D
        Category assignment with subjects in rows and raters in columns.
    n_cat : None or int
        If None, the observed levels are relabeled to 0, 1, ..., n_cat-1
        (only levels with non-zero counts appear).  If an int, the data
        are assumed to already be integer levels 0, 1, ..., n_cat-1, and
        zero-count columns may appear in the result.

    Returns
    -------
    arr : ndarray, (n_rows, n_cat)
        Number of raters assigning each category level to each subject.
    cat_uni : ndarray
        Category levels corresponding to the columns of ``arr``.
    """
    arr = np.asarray(data)
    if n_cat is None:
        # Relabel whatever levels occur to consecutive integer codes.
        levels, flat_codes = np.unique(arr.ravel(), return_inverse=True)
        n_cat = len(levels)
        codes = flat_codes.reshape(arr.shape)
    else:
        levels = np.arange(n_cat)  # assumed levels, for the return value only
        codes = arr
    counts = np.zeros((arr.shape[0], n_cat), int)
    for row_idx, subject_codes in enumerate(codes):
        freq = np.bincount(subject_codes)
        counts[row_idx, :len(freq)] = freq
    return counts, levels
def to_table(data, bins=None):
    """Convert raw (subject, rater) data into a raters contingency table.

    Produces the input format required by ``cohens_kappa``.

    Parameters
    ----------
    data : array_like, 2-D
        Category assignment with subjects in rows and raters in columns.
    bins : None, int or tuple of array_like
        None: observed levels are first relabeled to 0..n_cat-1.
        int: the data are assumed to already be integer levels 0..bins-1.
        tuple of array_like: handed straight to ``numpy.histogramdd``
        (useful for merging categories).

    Returns
    -------
    arr : ndarray
        Contingency table of counts with rater1 in rows and rater2 in
        columns; one dimension per rater when there are more than two.
    bins_ : ndarray or tuple
        The bin edges used (``bins`` unchanged when given explicitly).

    Notes
    -----
    No NaN handling -- drop rows with missing values beforehand.
    """
    arr = np.asarray(data)
    n_raters = arr.shape[1]
    if bins is None:
        # Relabel observed levels to 0..n_cat-1 and center unit-wide bins.
        uniques, flat_codes = np.unique(arr.ravel(), return_inverse=True)
        coded = flat_codes.reshape(arr.shape)
        edges = np.arange(len(uniques) + 1) - 0.5
    elif np.isscalar(bins):
        coded = arr
        edges = np.arange(bins + 1) - 0.5
    else:
        coded = arr
        edges = bins
    table, _ = np.histogramdd(coded, (edges,) * n_raters)
    return table, edges
def fleiss_kappa(table, method='fleiss'):
    """Fleiss' or Randolph's kappa multi-rater agreement measure.

    Parameters
    ----------
    table : array_like, 2-D
        Counts with subjects in rows and categories in columns; every
        subject must be rated by the same number of raters.
    method : string
        'fleiss' (default) benchmarks chance agreement against the sample
        category margins (Fleiss 1971).  'randolph' or 'uniform' (only the
        first 4 letters are needed) benchmarks against a uniform category
        distribution (Randolph 2005).

    Returns
    -------
    kappa : float
        Inter-rater agreement statistic.

    Notes
    -----
    No variance or hypothesis tests yet.  Randolph's kappa is always
    larger than or equal to Fleiss' kappa (Warrens 2010).
    """
    counts = 1. * np.asarray(table)  # float so later divisions are exact
    n_sub, n_cat = counts.shape
    n_total = counts.sum()
    n_rat = counts.sum(1).max()
    # The design must be fully crossed: n_rat raters for every subject.
    assert n_total == n_sub * n_rat

    # Observed pairwise agreement per subject, averaged over subjects.
    agree_subject = ((counts * counts).sum(1) - n_rat) / (n_rat * (n_rat - 1.))
    p_observed = agree_subject.mean()

    # Chance agreement under the chosen benchmark distribution.
    if method == 'fleiss':
        margins = counts.sum(0) / n_total
        p_chance = (margins * margins).sum()
    elif method.startswith('rand') or method.startswith('unif'):
        p_chance = 1 / n_cat

    return (p_observed - p_chance) / (1 - p_chance)
def cohens_kappa(table, weights=None, return_results=True, wt=None):
    '''Compute Cohen's kappa with variance and equal-zero test
    Parameters
    ----------
    table : array_like, 2-Dim
        square array with results of two raters, one rater in rows, second
        rater in columns
    weights : array_like
        The interpretation of weights depends on the wt argument.
        If both are None, then the simple kappa is computed.
        see wt for the case when wt is not None
        If weights is two dimensional, then it is directly used as a weight
        matrix. For computing the variance of kappa, the maximum of the
        weights is assumed to be smaller or equal to one.
        TODO: fix conflicting definitions in the 2-Dim case for
    wt : None or string
        If wt and weights are None, then the simple kappa is computed.
        If wt is given, but weights is None, then the weights are set to
        be [0, 1, 2, ..., k].
        If weights is a one-dimensional array, then it is used to construct
        the weight matrix given the following options.
        wt in ['linear', 'ca' or None] : use linear weights, Cicchetti-Allison
            actual weights are linear in the score "weights" difference
        wt in ['quadratic', 'fc'] : use linear weights, Fleiss-Cohen
            actual weights are squared in the score "weights" difference
        wt = 'toeplitz' : weight matrix is constructed as a toeplitz matrix
            from the one dimensional weights.
    return_results : bool
        If True (default), then an instance of KappaResults is returned.
        If False, then only kappa is computed and returned.
    Returns
    -------
    results or kappa
        If return_results is True (default), then a results instance with all
        statistics is returned
        If return_results is False, then only kappa is calculated and returned.
    Notes
    -----
    There are two conflicting definitions of the weight matrix, Wikipedia
    versus SAS manual. However, the computation are invariant to rescaling
    of the weights matrix, so there is no difference in the results.
    Weights for 'linear' and 'quadratic' are interpreted as scores for the
    categories, the weights in the computation are based on the pairwise
    difference between the scores.
    Weights for 'toeplitz' are a interpreted as weighted distance. The distance
    only depends on how many levels apart two entries in the table are but
    not on the levels themselves.
    example:
    weights = '0, 1, 2, 3' and wt is either linear or toeplitz means that the
    weighting only depends on the simple distance of levels.
    weights = '0, 0, 1, 1' and wt = 'linear' means that the first two levels
    are zero distance apart and the same for the last two levels. This is
    the sampe as forming two aggregated levels by merging the first two and
    the last two levels, respectively.
    weights = [0, 1, 2, 3] and wt = 'quadratic' is the same as squaring these
    weights and using wt = 'toeplitz'.
    References
    ----------
    Wikipedia
    SAS Manual
    '''
    table = np.asarray(table, float) #avoid integer division
    # observed agreement: sum of the diagonal (both raters chose same category)
    agree = np.diag(table).sum()
    nobs = table.sum()
    probs = table / nobs
    freqs = probs #TODO: rename to use freqs instead of probs for observed
    probs_diag = np.diag(probs)
    # marginal frequencies per rater (rows = rater 1, columns = rater 2)
    freq_row = table.sum(1) / nobs
    freq_col = table.sum(0) / nobs
    # expected cell proportions under independence (chance agreement)
    prob_exp = freq_col * freq_row[:, None]
    assert np.allclose(prob_exp.sum(), 1)
    #print prob_exp.sum()
    agree_exp = np.diag(prob_exp).sum() #need for kappa_max
    if weights is None and wt is None:
        # simple (unweighted) Cohen's kappa
        kind = 'Simple'
        kappa = (agree / nobs - agree_exp) / (1 - agree_exp)
        if return_results:
            #variance
            # NOTE(review): asymptotic variance formulas; presumably the
            # Fleiss-Cohen-Everitt large-sample results -- confirm against
            # the reference implementation / literature.
            term_a = probs_diag * (1 - (freq_row + freq_col) * (1 - kappa))**2
            term_a = term_a.sum()
            term_b = probs * (freq_col[:, None] + freq_row)**2
            d_idx = np.arange(table.shape[0])
            term_b[d_idx, d_idx] = 0 #set diagonal to zero
            term_b = (1 - kappa)**2 * term_b.sum()
            term_c = (kappa - agree_exp * (1-kappa))**2
            var_kappa = (term_a + term_b - term_c) / (1 - agree_exp)**2 / nobs
            #term_c = freq_col * freq_row[:, None] * (freq_col + freq_row[:,None])
            # variance under the null hypothesis kappa = 0
            term_c = freq_col * freq_row * (freq_col + freq_row)
            var_kappa0 = (agree_exp + agree_exp**2 - term_c.sum())
            var_kappa0 /= (1 - agree_exp)**2 * nobs
    else:
        if weights is None:
            # default scores 0..k-1 when only wt was given
            weights = np.arange(table.shape[0])
        #weights follows the Wikipedia definition, not the SAS, which is 1 -
        kind = 'Weighted'
        weights = np.asarray(weights, float)
        if weights.ndim == 1:
            # build the full weight matrix from 1-dim category scores
            if wt in ['ca', 'linear', None]:
                weights = np.abs(weights[:, None] - weights) / \
                           (weights[-1] - weights[0])
            elif wt in ['fc', 'quadratic']:
                weights = (weights[:, None] - weights)**2 / \
                           (weights[-1] - weights[0])**2
            elif wt == 'toeplitz':
                #assume toeplitz structure
                from scipy.linalg import toeplitz
                #weights = toeplitz(np.arange(table.shape[0]))
                weights = toeplitz(weights)
            else:
                raise ValueError('wt option is not known')
        else:
            # NOTE(review): rows/cols are unpacked but unused, and the error
            # message says "square" while the check is actually a
            # table/weights shape match.
            rows, cols = table.shape
            if (table.shape != weights.shape):
                raise ValueError('weights are not square')
        #this is formula from Wikipedia
        kappa = 1 - (weights * table).sum() / nobs / (weights * prob_exp).sum()
        #TODO: add var_kappa for weighted version
        if return_results:
            var_kappa = np.nan
            var_kappa0 = np.nan
            #switch to SAS manual weights, problem if user specifies weights
            #w is negative in some examples,
            #but weights is scale invariant in examples and rough check of source
            w = 1. - weights
            w_row = (freq_col * w).sum(1)
            w_col = (freq_row[:, None] * w).sum(0)
            agree_wexp = (w * freq_col * freq_row[:, None]).sum()
            term_a = freqs * (w - (w_col + w_row[:, None]) * (1 - kappa))**2
            fac = 1. / ((1 - agree_wexp)**2 * nobs)
            var_kappa = term_a.sum() - (kappa - agree_wexp * (1 - kappa))**2
            var_kappa *= fac
            freqse = freq_col * freq_row[:, None]
            var_kappa0 = (freqse * (w - (w_col + w_row[:, None]))**2).sum()
            var_kappa0 -= agree_wexp**2
            var_kappa0 *= fac
    # maximum attainable kappa given the marginals (used in the results object)
    kappa_max = (np.minimum(freq_row, freq_col).sum() - agree_exp) / \
                (1 - agree_exp)
    if return_results:
        res = KappaResults( kind=kind,
                    kappa=kappa,
                    kappa_max=kappa_max,
                    weights=weights,
                    var_kappa=var_kappa,
                    var_kappa0=var_kappa0
                    )
        return res
    else:
        return kappa
_kappa_template = '''\
%(kind)s Kappa Coefficient
--------------------------------
Kappa %(kappa)6.4f
ASE %(std_kappa)6.4f
%(alpha_ci)s%% Lower Conf Limit %(kappa_low)6.4f
%(alpha_ci)s%% Upper Conf Limit %(kappa_upp)6.4f
Test of H0: %(kind)s Kappa = 0
ASE under H0 %(std_kappa0)6.4f
Z %(z_value)6.4f
One-sided Pr > Z %(pvalue_one_sided)6.4f
Two-sided Pr > |Z| %(pvalue_two_sided)6.4f
'''
'''
Weighted Kappa Coefficient
--------------------------------
Weighted Kappa 0.4701
ASE 0.1457
95% Lower Conf Limit 0.1845
95% Upper Conf Limit 0.7558
Test of H0: Weighted Kappa = 0
ASE under H0 0.1426
Z 3.2971
One-sided Pr > Z 0.0005
Two-sided Pr > |Z| 0.0010
'''
class KappaResults(ResultsBunch):
    '''Results for Cohen's kappa
    Attributes
    ----------
    kappa : cohen's kappa
    var_kappa : variance of kappa
    std_kappa : standard deviation of kappa
    alpha : one-sided probability for confidence interval
    kappa_low : lower (1-alpha) confidence limit
    kappa_upp : upper (1-alpha) confidence limit
    var_kappa0 : variance of kappa under H0: kappa=0
    std_kappa0 : standard deviation of kappa under H0: kappa=0
    z_value : test statistic for H0: kappa=0, is standard normal distributed
    pvalue_one_sided : one sided p-value for H0: kappa=0 and H1: kappa>0
    pvalue_two_sided : two sided p-value for H0: kappa=0 and H1: kappa!=0
    distribution_kappa : asymptotic normal distribution of kappa
    distribution_zero_null : asymptotic normal distribution of kappa under
        H0: kappa=0
    The confidence interval for kappa and the statistics for the test of
    H0: kappa=0 are based on the asymptotic normal distribution of kappa.
    '''

    template = _kappa_template

    def _initialize(self):
        # one-sided tail probability; default gives a 95% two-sided CI
        if 'alpha' not in self:
            self['alpha'] = 0.025
        # BUG FIX: the CI label was previously hardcoded as
        # _int_ifclose(100 - 0.025 * 200), so a user-supplied alpha was
        # printed as "95%" regardless; derive the label from the actual alpha.
        self['alpha_ci'] = _int_ifclose(100 - self['alpha'] * 200)[1]
        self['std_kappa'] = np.sqrt(self['var_kappa'])
        self['std_kappa0'] = np.sqrt(self['var_kappa0'])
        # z statistic and p-values for the test of H0: kappa = 0
        self['z_value'] = self['kappa'] / self['std_kappa0']
        self['pvalue_one_sided'] = stats.norm.sf(self['z_value'])
        self['pvalue_two_sided'] = stats.norm.sf(np.abs(self['z_value'])) * 2
        # normal-approximation confidence limits around kappa
        delta = stats.norm.isf(self['alpha']) * self['std_kappa']
        self['kappa_low'] = self['kappa'] - delta
        self['kappa_upp'] = self['kappa'] + delta
        self['distribution_kappa'] = stats.norm(loc=self['kappa'],
                                                scale=self['std_kappa'])
        self['distribution_zero_null'] = stats.norm(loc=0,
                                                    scale=self['std_kappa0'])

    def __str__(self):
        return self.template % self
| [
"randybrown18@me.com"
] | randybrown18@me.com |
20bc07fa1e2d06f6313b56656a9fa09a69569d74 | b5c220fdc79864004fc6b0868156c480355cf6cd | /bugtracker2/tickets/models.py | 45f6e6fdb5d880f37032b9320a0ce2d7785946cd | [] | no_license | mander5/BugTracker | 82ed20e046cdc73497c39d4dee73816288cd86a3 | 183ee19a4693164ddb7cd628b57d585e36af02b2 | refs/heads/master | 2022-08-16T01:53:23.439920 | 2020-06-02T16:23:45 | 2020-06-02T16:23:45 | 267,913,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | from django.db import models
from django.urls import reverse
from django.conf import settings
import misaka
from projects.models import Project
# Create your models here.
from django.contrib.auth import get_user_model
User = get_user_model()
class Ticket(models.Model):
    # A bug-tracker ticket written by a user, optionally attached to a Project.
    user = models.ForeignKey(User,related_name='tickets', on_delete=models.CASCADE)
    # NOTE(review): auto_now updates the timestamp on every save; auto_now_add
    # is the usual choice for a creation time -- confirm intent.
    created_at = models.DateTimeField(auto_now=True)
    # raw markdown source entered by the user
    message = models.TextField()
    # HTML rendering of `message`, regenerated on save (see save below)
    message_html = models.TextField(editable=False)
    project = models.ForeignKey(Project, related_name='tickets', null=True, blank=True, on_delete=models.CASCADE)
    def __str__(self):
        return self.message
    def save(self,*args,**kwargs):
        # render the markdown message to HTML before persisting
        self.message_html = misaka.html(self.message)
        super().save(*args,**kwargs)
    def get_absolute_url(self):
        # detail view for this ticket, keyed by author username and pk
        return reverse('tickets:single',kwargs={'username':self.user.username,'pk':self.pk})
    class Meta:
        ordering = ['-created_at']
        unique_together = ['user','message']
| [
"64420595+mander5@users.noreply.github.com"
] | 64420595+mander5@users.noreply.github.com |
a561294f414cdede2677f634fa2f2688d79bdc8d | ef187d259d33e97c7b9ed07dfbf065cec3e41f59 | /work/atcoder/abc/abc042/A/answers/14790_tomatohakase.py | 9e999407dd318c9db54c654161c95688f0f3533a | [] | no_license | kjnh10/pcw | 847f7295ea3174490485ffe14ce4cdea0931c032 | 8f677701bce15517fb9362cc5b596644da62dca8 | refs/heads/master | 2020-03-18T09:54:23.442772 | 2018-07-19T00:26:09 | 2018-07-19T00:26:09 | 134,586,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | cond = [int(n) for n in input().split(" ")]
# Count how many of the input numbers are 5 and how many are 7
# (a valid Iroha haiku needs exactly two 5s and one 7).
five,seven = 0,0
for c in cond:
    if c == 5:
        five += 1
    elif c == 7:
        seven += 1
    else:
        # NOTE(review): as indented, this branch only runs for values other
        # than 5/7, so nothing is printed for inputs consisting solely of 5s
        # and 7s, and the verdict can print multiple times. It looks like it
        # was intended as a for/else (print once after the loop) -- confirm
        # against the original submission's indentation.
        if five == 2 and seven == 1:
            print("YES")
        else:
            print("NO")
"kojinho10@gmail.com"
] | kojinho10@gmail.com |
8ce19e4677ee96e3ff5e62ff2ebc87cd99f3bf5e | 13426e3cbd9f2ec27fd58571f313a49ed6a2d1f9 | /todo/models.py | 2cf7afadd2168328b3866c622f13bda6f8f90245 | [] | no_license | abuhijleh/pipeline-test | 9d326cd42c397d0930c30d722401584bc7432c4e | 6582c629595c3aef86e8c7c91794811a1f343320 | refs/heads/master | 2023-08-02T16:25:35.514425 | 2021-03-25T10:51:46 | 2021-03-25T10:51:46 | 324,395,376 | 0 | 1 | null | 2021-09-22T19:41:08 | 2020-12-25T16:25:12 | Python | UTF-8 | Python | false | false | 271 | py | from django.db import models
# Create your models here.
class Item(models.Model):
    # A single to-do entry with a short name and a done/not-done flag.
    name = models.CharField(max_length=50, null=False, blank=False)
    done = models.BooleanField(default=False, null=False, blank=False)
    def __str__(self):
        return self.name
| [
"abuhijleh@abuhijleh.net"
] | abuhijleh@abuhijleh.net |
7ca6a1f200f35c09c861cf3bb0cefddde58fbcbe | 9659ebdfb1a6f5e5691a880bb5768d1fa5a52215 | /HW07/provision.py | d9bc35279529d6a6284f86fd549b03d882251746 | [] | no_license | timpson78/otus-linux | 5119ecf41d8f77df3a6673b9d19be43cce97d7f4 | 0bf64b2f3b2863a35f382fe2ea2a7bb50382f99b | refs/heads/master | 2020-12-30T06:25:01.562159 | 2020-03-26T11:41:26 | 2020-03-26T11:41:26 | 238,891,726 | 0 | 0 | null | 2020-02-07T09:58:21 | 2020-02-07T09:58:20 | null | UTF-8 | Python | false | false | 734 | py | #!/usr/bin/python
import os
# Ordered provisioning steps: relax SELinux, install packages, deploy the
# bundled config/unit files, then start the timers and services.
provision_commands = [
    'setenforce Permissive',
    'yum install epel-release -y && yum install spawn-fcgi php php-cli mod_fcgid httpd -y',
    'cp ./opt/* /opt/',
    'chmod 755 /opt/watchlog.sh',
    'cp ./var/log/* /var/log/',
    'cp ./etc/sysconfig/* /etc/sysconfig/',
    'cp -f ./etc/systemd/system/* /etc/systemd/system/',
    'cp -f ./etc/rc.d/init.d/* /etc/rc.d/init.d/',
    'cp -f ./etc/httpd/conf/* /etc/httpd/conf/',
    'cp -f ./usr/lib/systemd/system/* /usr/lib/systemd/system/',
    'systemctl start watchlog.timer',
    'systemctl start spawn-fcgi',
    'systemctl start httpd@first',
    'systemctl start httpd@second',
]
for shell_command in provision_commands:
    os.system(shell_command)
"kubkredit@mail.ru"
] | kubkredit@mail.ru |
131e0c45e5f19bb98323d41650bdd1da3ce8557f | 6495942c22be240bb138a72f02519ec1f16cc976 | /jumeg_cfg_gui01.py | 55fc7eb263467c771a479b7faf0b963d1fd969e9 | [
"BSD-3-Clause"
] | permissive | fboers/JuMEGwxGUI | ba29f8070b6a751365bd23e5a92efd26a2379e00 | 0f2c3b8a997338cd3bda48f84a42a9b012dee05c | refs/heads/master | 2020-08-30T02:05:50.622339 | 2019-10-29T10:59:59 | 2019-10-29T10:59:59 | 218,231,556 | 0 | 0 | BSD-3-Clause | 2019-10-29T11:00:00 | 2019-10-29T07:48:26 | null | UTF-8 | Python | false | false | 4,573 | py | #!/usr/bin/env python3
# -+-coding: utf-8 -+-
"""
"""
#--------------------------------------------
# Authors: Frank Boers <f.boers@fz-juelich.de>
#
#--------------------------------------------
# Date: 29.10.19
#--------------------------------------------
# License: BSD (3-clause)
#--------------------------------------------
# Updates
#--------------------------------------------
import os,sys,argparse
import wx
from jumeg_base_config import JuMEG_CONFIG_YAML_BASE
import logging
from jumeg.base import jumeg_logger
logger = logging.getLogger('jumeg')
__version__="2019-10-29-001"
class CtrlPanel(wx.Panel):
    """wx panel that loads a YAML config and offers a button to print it."""
    def __init__(self,parent,**kwargs):
        super().__init__(parent)
        self._wx_init(**kwargs)
        self._ApplyLayout()

    # read-only access to the loaded JuMEG_CONFIG_YAML_BASE instance
    @property
    def cfg(self): return self._CFG

    def _wx_init(self,**kwargs):
        """Create child widgets and load the configuration from kwargs."""
        self.SetBackgroundColour(wx.GREEN)
       #--- load cfg
        self._CFG = JuMEG_CONFIG_YAML_BASE(**kwargs)
        self._CFG.update(**kwargs)
       #--- init panel
        self._pnl1 = wx.Panel(self)
        self._pnl1.SetBackgroundColour(wx.BLUE)
       #--- init show button
        self._bt = wx.Button(self,label="Show Config",name="BT_INFO")
        self.Bind(wx.EVT_BUTTON,self.ClickOnButton)

    def _ApplyLayout(self):
        """Stack separator / content panel / separator / button vertically."""
        LEA = wx.LEFT|wx.EXPAND|wx.ALL
        vbox = wx.BoxSizer(wx.VERTICAL)
      #---
        st1 = wx.StaticLine(self)
        st1.SetBackgroundColour("GREY85")
        st2 = wx.StaticLine(self)
        st2.SetBackgroundColour("GREY80")
        # proportion=1 lets the content panel absorb extra vertical space
        vbox.Add(st1,0,LEA,1)
        vbox.Add(self._pnl1,1,LEA,1)
        vbox.Add(st2,0,LEA,1)
        vbox.Add(self._bt,0,LEA,2)
        self.SetAutoLayout(True)
        self.SetSizer(vbox)
        self.Fit()
        self.Layout()

    def ClickOnButton(self,evt):
        """Button handler: dump the loaded config via its info() method."""
        obj = evt.GetEventObject()
        if obj.GetName() == "BT_INFO":
           self.cfg.info()
class MainWindow(wx.Frame):
    """Top-level frame hosting the config CtrlPanel and a status bar."""

    def __init__(self, parent, title, **kwargs):
        wx.Frame.__init__(self, parent, -1, title=title)
        self._wx_init(**kwargs)

    def _update_from_kwargs(self, **kwargs):
        # placeholder for future frame-level options; nothing handled yet
        pass

    def _wx_init(self, **kwargs):
        """Size and populate the frame; kwargs are forwarded to CtrlPanel."""
        w, h = wx.GetDisplaySize()
        # BUG FIX: SetSize was called with floats (w/4.0, h/3.0); wxPython
        # Phoenix expects integer pixel sizes, so convert explicitly.
        self.SetSize(int(w / 4.0), int(h / 3.0))
        self.Center()
        self._update_from_kwargs(**kwargs)
        #--- init STB in a new CLS
        self._STB = self.CreateStatusBar()
        self._PNL = CtrlPanel(self, **kwargs)
        #--- ToDo init OnClose
        #self.Bind(wx.EVT_CLOSE, self.ClickOnClose)
#---
def run(opt):
    """Resolve the config file path from the CLI options and start the GUI.

    In debug mode a fixed test configuration is forced and verbose output
    is switched on.
    """
    if opt.debug:
        opt.verbose = True
        opt.debug = True
        opt.path = "./config/"
        opt.config = "test_config.yaml"

    # prepend the config directory only when one was given
    cfg = os.path.join(opt.path, opt.config) if opt.path else opt.config

    app = wx.App()
    frame = MainWindow(None, 'JuMEG Config',
                       config=cfg, verbose=opt.verbose, debug=opt.debug)
    frame.Show()
    app.MainLoop()
#----
def get_args(argv):
    """Parse the command line for the JuMEG config GUI.

    Parameters
    ----------
    argv : list of str
        Full argument vector including the program name (e.g. ``sys.argv``).

    Returns
    -------
    opt : argparse.Namespace
        Parsed options (path, config, verbose, debug).
    parser : argparse.ArgumentParser
        The parser itself, so callers can print help.

    Notes
    -----
    ``store_true`` flags are re-derived from *argv* directly: argparse cannot
    switch such a flag off from the command line, so each flag is reset and
    only set True when its option string literally appears in *argv*.
    """
    info_global = """
    JuMEG Config GUI Start Parameter
    ---> view time series data FIF file
    jumeg_cfg_gui01.py --config=test_config.yaml --path=./config -v
    """
    # FIX: the usage text was passed positionally and therefore became the
    # parser's `prog` name, garbling help output; pass it as `description`.
    parser = argparse.ArgumentParser(description=info_global)
    parser.add_argument("-p", "--path", help="config file path")
    parser.add_argument("-cfg", "--config", help="config file name")
    parser.add_argument("-v", "--verbose", action="store_true", help="verbose mode")
    parser.add_argument("-d", "--debug", action="store_true", help="debug mode")

    # FIX: parse the argv that was passed in (minus the program name) instead
    # of implicitly re-reading sys.argv.
    opt = parser.parse_args(argv[1:])

    #--- init flags: re-derive store_true flags from argv (see Notes)
    for group in parser._action_groups:
        for action in group._group_actions:
            # FIX: robust isinstance check instead of matching the string
            # repr of the action type.
            if not isinstance(action, argparse._StoreTrueAction):
                continue
            if vars(opt).get(action.dest):
                opt.__dict__[action.dest] = False
                for flag in argv:
                    if flag in action.option_strings:
                        opt.__dict__[action.dest] = True
                        break
    return opt, parser
#=========================================================================================
#==== MAIN
#=========================================================================================
if __name__ == "__main__":
    # Parse CLI options; get_args expects the full sys.argv (incl. program name).
    opt,parser = get_args(sys.argv)
    if len(sys.argv) < 2:
        # no arguments given: show usage and abort
        parser.print_help()
        sys.exit(-1)
    # route script logging through the jumeg logging setup before the GUI starts
    jumeg_logger.setup_script_logging(name=sys.argv[0],opt=opt,logger=logger)
    run(opt)
"f.boers@fz-juelich.de"
] | f.boers@fz-juelich.de |
da2088e7e4204d7b3ab24fc6b5acd9b307c2b56c | 50fb25631cdc03a868f09061e76f4dedf85f2d3f | /crawler_sys/hot_words_crawler/crawler_toutiao.py | 6cec79d644d7641382dfe21fbc22ef52402c8fe2 | [] | no_license | silade/crawler | 20a88c0eb6471f79a5d5daf947dcbff681d11e6e | fbfe3c4feca8be61186aec986b600b36f513f7f4 | refs/heads/main | 2023-03-10T10:06:21.097103 | 2021-02-19T16:00:45 | 2021-02-19T16:00:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,406 | py | # -*- coding:utf-8 -*-
# @Time : 2020/3/3 14:51
# @Author : litao
# -*- coding:utf-8 -*-
# @Time : 2020/3/2 16:37
# @Author : litao
# -*- coding:utf-8 -*-
# @Time : 2020/3/2 11:07
# @Author : litao
# -*- coding:utf-8 -*-
# @Time : 2020/2/28 12:09
# @Author : litao
import requests
import json, re, datetime, urllib
from crawler.crawler_sys.utils.output_results import retry_get_url
from crawler.crawler_sys.utils.output_results import hot_words_output_result, output_result
from crawler.crawler_sys.utils.trans_strtime_to_timestamp import trans_strtime_to_timestamp
from crawler.crawler_sys.utils.trans_str_play_count_to_int import trans_play_count
from write_data_into_es.func_cal_doc_id import *
import random
from urllib.parse import parse_qs, urlparse
class Crawler_toutiao(object):
def __init__(self):
self.platform = "toutiao"
self.headers = {
"Host": "i.snssdk.com",
"Connection": "keep-alive",
"Accept": "application/json, text/javascript",
"X-Requested-With": "XMLHttpRequest",
"User-Agent": "Mozilla/5.0 (Linux; Android 5.1.1; OPPO R11 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.136 Mobile Safari/537.36 JsSdk/2 NewsArticle/7.6.3 NetType/wifi",
"Content-Type": "application/x-www-form-urlencoded",
"Referer": "https://i.snssdk.com/feoffline/hot_list/template/hot_list/?fe_api_version=2&is_in_channel=1&count=50&fe_source=hot_board&tab_name=tab_hot_board&is_web_refresh=1&style_type=18&client_extra_params=%7B%22hot_board_source%22%3A%22hot_board%22%7D&extra=%7B%22CardStyle%22%3A0%2C%22JumpToWebList%22%3Atrue%7D&category=hot_board&stream_api_version=88&tt_daymode=1&iid=105857671701&device_id=70787469432&ac=wifi&mac_address=48%3AA4%3A72%3A58%3A86%3AD5&channel=store_yingyonghui_0107&aid=13&app_name=news_article&version_code=763&version_name=7.6.3&device_platform=android&ab_version=801968%2C1419043%2C668775%2C1462526%2C1512584%2C1190522%2C1489307%2C1157750%2C1157634%2C1419598%2C1493796%2C1439625%2C1469498%2C668779%2C1417597%2C662099%2C1403340%2C668774%2C1509255%2C1396151%2C821967%2C857803%2C660830%2C1434501%2C662176%2C1491631&ab_feature=102749%2C94563&device_type=OPPO+R11&device_brand=OPPO&language=zh&os_api=22&os_version=5.1.1&uuid=866174725888628&openudid=48a4725886d57203&manifest_version_code=7630&resolution=900*1600&dpi=320&update_version_code=76309&_rticket=1583217846540&plugin=0&tma_jssdk_version=1.54.0.3&rom_version=coloros__r11-user+5.1.1+nmf26x+500200210+release-keys&cdid=754b9ff9-5880-48b2-ac40-3880effd3f33",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
}
def get_hot_words(self):
bulk_list = []
url = "https://i.snssdk.com/api/feed/hotboard_online/v1/?fe_api_version=2&is_in_channel=1&count=50&fe_source=hot_board&tab_name=tab_hot_board&is_web_refresh=1&style_type=18&client_extra_params=%7B%22hot_board_source%22%3A%22hot_board%22%2C%22fe_version%22%3A%22v11%22%7D&extra=%7B%22CardStyle%22%3A0%2C%22JumpToWebList%22%3Atrue%7D&category=hotboard_online&stream_api_version=88&tt_daymode=1&iid=105857671701&device_id=70787469432&ac=wifi&mac_address=48%3AA4%3A72%3A58%3A86%3AD5&channel=store_yingyonghui_0107&aid=13&app_name=news_article&version_code=763&version_name=7.6.3&device_platform=android&ab_version=801968%2C1419043%2C668775%2C1462526%2C1512584%2C1190522%2C1489307%2C1157750%2C1157634%2C1419598%2C1493796%2C1439625%2C1469498%2C668779%2C1417597%2C662099%2C1403340%2C668774%2C1509255%2C1396151%2C821967%2C857803%2C660830%2C1434501%2C662176%2C1491631&ab_feature=102749%2C94563&device_type=OPPO%2BR11&device_brand=OPPO&language=zh&os_api=22&os_version=5.1.1&uuid=866174725888628&openudid=48a4725886d57203&manifest_version_code=7630&resolution=900*1600&dpi=320&update_version_code=76309&plugin=0&tma_jssdk_version=1.54.0.3&rom_version=coloros__r11-user%2B5.1.1%2Bnmf26x%2B500200210%2Brelease-keys&cdid=754b9ff9-5880-48b2-ac40-3880effd3f33"
page_res = retry_get_url(url, headers=self.headers, proxies=3, timeout=5)
page_json = page_res.json()
for data in page_json["data"]:
contect = data["content"]
data = json.loads(contect)
schema = data["raw_data"]["schema"]
# search_str = urllib.parse.unquote(schema)
query = urlparse(schema).query # wd=python&ie=utf-8
params = parse_qs(query) # {'wd': ['python'], 'ie': ['utf-8']}
"""所得的字典的value都是以列表的形式存在,若列表中都只有一个值"""
result = {key: params[key][0] for key in params}
search_title = result.get("keyword")
# search_json = result.get("search_json")
if search_title:
dic = {
"platform": self.platform,
"title": search_title,
"fetch_time": int(datetime.datetime.now().timestamp() * 1e3),
"search_json": schema
}
bulk_list.append(dic)
hot_words_output_result(bulk_list)
return True
def search_page(self, title=None, search_json=None, **kwargs):
data_list = []
timestamp = int(datetime.datetime.now().timestamp() * 1e3)
title = urllib.parse.quote(title)
headers = {
"Accept-Encoding": "gzip",
# "X-SS-REQ-TICKET": "1587102750860",
"passport-sdk-version": "14",
"sdk-version": "2",
#"Cookie": "odin_tt=d5d96b2812637e9d20681530fbbe4d52e8f76ae1b6afa8c0a173260321611c507ac6eca10991b21fc4f023e94371d457df784f959e94db673ef29a5bd2137091; qh[360]=1; history=alrvlFic6pJZXJCTWBmSmZt6KW6mevZSz5LU3OJ7DEKX42Zw%2Bc84wMR3iYGBweFy3EzZsPcNTLyXWN1AvLYP8%2BQPMLFfEpUA8bo%2F7nNtYOK7xNwC4k3XmMHe5MtzSTiM48DluNr01dkNTDyXuHrApsi4ejkwsV%2BSmAPmSeXoMzDxXhKcAuIVrRfWAJnJJwA25fG1DoezvFBTZrzZeg6kT%2BwWSG7Gx3UJB5h4L%2FH4gXlVn%2BtAtkvFMQRcjpv%2B%2Be9TBib2S%2BwcYBuUn8xsYGK%2FJKMAkptgfXrDASaOS4yHQHJVPy6UOjDxXuI4BeJN26Fs6MDEcYn%2FEoMDAAAA%2F%2F8%3D; install_id=112651077855; ttreq=1$0b37d53ca5c301ce96959dc97a67886da420b294",
# "X-Gorgon": "0401007140017aae019cc2020b1c48dbab0ba42839014487648a",
#"X-Khronos": "1587102750",
"Host": "is.snssdk.com",
"Connection": "Keep-Alive",
"User-Agent": "okhttp/3.10.0.1",
}
url = "https://is.snssdk.com/api/search/content/?os_api=23&device_type=oneplus+a5010&from_search_subtab=synthesis&manifest_version_code=7690&source=search_subtab_switch&offset=0&is_ttwebview=0&action_type&is_incognito=0&keyword_type&rom_version=23&app_name=news_article&format=json&version_name=7.6.9&ac=wifi&host_abi=armeabi-v7a&update_version_code=76909&channel=baidu_0411&is_native_req=1&loadId=1&longitude=116.40717530841052&isIncognito=0&plugin=2050&forum=1&latitude=39.904680919672145&language=zh&pd=video&cur_tab_title=search_tab&aid=13&dpi=270&qrecImprId&fetch_by_ttnet=1&count=10&plugin_enable=3&search_position&ab_group=100167%2C94569%2C102754&keyword={0}&scm_version=1.0.2.830&search_json=%7B%22comment_ids%22%3A%5B%5D%2C%22event_discussion%22%3A74123%2C%22event_impression%22%3A17270790%2C%22forum_id%22%3A1664181806902302%2C%22forum_recall_wtt%22%3A%5B1664190666034183%2C1664192273575943%2C1664184430218253%2C1664185769175051%2C1664184985139212%2C1664196237152267%2C1664186792648732%2C1664188755414019%2C1664187055838215%2C1664184182571022%2C1664185938950148%2C1664188041995268%2C1664188322863172%2C1664190185024520%2C1664185602828300%2C1664184276484099%2C1664188211399684%2C1664187870713868%2C1664184484958211%2C1664183864289288%2C1664186825487371%2C1664195548700686%2C1664186585780228%2C1664197296210947%2C1664188146725901%2C1664191748459523%5D%2C%22group_source%22%3Anull%2C%22hot_gid%22%3A6816255461172445703%2C%22log_pb%22%3A%7B%22cluster_type%22%3A%220%22%2C%22entrance_hotspot%22%3A%22channel%22%2C%22hot_board_cluster_id%22%3A%226816091697949180424%22%2C%22hot_board_impr_id%22%3A%22202004171352010100140411610B1A7741%22%2C%22location%22%3A%22hot_board%22%2C%22rank%22%3A%225%22%2C%22source%22%3A%22trending_tab%22%2C%22style_id%22%3A%2210005%22%7D%2C%22mix_stick_ids%22%3A%5B1664190666034183%2C1664192273575943%2C1664184430218253%2C1664185769175051%2C1664184985139212%2C1664196237152267%2C1664186792648732%2C1664188755414019%2C1664187055838215%2C1664184182571022%2C1664185938950
148%2C1664188041995268%2C1664188322863172%2C1664190185024520%2C1664185602828300%2C1664184276484099%2C1664188211399684%2C1664187870713868%2C1664184484958211%2C1664183864289288%2C1664186825487371%2C1664195548700686%2C1664186585780228%2C1664197296210947%2C1664188146725901%2C1664191748459523%5D%2C%22stick_group_ids%22%3A%5B%5D%7D&device_platform=android&search_id&has_count=0&version_code=769&from=video&device_id={1}&resolution=1080*1920&os_version=6.0.1&device_brand=Oneplus&search_sug=1&qc_query".format(
title,random.randint(69418800000,69418899999))
res = retry_get_url(url, headers=headers, timeout=5, proxies=3)
page_text = res.json()
for one_video in page_text["data"]:
video_dic = {}
try:
video_dic['title'] = one_video.get('title')
video_dic['url'] = one_video.get('display').get("info").get("url")
releaser_id = re.findall("user_id=(\d+)", one_video.get('user_source_url'))[0]
video_dic['releaser'] = one_video.get('media_name')
video_dic['releaserUrl'] = "https://www.toutiao.com/c/user/%s/" % releaser_id
release_time = int(one_video.get('create_time'))
video_dic['release_time'] = int(release_time * 1e3)
video_dic['duration'] = int(one_video.get('video_duration'))
video_dic['play_count'] = trans_play_count(one_video.get('play_effective_count'))
video_dic['repost_count'] = 0
video_dic['comment_count'] = one_video.get('comment_count')
video_dic['favorite_count'] = one_video.get('digg_count')
video_dic['fetch_time'] = int(datetime.datetime.now().timestamp() * 1e3)
video_dic['releaser_id_str'] = "toutiao_%s" % releaser_id
video_dic['video_img'] = one_video.get('display').get('self_info').get('image_url')
video_dic['platform'] = "toutiao"
video_dic["is_hot"] = 1
video_dic["data_provider"] = "CCR"
except Exception as e:
print(e)
continue
data_list.append(video_dic)
output_result(result_Lst=data_list,
platform=self.platform,
output_to_es_raw=True,
)
data_list.clear()
def search_short_video_page(self, title=None, search_json=None, **kwargs):
data_list = []
timestamp = int(datetime.datetime.now().timestamp() * 1e3)
title = urllib.parse.quote(title)
headers = {
"Accept-Encoding": "gzip",
# "X-SS-REQ-TICKET": "1587103224961",
"passport-sdk-version": "14",
"sdk-version": "2",
#"Cookie": "odin_tt=d5d96b2812637e9d20681530fbbe4d52e8f76ae1b6afa8c0a173260321611c507ac6eca10991b21fc4f023e94371d457df784f959e94db673ef29a5bd2137091; qh[360]=1; history=alrvlFic6pJZXJCTWBmSmZt6KW6mevZSz5LU3OJ7DEKX42Zw%2Bc84wMR3iYGBweFy3EzZsPcNTLyXWN1AvLYP8%2BQPMLFfEpUA8bo%2F7nNtYOK7xNwC4k3XmMHe5MtzSTiM48DluNr01dkNTDyXuHrApsi4ejkwsV%2BSmAPmSeXoMzDxXhKcAuIVrRfWAJnJJwA25fG1DoezvFBTZrzZeg6kT%2BwWSG7Gx3UJB5h4L%2FH4gXlVn%2BtAtkvFMQRcjpv%2B%2Be9TBib2S%2BwcYBuUn8xsYGK%2FJKMAkptgfXrDASaOS4yHQHJVPy6UOjDxXuI4BeJN26Fs6MDEcYn%2FEoMDAAAA%2F%2F8%3D; install_id=112651077855; ttreq=1$0b37d53ca5c301ce96959dc97a67886da420b294",
# "X-Gorgon": "0401e08b4001a628dcf96b16d01278ad842e915d905b213dc48f",
# "X-Khronos": "1587103224",
"Host": "is.snssdk.com",
"Connection": "Keep-Alive",
"User-Agent": "okhttp/3.10.0.1",
}
url = "https://is.snssdk.com/api/search/content/?os_api=23&device_type=oneplus%2Ba5010&from_search_subtab=video&manifest_version_code=7690&source=search_subtab_switch&offset=0&is_ttwebview=0&uuid=440000000189785&action_type&is_incognito=0&keyword_type&rom_version=23&app_name=news_article&format=json&version_name=7.6.9&ac=wifi&host_abi=armeabi-v7a&update_version_code=76909&channel=baidu_0411&is_native_req=1&loadId=1&longitude=113.40717530841052&isIncognito=0&plugin=2050&openudid=e44cc0264b92bcbf&forum=1&latitude=39.904680919672145&search_start_time=1587102733626&language=zh&pd=xiaoshipin&cur_tab_title=search_tab&aid=13&pos=5r_-9Onkv6e_eBEKeScxeCUfv7G_8fLz-vTp6Pn4v6esrKuzqa2qrKqorq2lqaytqK-xv_H86fTp6Pn4v6eupLOkramrpa2krKSrqq-sqaixv_zw_O3e9Onkv6e_eBEKeScxeCUfv7G__PD87dHy8_r06ej5-L-nrKyrs6mtqqyqqK6tpamsraivsb_88Pzt0fzp9Ono-fi_p66ks6StqaulraSspKuqr6ypqOA%253D&dpi=270&qrecImprId&fetch_by_ttnet=1&count=10&plugin_enable=3&search_position&ab_group=100167%252C94569%252C102754&keyword={0}&scm_version=1.0.2.830&search_json=%257B%2522comment_ids%2522%253A%255B%255D%252C%2522event_discussion%2522%253A74123%252C%2522event_impression%2522%253A17270790%252C%2522forum_id%2522%253A1664181806902302%252C%2522forum_recall_wtt%2522%253A%255B1664190666034183%252C1664192273575943%252C1664184430218253%252C1664185769175051%252C1664184985139212%252C1664196237152267%252C1664186792648732%252C1664188755414019%252C1664187055838215%252C1664184182571022%252C1664185938950148%252C1664188041995268%252C1664188322863172%252C1664190185024520%252C1664185602828300%252C1664184276484099%252C1664188211399684%252C1664187870713868%252C1664184484958211%252C1664183864289288%252C1664186825487371%252C1664195548700686%252C1664186585780228%252C1664197296210947%252C1664188146725901%252C1664191748459523%255D%252C%2522group_source%2522%253Anull%252C%2522hot_gid%2522%253A6816255461172445703%252C%2522log_pb%2522%253A%257B%2522cluster_type%2522%253A%25220%2522%252C%2522entrance_hotspot%2522%253A%2522channel%2522%252C%2522ho
t_board_cluster_id%2522%253A%25226816091697949180424%2522%252C%2522hot_board_impr_id%2522%253A%2522202004171352010100140411610B1A7741%2522%252C%2522location%2522%253A%2522hot_board%2522%252C%2522rank%2522%253A%25225%2522%252C%2522source%2522%253A%2522trending_tab%2522%252C%2522style_id%2522%253A%252210005%2522%257D%252C%2522mix_stick_ids%2522%253A%255B1664190666034183%252C1664192273575943%252C1664184430218253%252C1664185769175051%252C1664184985139212%252C1664196237152267%252C1664186792648732%252C1664188755414019%252C1664187055838215%252C1664184182571022%252C1664185938950148%252C1664188041995268%252C1664188322863172%252C1664190185024520%252C1664185602828300%252C1664184276484099%252C1664188211399684%252C1664187870713868%252C1664184484958211%252C1664183864289288%252C1664186825487371%252C1664195548700686%252C1664186585780228%252C1664197296210947%252C1664188146725901%252C1664191748459523%255D%252C%2522stick_group_ids%2522%253A%255B%255D%257D&device_platform=android&search_id&has_count=0&version_code=769&mac_address=08%253A00%253A27%253A1F%253A7E%253AA0&from=xiaoshipin&device_id={1}&resolution=810*1440&os_version=6.0.1&device_brand=Oneplus&search_sug=1&qc_query".format(
title,random.randint(69418800000,69418899999))
res = retry_get_url(url, headers=headers, timeout=5, proxies=3)
page_text = res.json()
for one_video in page_text["data"]:
video_dic = {}
try:
one_video = one_video["raw_data"]
video_dic['title'] = one_video.get('title')
video_dic['url'] = one_video.get('share').get("share_url")
releaser_id = one_video.get('user').get("info").get("user_id")
video_dic['releaser'] = one_video.get('user').get("info").get("name")
video_dic['releaserUrl'] = "https://www.toutiao.com/c/user/%s/" % releaser_id
release_time = int(one_video.get('create_time'))
video_dic['release_time'] = int(release_time * 1e3)
video_dic['duration'] = int(one_video.get('video').get("duration"))
video_dic['play_count'] = one_video.get('action').get("play_count")
video_dic['repost_count'] = one_video.get('action').get("share_count")
video_dic['comment_count'] = one_video.get('action').get("comment_count")
video_dic['favorite_count'] = one_video.get('action').get("digg_count")
video_dic['fetch_time'] = int(datetime.datetime.now().timestamp() * 1e3)
video_dic['releaser_id_str'] = "toutiao_%s" % releaser_id
video_dic['video_img'] = one_video.get('video').get('origin_cover').get('url_list')[0]
video_dic['platform'] = "toutiao"
if "iesdouyin" in video_dic['url']:
video_dic['releaserUrl'] = "https://www.douyin.com/share/user/%s/" % releaser_id
video_dic['platform'] = "抖音"
video_dic['releaser_id_str'] = "抖音_%s" % releaser_id
video_dic['play_count'] = 0
video_dic["is_hot"] = 1
video_dic["data_provider"] = "CCR"
except:
continue
data_list.append(video_dic)
output_result(result_Lst=data_list,
platform=self.platform,
output_to_es_raw=True,
)
data_list.clear()
    def get_hot_videos(self, *args, **kwargs):
        """Crawl both result pages for one hot keyword.

        Forwards all arguments unchanged to ``search_page`` and
        ``search_short_video_page`` (keyword plus the sslocal deep-link
        URL those methods expect).
        """
        self.search_page(*args, **kwargs)
        self.search_short_video_page(*args, **kwargs)
if __name__ == "__main__":
crawler = Crawler_toutiao()
crawler.get_hot_words()
# crawler.search_page("山东 火线提拔",
# 'sslocal://search?disable_record_history=true&from=trending_tab&hot_board_cluster_id=1654391459809283&hot_board_impr_id=202003031451280100150450182605F63D&keyword=%23%E5%B1%B1%E4%B8%9C%20%E7%81%AB%E7%BA%BF%E6%8F%90%E6%8B%94%23&search_json=%7B%22comment_ids%22%3A%5B%5D%2C%22event_discussion%22%3A116635%2C%22event_impression%22%3A27176007%2C%22forum_id%22%3A1660114103413768%2C%22forum_recall_wtt%22%3A%5B1660117192554499%2C1660119602787340%2C1660116731441155%2C1660122355505164%2C1660117380056075%2C1660120151918599%2C1660120404429828%2C1660119017074695%2C1660122046612493%2C1660124327045131%2C1660116231574535%5D%2C%22group_source%22%3Anull%2C%22hot_gid%22%3A6799674340544610823%2C%22log_pb%22%3A%7B%22hot_board_cluster_id%22%3A%221654391459809283%22%2C%22hot_board_impr_id%22%3A%22202003031451280100150450182605F63D%22%2C%22source%22%3A%22trending_tab%22%7D%2C%22mix_stick_ids%22%3A%5B1660117192554499%2C1660119602787340%2C1660116731441155%2C1660122355505164%2C1660117380056075%2C1660120151918599%2C1660120404429828%2C1660119017074695%2C1660122046612493%2C1660124327045131%2C1660116231574535%5D%2C%22stick_group_ids%22%3A%5B%5D%7D&source=trending_tab')
# crawler.search_short_video_page("山东 火线提拔",
# 'sslocal://search?disable_record_history=true&from=trending_tab&hot_board_cluster_id=1654391459809283&hot_board_impr_id=202003031451280100150450182605F63D&keyword=%23%E5%B1%B1%E4%B8%9C%20%E7%81%AB%E7%BA%BF%E6%8F%90%E6%8B%94%23&search_json=%7B%22comment_ids%22%3A%5B%5D%2C%22event_discussion%22%3A116635%2C%22event_impression%22%3A27176007%2C%22forum_id%22%3A1660114103413768%2C%22forum_recall_wtt%22%3A%5B1660117192554499%2C1660119602787340%2C1660116731441155%2C1660122355505164%2C1660117380056075%2C1660120151918599%2C1660120404429828%2C1660119017074695%2C1660122046612493%2C1660124327045131%2C1660116231574535%5D%2C%22group_source%22%3Anull%2C%22hot_gid%22%3A6799674340544610823%2C%22log_pb%22%3A%7B%22hot_board_cluster_id%22%3A%221654391459809283%22%2C%22hot_board_impr_id%22%3A%22202003031451280100150450182605F63D%22%2C%22source%22%3A%22trending_tab%22%7D%2C%22mix_stick_ids%22%3A%5B1660117192554499%2C1660119602787340%2C1660116731441155%2C1660122355505164%2C1660117380056075%2C1660120151918599%2C1660120404429828%2C1660119017074695%2C1660122046612493%2C1660124327045131%2C1660116231574535%5D%2C%22stick_group_ids%22%3A%5B%5D%7D&source=trending_tab')
| [
"593516104@qq.com"
] | 593516104@qq.com |
fc0532bcf40eaac7f3f64dbe0d5bd189c7dbc722 | c3d45539a1334e81af02f7a4dde89fe7943b61ab | /jsontut.py | cdabc57346ce1265db590e5dca51b331f0b93307 | [] | no_license | Total-tech/Pthon | fdeb0c217eb9aeabff95e89507060e2438364788 | 69aa4b120a9b555af0ce72e34ee277d9c23eee15 | refs/heads/master | 2022-11-10T01:51:05.786976 | 2020-06-26T14:18:45 | 2020-06-26T14:18:45 | 275,146,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | import json
data = '{"var1":"harry","var2":"yovi"}'
parsed = json.loads(data)
print(parsed['var1']) | [
"anant.yovi2@gmail.com"
] | anant.yovi2@gmail.com |
57c882e9c375aedf3dda38c1aab0835d0396ef37 | e4e7116b205ce79526ec2246c94b165748ab461a | /addition.py | b0856a06505f8545aa8725a41d4f63e201c1e537 | [
"MIT"
] | permissive | pooja-solaikannu/jubilant-garbanzo | b96174aba11db835fc8e3681182e846fb21deebd | c6c3b46970b3fee2da5f660562097d00709ec4e2 | refs/heads/master | 2023-06-05T03:37:42.450101 | 2021-06-23T16:05:23 | 2021-06-23T16:05:23 | 379,660,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | def add_two_numbers(a, b):
c = a + b
return c | [
"pooja.solaikannu@gmail.com"
] | pooja.solaikannu@gmail.com |
54e559b154612aaaf2032b3856243aa5fdf1cca0 | a914a9b5e36cfc49a1fa3fecacdbe6212dd85e02 | /Predictor V3.0/NN.py | 01492eb338252b63db2e0f827eb432aab5c54fc8 | [] | no_license | polklabs/RedditGenerator | e956f6a28141ff107de00c09a928a42351665492 | d526ddc9e46689bc9ec655ac73e5896fd0462d50 | refs/heads/master | 2020-04-16T01:43:17.757830 | 2019-01-11T05:37:00 | 2019-01-11T05:37:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,378 | py | import numpy as np
from keras.models import Sequential
from keras.layers import Dense, LSTM, TimeDistributed, Activation
import math
# --- Hyperparameters ---------------------------------------------------
DATA_DIR = 'data/reddit.txt'      # training corpus (plain text)
SEQ_LENGTH = 200                  # characters per training sequence
HIDDEN_DIM = 700                  # LSTM hidden units per layer
LAYER_NUM = 3                     # number of stacked LSTM layers
BATCH_SIZE = 128
GENERATE_LENGTH = 200             # characters sampled after each epoch

# Load the corpus and build the character vocabulary.  The context
# manager guarantees the file handle is closed (the original
# `open(...).read()` leaked it).
with open(DATA_DIR, 'r', encoding='utf-8') as corpus_file:
    data = corpus_file.read()
chars = sorted(set(data))  # sorted() already returns a list
VOCAB_SIZE = len(chars)
print(f'Vocab size: {VOCAB_SIZE}')
# Bidirectional lookup tables between characters and integer ids.
ix_to_char = {ix: char for ix, char in enumerate(chars)}
char_to_ix = {char: ix for ix, char in enumerate(chars)}
def generate_text(model, length=100):
    """Sample ``length`` characters from ``model``, echoing them to stdout.

    Starts from a random seed character, then repeatedly feeds the
    one-hot encoded prefix back into the network and greedily picks the
    most probable next character.  Returns the generated string
    (seed character included, so its length is ``length + 1``).
    """
    pred_ids = [np.random.randint(VOCAB_SIZE)]
    generated = [ix_to_char[pred_ids[-1]]]
    one_hot = np.zeros((1, length, VOCAB_SIZE))
    for step in range(length):
        # Mark the most recently chosen character in the one-hot buffer.
        one_hot[0, step, :][pred_ids[-1]] = 1
        print(ix_to_char[pred_ids[-1]], end="")
        # Greedy decode: argmax over the vocabulary at every timestep;
        # only the final position feeds the next iteration.
        pred_ids = np.argmax(model.predict(one_hot[:, :step + 1, :])[0], 1)
        generated.append(ix_to_char[pred_ids[-1]])
    return "".join(generated)
# Vectorize the corpus into one-hot tensors: X[i] holds the i-th block of
# SEQ_LENGTH characters; y[i] is the same block shifted left by one
# character (next-character prediction targets).
X = np.zeros((int(len(data)/SEQ_LENGTH), SEQ_LENGTH, VOCAB_SIZE))
y = np.zeros((int(len(data)/SEQ_LENGTH), SEQ_LENGTH, VOCAB_SIZE))
for i in range(0, int(math.floor(len(data)/SEQ_LENGTH))):
    X_sequence = data[i*SEQ_LENGTH:(i+1)*SEQ_LENGTH]
    X_sequence_ix = [char_to_ix[value] for value in X_sequence]
    input_sequence = np.zeros((SEQ_LENGTH, VOCAB_SIZE))
    for j in range(SEQ_LENGTH):
        input_sequence[j][X_sequence_ix[j]] = 1
    X[i] = input_sequence
    # Targets: the characters one position ahead of the input block.
    y_sequence = data[i*SEQ_LENGTH+1:(i+1)*SEQ_LENGTH+1]
    y_sequence_ix = [char_to_ix[value] for value in y_sequence]
    target_sequence = np.zeros((SEQ_LENGTH, VOCAB_SIZE))
    # NOTE(review): only SEQ_LENGTH-1 timesteps are filled, so the last
    # target row stays all-zero -- presumably to avoid indexing past the
    # end of `data` for the final block; confirm this is intentional.
    for j in range(SEQ_LENGTH-1):
        target_sequence[j][y_sequence_ix[j]] = 1
    y[i] = target_sequence
# Stacked character-level LSTM: LAYER_NUM recurrent layers followed by a
# per-timestep softmax over the vocabulary.
model = Sequential()
model.add(LSTM(HIDDEN_DIM, input_shape=(None, VOCAB_SIZE), return_sequences=True))
for i in range(LAYER_NUM - 1):
    model.add(LSTM(HIDDEN_DIM, return_sequences=True))
model.add(TimeDistributed(Dense(VOCAB_SIZE)))
model.add(Activation('softmax'))
#model.load_weights('')
model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
# Train indefinitely: sample text after every epoch and checkpoint the
# weights every second epoch.
nb_epoch = 0
while True:
    print('\n\n')
    model.fit(X, y, batch_size=BATCH_SIZE, verbose=1, epochs=1)
    nb_epoch += 1
    generate_text(model, GENERATE_LENGTH)
    if nb_epoch % 2 == 0:
        model.save_weights('weights/reddit_{}_epoch_{}.hdf5'.format(HIDDEN_DIM, nb_epoch))
| [
"noreply@github.com"
] | polklabs.noreply@github.com |
d11f0943f08e8d318012aa36be71b8fab3149ec3 | e9cc17b38479616a28762d119f02fc348c0862f4 | /Lecture 19/Lecture19HWAssignment1.py | 15182e298533b90a261943e7aecbc551d7a7a3b8 | [
"MIT"
] | permissive | AtharvaJoshi21/PythonPOC | 8f419672480f7df974b4e75b1010c344be73c389 | 6b95eb5bab7b28e9811e43b39e863faf2ee7565b | refs/heads/master | 2020-04-16T07:39:15.256822 | 2019-05-05T15:48:01 | 2019-05-05T15:48:01 | 165,394,707 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,529 | py | # WAP to accept a filename from user and no of lines to be copied to another file. Accept inputs as cmd line params.
# - Hint use "argparse" module
import argparse
import optparse
import shutil
def CopyLines(srcFilePath, destFilePath, countOfLines=0):
    """Copy the first ``countOfLines`` lines of one file into another.

    With ``countOfLines == 0`` (the default) the whole file is copied.
    Files are opened via context managers so both handles are closed
    even on error (the original relied on manual ``close()`` calls and
    also copied one line too many: its loop broke only once ``count``
    *exceeded* the requested number).
    """
    with open(srcFilePath) as srcFile, open(destFilePath, "w") as destFile:
        if countOfLines == 0:
            shutil.copyfileobj(srcFile, destFile)
            print("Whole file copied successfully!")
            return
        copied = 0
        for line in srcFile:
            if copied >= countOfLines:
                break
            destFile.write(line)
            copied += 1
        else:
            # The source ran out before the requested count was reached.
            print("Whole file copied successfully!")
def main():
    """Parse command-line options and invoke CopyLines.

    Uses ``argparse`` — ``optparse`` has been deprecated since Python
    3.2, and the assignment comment explicitly asks for argparse.  The
    command-line interface is unchanged: ``-i`` input file, ``-d``
    output file, ``-n`` number of lines (0 = whole file).
    """
    parser = argparse.ArgumentParser(
        description="Copy the first N lines of one file into another")
    parser.add_argument("-i", type=str, required=True, dest="input",
                        help="Input / Source File Name")
    parser.add_argument("-d", type=str, required=True, dest="output",
                        help="Output / Destination File Name")
    parser.add_argument("-n", type=int, default=0, dest="numArgs",
                        help="Number of lines to be copied, default 0 (whole file)")
    args = parser.parse_args()
    CopyLines(args.input, args.output, args.numArgs)
    print("File copy successful!")
# print(args)
print("File copy successful!")
if __name__ == "__main__":
main() | [
"joshi.atharva92@hotmail.com"
] | joshi.atharva92@hotmail.com |
edc3eb522ed87cd511fbbd1f029f3376d79b1645 | 474525154a4e1d48ef5242d1f44164d05399b145 | /tensorflow_probability/python/internal/auto_composite_tensor_test.py | 5f13ff5b55d2cb0cbbe2c4094627baed2881f04d | [
"Apache-2.0"
] | permissive | svshivapuja/probability | 9855737790f74a39169688fbfec9671deef804d9 | af7ccb22d972329633530c3b754ed1f49472f6a7 | refs/heads/main | 2023-07-17T04:14:53.703622 | 2021-08-30T17:47:06 | 2021-08-30T17:47:06 | 400,983,015 | 1 | 0 | Apache-2.0 | 2021-08-29T07:51:29 | 2021-08-29T07:51:29 | null | UTF-8 | Python | false | false | 26,047 | py | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for auto_composite_tensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import auto_composite_tensor
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import test_util
tf.enable_v2_behavior()
flags.DEFINE_string(
'model_output_path',
None,
'If defined, serialize a `tf.Module` instance to this directory with '
'`tf.saved_model`.')
FLAGS = flags.FLAGS
TFP_PYTHON_DIR = 'tensorflow_probability/tensorflow_probability/python'
tfb = tfp.bijectors
tfd = tfp.distributions
# CompositeTensor-enabled wrappers of the linear operators, distributions
# and bijectors used throughout these tests.  `name` is declared
# non-identifying so two instances differing only by name share a
# TypeSpec.
AutoIdentity = tfp.experimental.auto_composite_tensor(
    tf.linalg.LinearOperatorIdentity, non_identifying_kwargs=('name',))
AutoDiag = tfp.experimental.auto_composite_tensor(
    tf.linalg.LinearOperatorDiag, non_identifying_kwargs=('name',))
AutoBlockDiag = tfp.experimental.auto_composite_tensor(
    tf.linalg.LinearOperatorBlockDiag, non_identifying_kwargs=('name',))
AutoTriL = tfp.experimental.auto_composite_tensor(
    tf.linalg.LinearOperatorLowerTriangular, non_identifying_kwargs=('name',))
AutoNormal = tfp.experimental.auto_composite_tensor(
    tfd.Normal, non_identifying_kwargs=('name',))
AutoIndependent = tfp.experimental.auto_composite_tensor(
    tfd.Independent, non_identifying_kwargs=('name',))
AutoReshape = tfp.experimental.auto_composite_tensor(
    tfb.Reshape, non_identifying_kwargs=('name',))
class Model(tf.Module):
  """SavedModel fixture: one variable plus a tf.function over a bijector.

  Used by `test_export_import` / `test_saved_model_from_disk` to check
  that `AutoCompositeTensor` type specs round-trip through tf.saved_model.
  """

  def __init__(self):
    # Dynamic-shape variable ([None], not [2]) so the SavedModel
    # round-trip must preserve relaxed shapes.
    self.scale = tf.Variable([0., 1.], shape=[None])

  @tf.function(input_signature=(
      tfb.Scale([1., 2.], validate_args=True)._type_spec,))
  def make_bij(self, b):
    """Return a Scale bijector whose scale is `self.scale + b.scale`."""
    return tfb.Scale(
        tf.convert_to_tensor(self.scale) + b.scale,
        validate_args=True)
@tfp.experimental.auto_composite_tensor
class ThingWithCallableArg(tfp.experimental.AutoCompositeTensor):
  """Test fixture whose constructor takes a Tensor and a callable."""

  def __init__(self, a, f):
    self.a = tf.convert_to_tensor(a, dtype_hint=tf.float32, name='a')
    self.f = f
    # NOTE(review): the second key here is 'b' although the constructor
    # parameter is named 'f' -- looks like a typo; confirm whether
    # auto_composite_tensor requires these keys to match __init__ args.
    self.parameters = dict(a=self.a, b=self.f)

  def call(self):
    """Apply the stored callable to the stored tensor."""
    return self.f(self.a)
def tearDownModule():
  # unittest hook: runs once after every test in this module finishes.
  # If `FLAGS.model_output_path` is set, serialize a `Model` instance to disk.
  # To update the serialized data read by `test_saved_model_from_disk`, pass
  # the local path to
  # `tensorflow_probability/python/internal/testdata/auto_composite_tensor`.
  # You may need to pass `--test_strategy=local` to avoid permissions errors.
  if FLAGS.model_output_path is not None:
    model = Model()
    tf.saved_model.save(model, FLAGS.model_output_path)
@test_util.test_all_tf_execution_regimes
class AutoCompositeTensorTest(test_util.TestCase):
def test_example(self):
@tfp.experimental.auto_composite_tensor(non_identifying_kwargs=('name',))
class Adder(object):
def __init__(self, x, y, name=None):
with tf.name_scope(name or 'Adder') as name:
self._x = tensor_util.convert_nonref_to_tensor(x)
self._y = tensor_util.convert_nonref_to_tensor(y)
self._name = name
def xpy(self):
return self._x + self._y
x = 1.
y = tf.Variable(1.)
self.evaluate(y.initializer)
def body(obj):
return Adder(obj.xpy(), y),
result, = tf.while_loop(
cond=lambda _: True,
body=body,
loop_vars=(Adder(x, y),),
maximum_iterations=3)
self.assertAllClose(5., result.xpy())
def test_function(self):
lop = AutoDiag(2. * tf.ones([3]))
self.assertAllClose(
6. * tf.ones([3]),
tf.function(lambda lop: lop.matvec(3. * tf.ones([3])))(lop))
def test_loop(self):
def body(lop):
return AutoDiag(lop.matvec(tf.ones([3]) * 2.)),
init_lop = AutoDiag(tf.ones([3]))
lop, = tf.while_loop(
cond=lambda _: True,
body=body,
loop_vars=(init_lop,),
maximum_iterations=3)
self.assertAllClose(2.**3 * tf.ones([3]), lop.matvec(tf.ones([3])))
def test_shape_parameters(self):
dist = AutoIndependent(AutoNormal(0, tf.ones([1])),
reinterpreted_batch_ndims=1)
stream = test_util.test_seed_stream()
lp = dist.log_prob(dist.sample(seed=stream()))
lp, _ = tf.while_loop(
lambda *_: True,
lambda lp, d: (d.log_prob(d.sample(seed=stream())), d),
(lp, dist),
maximum_iterations=2)
self.evaluate(lp)
def test_prefer_static_shape_params(self):
@tf.function
def f(b):
return b
b = AutoReshape(
event_shape_out=[2, 3],
event_shape_in=[tf.reduce_prod([2, 3])]) # Tensor in a list.
f(b)
def test_nested(self):
lop = AutoBlockDiag([AutoDiag(tf.ones([2]) * 2), AutoIdentity(1)])
self.assertAllClose(
tf.constant([6., 6, 3]),
tf.function(lambda lop: lop.matvec(3. * tf.ones([3])))(lop))
def test_preconditioner(self):
xs = self.evaluate(tf.random.uniform([30, 30], seed=test_util.test_seed()))
cov_linop = tf.linalg.LinearOperatorFullMatrix(
tf.matmul(xs, xs, transpose_b=True) + tf.linalg.eye(30) * 1e-3,
is_self_adjoint=True,
is_positive_definite=True)
tfed = tfp.experimental.distributions
auto_ct_mvn_prec_linop = tfp.experimental.auto_composite_tensor(
tfed.MultivariateNormalPrecisionFactorLinearOperator,
non_identifying_kwargs=('name',))
tril = AutoTriL(**cov_linop.cholesky().parameters)
momentum_distribution = auto_ct_mvn_prec_linop(precision_factor=tril)
def body(d):
return d.copy(precision_factor=AutoTriL(
**dict(d.precision_factor.parameters,
tril=d.precision_factor.to_dense() + tf.linalg.eye(30),))),
after_loop = tf.while_loop(lambda d: True, body, (momentum_distribution,),
maximum_iterations=1)
tf.nest.map_structure(self.evaluate,
after_loop,
expand_composites=True)
def test_already_ct_subclass(self):
@tfp.experimental.auto_composite_tensor
class MyCT(tfp.experimental.AutoCompositeTensor):
def __init__(self, tensor_param, non_tensor_param, maybe_tensor_param):
self._tensor_param = tf.convert_to_tensor(tensor_param)
self._non_tensor_param = non_tensor_param
self._maybe_tensor_param = maybe_tensor_param
def body(obj):
return MyCT(obj._tensor_param + 1,
obj._non_tensor_param,
obj._maybe_tensor_param),
init = MyCT(0., 0, 0)
result, = tf.while_loop(
cond=lambda *_: True,
body=body,
loop_vars=(init,),
maximum_iterations=3)
self.assertAllClose(3., result._tensor_param)
init = MyCT(0., 0, tf.constant(0))
result, = tf.while_loop(
cond=lambda *_: True,
body=body,
loop_vars=(init,),
maximum_iterations=3)
self.assertAllClose(3., result._tensor_param)
def test_parameters_lookup(self):
@tfp.experimental.auto_composite_tensor
class ThingWithParametersButNoAttrs(tfp.experimental.AutoCompositeTensor):
def __init__(self, a, b):
self.a = tf.convert_to_tensor(a, dtype_hint=tf.float32, name='a')
self.b = tf.convert_to_tensor(b, dtype_hint=tf.float32, name='a')
self.parameters = dict(a=self.a, b=self.b)
t = ThingWithParametersButNoAttrs(1., 2.)
self.assertIsInstance(t, tf.__internal__.CompositeTensor)
ts = t._type_spec
components = ts._to_components(t)
self.assertAllEqualNested(components, dict(a=1., b=2.))
t2 = ts._from_components(components)
self.assertIsInstance(t2, ThingWithParametersButNoAttrs)
def test_wrapped_constructor(self):
def add_tag(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
args[0]._tag = 'tagged'
return f(*args, **kwargs)
return wrapper
@tfp.experimental.auto_composite_tensor
class ThingWithWrappedInit(tfp.experimental.AutoCompositeTensor):
@add_tag
def __init__(self, value):
self.value = tf.convert_to_tensor(value)
init = ThingWithWrappedInit(3)
def body(obj):
return ThingWithWrappedInit(value=obj.value + 1),
out, = tf.while_loop(
cond=lambda *_: True,
body=body,
loop_vars=(init,),
maximum_iterations=3)
self.assertEqual(self.evaluate(out.value), 6)
def test_deferred_assertion_context(self):
# If `validate_args` assertions in `__init__` are not deferred, a graph
# cycle is created when `d._type_spec` calls `__init__` and this test fails.
d = AutoNormal(0., 1., validate_args=True)
@tf.function
def f(d):
return d
f(d)
def test_function_with_variable(self):
loc = tf.Variable(3.)
dist = AutoIndependent(
AutoNormal(loc, scale=tf.ones([3])), reinterpreted_batch_ndims=1)
new_loc = 32.
@tf.function
def f(d):
d.distribution.loc.assign(new_loc)
self.assertLen(d.trainable_variables, 1)
return d
dist_ = f(dist)
self.evaluate(loc.initializer)
self.assertEqual(self.evaluate(dist_.distribution.loc), new_loc)
self.assertEqual(self.evaluate(dist.distribution.loc), new_loc)
self.assertLen(dist.trainable_variables, 1)
def test_export_import(self):
path = self.create_tempdir().full_path
m1 = Model()
self.evaluate([v.initializer for v in m1.variables])
self.evaluate(m1.scale.assign(m1.scale + 1.))
tf.saved_model.save(m1, os.path.join(path, 'saved_model1'))
m2 = tf.saved_model.load(os.path.join(path, 'saved_model1'))
self.evaluate(m2.scale.initializer)
b = tfb.Scale([5., 9.], validate_args=True)
self.evaluate(m2.make_bij(b).forward(2.))
self.evaluate(m2.scale.assign(m2.scale + [1., 2.]))
self.evaluate(m2.make_bij(b).forward(2.))
self.evaluate(m2.scale.assign([1., 2., 3.]))
tf.saved_model.save(m2, os.path.join(path, 'saved_model2'))
m3 = tf.saved_model.load(os.path.join(path, 'saved_model2'))
self.evaluate(m3.scale.initializer)
with self.assertRaisesOpError('compatible shape'):
self.evaluate(m3.make_bij(b).forward([3.]))
def test_saved_model_from_disk(self):
test_srcdir = absltest.get_default_test_srcdir()
relative_testdata_path = os.path.join(
TFP_PYTHON_DIR, 'internal/testdata/auto_composite_tensor')
absolute_testdata_path = os.path.join(test_srcdir, relative_testdata_path)
m = tf.saved_model.load(absolute_testdata_path)
self.evaluate(m.scale.initializer)
b = tfb.Scale([5., 9.], validate_args=True)
self.assertAllClose(self.evaluate(m.make_bij(b).forward(2.)), [10., 20.])
self.evaluate(m.scale.assign(m.scale + [1., 2.]))
self.assertAllClose(self.evaluate(m.make_bij(b).forward(2.)), [12., 24.])
def test_callable_arg(self):
t = ThingWithCallableArg(1., lambda x: x + 2.)
self.assertIsInstance(t, tf.__internal__.CompositeTensor)
ts = t._type_spec
components = ts._to_components(t)
self.assertAllEqualNested(components, dict(a=1.))
t2 = ts._from_components(components)
self.assertIsInstance(t2, ThingWithCallableArg)
self.assertAllClose(tf.function(lambda t: t.call())(t2), 3.)
def test_different_names_type_specs_equal(self):
dist_1 = AutoNormal([0., 2.], scale=1., name='FirstNormal')
dist_2 = AutoNormal([1., 3.], scale=2., name='SecondNormal')
self.assertEqual(dist_1._type_spec, dist_2._type_spec)
def test_save_restore_functor(self):
f = lambda x: x ** 2
a = tf.constant([3., 2.])
ct = ThingWithCallableArg(a, f=f)
struct_coder = tf.__internal__.saved_model.StructureCoder()
with self.assertRaisesRegex(ValueError, 'Cannot serialize'):
struct_coder.encode_structure(ct._type_spec) # pylint: disable=protected-access
@tfp.experimental.auto_composite_tensor(module_name='my.module')
class F(tfp.experimental.AutoCompositeTensor):
def __call__(self, *args, **kwargs):
return f(*args, **kwargs)
ct_functor = ThingWithCallableArg(a, f=F())
enc = struct_coder.encode_structure(ct_functor._type_spec)
dec = struct_coder.decode_proto(enc)
self.assertEqual(dec, ct_functor._type_spec)
def test_composite_tensor_callable_arg(self):
# Parameters that are both `CompositeTensor` and callable should be
# handled by the `_type_spec` as `CompositeTensor`.
inner_bij = tfb.Scale([[1., 3.]], validate_args=True)
bij = tfb.TransformDiagonal(inner_bij, validate_args=True)
self.assertLen(tf.nest.flatten(bij), 1)
self.assertLen(bij._type_spec._callable_params, 0) # pylint: disable=protected-access
self.assertIn('diag_bijector', bij._type_spec._param_specs) # pylint: disable=protected-access
def test_subclass_with_inherited_type_spec_raises(self):
@tfp.experimental.auto_composite_tensor(
omit_kwargs=('parameters',), non_identifying_kwargs=('name',))
class ParentBijector(
tfb.Bijector, tfp.experimental.AutoCompositeTensor):
"""Minimal specification of a `Bijector`.
We do not subclass `AutoCompositeTensorBijector` since its metaclass
would make subclasses automatically re-generate their `TypeSpec`.
"""
def __init__(self, a):
parameters = dict(locals())
self.a = a
super(ParentBijector, self).__init__(
forward_min_event_ndims=0,
parameters=parameters)
class ChildBijector(ParentBijector):
def __init__(self, b):
self.b = b
super(ChildBijector, self).__init__(a=b+1)
b = ChildBijector(b=4)
with self.assertRaisesRegex(
ValueError,
'`ChildBijector` has inherited the `_type_spec` of `ParentBijector`'):
tf.nest.flatten(b, expand_composites=True)
AutoChildBijector = tfp.experimental.auto_composite_tensor(ChildBijector) # pylint: disable=invalid-name
b_ct = AutoChildBijector(b=2)
self.assertLen(tf.nest.flatten(b_ct, expand_composites=True), 0)
def test_names_preserved_through_flatten(self):
dist = AutoNormal(0., scale=3., name='ScaleThreeNormal')
flat = tf.nest.flatten(dist, expand_composites=True)
unflat = tf.nest.pack_sequence_as(dist, flat, expand_composites=True)
unflat_name = ('ScaleThreeNormal' if tf.executing_eagerly()
else 'ScaleThreeNormal_1')
self.assertEqual(unflat.name, unflat_name)
class _TestTypeSpec(auto_composite_tensor._AutoCompositeTensorTypeSpec):
  """Minimal concrete `_AutoCompositeTensorTypeSpec`.

  Exists only so the equality / hashing / compatibility tests below can
  instantiate type specs directly with chosen parameters.
  """

  def __init__(self, param_specs, non_tensor_params=None, omit_kwargs=(),
               prefer_static_value=(), non_identifying_kwargs=(),
               callable_params=None):
    # Default `non_tensor_params` to a fresh dict (avoids a mutable default).
    non_tensor_params = {} if non_tensor_params is None else non_tensor_params
    super(_TestTypeSpec, self).__init__(
        param_specs, non_tensor_params=non_tensor_params,
        omit_kwargs=omit_kwargs, prefer_static_value=prefer_static_value,
        non_identifying_kwargs=non_identifying_kwargs,
        callable_params=callable_params)

  @property
  def value_type(self):
    """Unused `value_type` to allow the `TypeSpec` to be instantiated."""
    pass
@test_util.test_all_tf_execution_regimes
class AutoCompositeTensorTypeSpecTest(test_util.TestCase):
@parameterized.named_parameters(
('WithoutCallable',
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([3, None], tf.float32)},
non_tensor_params={'validate_args': True},
omit_kwargs=('name',),
prefer_static_value=('a',)),
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([3, None], tf.float32)},
non_tensor_params={'validate_args': True},
omit_kwargs=('name',),
prefer_static_value=('a',))),
('WithCallable',
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([3, None], tf.float32),
'b': tfb.Scale(3.)._type_spec},
omit_kwargs=('name', 'foo'),
prefer_static_value=('a',),
callable_params={'f': tf.math.exp}),
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([3, None], tf.float32),
'b': tfb.Scale(3.)._type_spec},
omit_kwargs=('name', 'foo'),
prefer_static_value=('a',),
callable_params={'f': tf.math.exp})),
('DifferentNonIdentifyingKwargsValues',
_TestTypeSpec(
param_specs={'x': tf.TensorSpec([], tf.float64)},
non_tensor_params={'name': 'MyAutoCT'},
non_identifying_kwargs=('name')),
_TestTypeSpec(
param_specs={'x': tf.TensorSpec([], tf.float64)},
non_tensor_params={'name': 'OtherAutoCT'},
non_identifying_kwargs=('name'))),
)
def testEquality(self, v1, v2):
# pylint: disable=g-generic-assert
self.assertEqual(v1, v2)
self.assertEqual(v2, v1)
self.assertFalse(v1 != v2)
self.assertFalse(v2 != v1)
self.assertEqual(hash(v1), hash(v2))
@parameterized.named_parameters(
('DifferentTensorSpecs',
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([3, 2], tf.float32)},
non_tensor_params={'validate_args': True},
omit_kwargs=('name',)),
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([3, None], tf.float32)},
non_tensor_params={'validate_args': True},
omit_kwargs=('name',))),
('DifferentCallables',
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([3, None], tf.float32)},
omit_kwargs=('name', 'foo'),
callable_params={'f': tf.math.exp}),
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([3, None], tf.float32)},
omit_kwargs=('name', 'foo'),
callable_params={'f': tf.math.sigmoid})),
('DifferentMetadata',
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([3, 2], tf.float32)},
non_tensor_params={'validate_args': True},
non_identifying_kwargs=('name',)),
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([3, None], tf.float32)},
non_tensor_params={'validate_args': True})),
)
def testInequality(self, v1, v2):
# pylint: disable=g-generic-assert
self.assertNotEqual(v1, v2)
self.assertNotEqual(v2, v1)
self.assertFalse(v1 == v2)
self.assertFalse(v2 == v1)
@parameterized.named_parameters(
('WithoutCallable',
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([4, 2], tf.float32)},
non_tensor_params={'validate_args': True, 'b': 3.},
omit_kwargs=('name',),
prefer_static_value=('b',)),
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([4, None], tf.float32)},
non_tensor_params={'validate_args': True, 'b': 3.},
omit_kwargs=('name',),
prefer_static_value=('b',))),
('WithCallable',
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([3, None], tf.float32),
'b': tfb.Scale(
tf.Variable(2., shape=None))._type_spec},
omit_kwargs=('name', 'foo'),
callable_params={'f': tf.math.exp}),
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([3, None], tf.float32),
'b': tfb.Scale(3.)._type_spec},
omit_kwargs=('name', 'foo'),
callable_params={'f': tf.math.exp})),
('DifferentNonIdentifyingKwargsValues',
_TestTypeSpec(
param_specs={'x': tf.TensorSpec(None, tf.float64)},
non_tensor_params={'name': 'MyAutoCT'},
non_identifying_kwargs=('name')),
_TestTypeSpec(
param_specs={'x': tf.TensorSpec([], tf.float64)},
non_tensor_params={'name': 'OtherAutoCT'},
non_identifying_kwargs=('name'))),
)
def testIsCompatibleWith(self, v1, v2):
self.assertTrue(v1.is_compatible_with(v2))
self.assertTrue(v2.is_compatible_with(v1))
@parameterized.named_parameters(
('IncompatibleTensorSpecs',
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([4, 2, 3], tf.float32)},
non_tensor_params={'validate_args': True, 'b': [3, 2]},
omit_kwargs=('name',),
prefer_static_value=('b',)),
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([4, None], tf.float32)},
non_tensor_params={'validate_args': True, 'b': [3, 2]},
omit_kwargs=('name',),
prefer_static_value=('b',))),
('DifferentMetadataSameCallables',
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([4, 2], tf.float32)},
non_tensor_params={'validate_args': True},
omit_kwargs=('name',),
callable_params={'g': tf.math.softplus}),
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([4, None], tf.float32)},
non_tensor_params={'validate_args': False},
omit_kwargs=('name',),
callable_params={'g': tf.math.softplus})),
('DifferentCallables',
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([3, None], tf.float32),
'b': tfb.Scale(
tf.Variable(2., shape=None))._type_spec},
omit_kwargs=('name', 'foo'),
callable_params={'f': tf.math.exp}),
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([3, None], tf.float32),
'b': tfb.Scale(3.)._type_spec},
omit_kwargs=('name', 'foo'),
callable_params={'f': tf.math.sigmoid}))
)
def testIsNotCompatibleWith(self, v1, v2):
self.assertFalse(v1.is_compatible_with(v2))
self.assertFalse(v2.is_compatible_with(v1))
@parameterized.named_parameters(
('WithoutCallable',
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([4, 2], tf.float32)},
omit_kwargs=('name',)),
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([4, None], tf.float32)},
omit_kwargs=('name',)),
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([4, None], tf.float32)},
omit_kwargs=('name',))),
('WithCallable',
_TestTypeSpec(
param_specs={'a': tf.TensorSpec(None, tf.float32),
'b': tfb.Scale(
tf.Variable(2., shape=None))._type_spec},
callable_params={'f': tf.math.exp}),
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([3, None], tf.float32),
'b': tfb.Scale(tf.Variable(3.))._type_spec},
callable_params={'f': tf.math.exp}),
_TestTypeSpec(
param_specs={'a': tf.TensorSpec(None, tf.float32),
'b': tfb.Scale(
tf.Variable(2., shape=None))._type_spec},
callable_params={'f': tf.math.exp})),
)
def testMostSpecificCompatibleType(self, v1, v2, expected):
self.assertEqual(v1.most_specific_compatible_type(v2), expected)
self.assertEqual(v2.most_specific_compatible_type(v1), expected)
@parameterized.named_parameters(
('DifferentParamSpecs',
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([4, 2], tf.float32)},
omit_kwargs=('foo',)),
_TestTypeSpec(
param_specs={'b': tf.TensorSpec([5, None], tf.float32)},
omit_kwargs=('foo',))),
('DifferentMetadata',
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([4, 2], tf.float32)},
omit_kwargs=('foo',)),
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([4, None], tf.float32)},
omit_kwargs=('bar',))),
('DifferentCallables',
_TestTypeSpec(
param_specs={'a': tf.TensorSpec(None, tf.float32),
'b': tfb.Scale(
tf.Variable(2., shape=None))._type_spec},
callable_params={'f': tf.math.exp}),
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([3, None], tf.float32),
'b': tfb.Scale(tf.Variable(3.))._type_spec},
callable_params={'f': tf.math.softplus})),
)
def testMostSpecificCompatibleTypeException(self, v1, v2):
with self.assertRaises(ValueError):
v1.most_specific_compatible_type(v2)
with self.assertRaises(ValueError):
v2.most_specific_compatible_type(v1)
@parameterized.named_parameters(
('WithoutCallable',
_TestTypeSpec(
param_specs={'a': tf.TensorSpec([4, 2], tf.float32)},
omit_kwargs=('parameters',), non_identifying_kwargs=('name',))),
('WithCallable',
_TestTypeSpec(
param_specs={'a': tf.TensorSpec(None, tf.float32),
'b': tfb.Scale(
tf.Variable(2., shape=None))._type_spec},
callable_params={'f': tf.math.exp})),
)
def testRepr(self, spec):
spec_data = (auto_composite_tensor._AUTO_COMPOSITE_TENSOR_VERSION,
spec._param_specs, spec._non_tensor_params, spec._omit_kwargs,
spec._prefer_static_value, spec._non_identifying_kwargs,
spec._callable_params)
self.assertEqual(repr(spec), f'_TestTypeSpec{spec_data}')
if __name__ == '__main__':
test_util.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
fbfb1e835c20065b4cec19170dfd7fc7bd172c3c | 973042c86edd5b2a3edd406a15d34a92dc7b6921 | /pokemon_entities/admin.py | 16abdfd79e9ed5cfaba9939af57cd1415ecf0133 | [
"MIT"
] | permissive | KirillYabl/pokemon_map | 83ec8897d8b204a661758adb88f5b088f604eb1d | 704d7d171e3639c8e7694effd10f84087b8e5cad | refs/heads/master | 2022-11-22T04:19:23.688730 | 2020-07-25T10:26:00 | 2020-07-25T10:26:00 | 273,661,931 | 0 | 0 | null | 2020-06-20T07:40:33 | 2020-06-20T07:40:33 | null | UTF-8 | Python | false | false | 753 | py | from django.contrib import admin
from .models import Pokemon, PokemonEntity, PokemonElementType
class ElementTypeInline(admin.TabularInline):
    # Inline editor for the Pokemon <-> element-type M2M through table.
    model = Pokemon.element_type.through
    extra = 0  # no blank extra rows by default
@admin.register(Pokemon)
class PokemonAdmin(admin.ModelAdmin):
    """Pokemon admin: element types are edited via the inline below."""
    inlines = [
        ElementTypeInline,
    ]
    # Hide the default M2M widget because the inline already edits it.
    exclude = ('element_type',)
    list_filter = ('element_type',)
@admin.register(PokemonElementType)
class PokemonElementTypeAdmin(admin.ModelAdmin):
    """Element-type admin with a two-pane widget for `strong_against`."""
    filter_horizontal = ('strong_against',)
    list_filter = ('strong_against',)
@admin.register(PokemonEntity)
class PokemonEntityAdmin(admin.ModelAdmin):
    """Admin list of spawned pokemon entities with position and lifetime."""
    list_display = ('pokemon', 'lat', 'lon', 'appeared_at', 'disappeared_at', 'level')
    list_filter = ('pokemon', 'level')
| [
"kirill-yablunovskii@mail.ru"
] | kirill-yablunovskii@mail.ru |
53ddd555f8dc820d61c3622dbff8fbb70b16f8a9 | f9acbe1b91a1bda3db8e35df7518a68c397a7a89 | /pluserable/exceptions.py | 9a5fe767b68ad111f0ebd2c66ca9998235df15cd | [] | no_license | nandoflorestan/pluserable | 90abe1fac90a605c930928754e2d2a3e3e61f605 | bd524b14ff85a246cc5fb672aa05473a3a8006f3 | refs/heads/master | 2023-08-09T04:20:12.724982 | 2023-08-01T12:42:29 | 2023-08-01T12:42:29 | 33,750,793 | 4 | 1 | null | 2019-11-01T11:06:43 | 2015-04-10T21:08:13 | Python | UTF-8 | Python | false | false | 1,034 | py | """Custom exceptions raised by pluserable."""
from kerno.typing import DictStr
from pluserable.web.pyramid.typing import PRequest
class AuthenticationFailure(Exception):
    """Raised when handle and password do not match, during login."""

    # Number of seconds the user must wait before the next login attempt.
    seconds: int
class FormValidationFailure(Exception):  # TODO REMOVE
    """Raised when a Deform form fails validation.

    Carries the form and the original validation exception so a view can
    re-render the form with the user's input and the error messages.
    """

    def __init__(self, form, exc):  # noqa
        Exception.__init__(self)
        self.form = form  # the Deform form instance
        self.exc = exc    # the validation exception raised by Deform

    def result(self, request: PRequest, **cstruct) -> DictStr:  # noqa
        """Build template variables to re-render the failed form.

        ``cstruct`` entries are written back into the form so previously
        entered values are preserved in the re-rendered page.
        """
        # "deform_retail" selects between handing the form object to the
        # template (retail) or rendering it to an HTML string here.
        retail = request.kerno.pluserable_settings[  # type: ignore[attr-defined]
            "deform_retail"
        ]
        if retail:
            form = self.form
            errors = self.form.error.children
        else:
            form = self.exc
            errors = self.exc.error.children
        for k, v in cstruct.items():
            form.cstruct[k] = v
        if not retail:
            form = form.render()  # pre-render to HTML for the template
        return {"form": form, "errors": errors}
| [
"nandoflorestan@gmail.com"
] | nandoflorestan@gmail.com |
275e18a3996d28f311296cd58ed15c2619180243 | d392323c233c75c495a3b86eabc59b672538fd95 | /lab14/lab14.py | b04a3c6a62f814225eb0c93bc954a274f530d51a | [] | no_license | zacharyzhu2023/cs61a | 0b4197dbf2eb6c54ad8bf1b1db97d89692d3d914 | 45358b3f5b9154d28ba72a8249b96f6390f0d615 | refs/heads/master | 2022-11-03T14:48:57.418887 | 2020-06-11T04:55:41 | 2020-06-11T04:55:41 | 241,757,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,551 | py | def prune_min(t):
"""Prune the tree mutatively from the bottom up.
>>> t1 = Tree(6)
>>> prune_min(t1)
>>> t1
Tree(6)
>>> t2 = Tree(6, [Tree(3), Tree(4)])
>>> prune_min(t2)
>>> t2
Tree(6, [Tree(3)])
>>> t3 = Tree(6, [Tree(3, [Tree(1), Tree(2)]), Tree(5, [Tree(3), Tree(4)])])
>>> prune_min(t3)
>>> t3
Tree(6, [Tree(3, [Tree(1)])])
"""
"*** YOUR CODE HERE ***"
if not t.is_leaf():
if t.branches[0].label < t.branches[1].label:
t.branches = [t.branches[0]]
prune_min(t.branches[0])
else:
t = [t.branches[1]]
prune_min(t.branches[0])
# Tree Class
class Tree:
    """A rooted tree with a label and a list of branch subtrees.

    >>> t = Tree(3, [Tree(2, [Tree(5)]), Tree(4)])
    >>> t.label
    3
    >>> t.branches[0].label
    2
    >>> t.branches[1].is_leaf()
    True
    """

    def __init__(self, label, branches=[]):
        # The default list is never mutated: it is always copied below.
        for branch in branches:
            assert isinstance(branch, Tree)
        self.label = label
        self.branches = list(branches)

    def is_leaf(self):
        """Return True when this node has no branches."""
        return len(self.branches) == 0

    def map(self, fn):
        """Mutate the tree, replacing every label by fn(label)."""
        self.label = fn(self.label)
        for branch in self.branches:
            branch.map(fn)

    def __contains__(self, e):
        """Return True if label e occurs anywhere in this tree."""
        return self.label == e or any(e in branch for branch in self.branches)

    def __repr__(self):
        suffix = f', {self.branches!r}' if self.branches else ''
        return f'Tree({self.label}{suffix})'

    def __str__(self):
        def render(t, indent=0):
            text = ' ' * indent + str(t.label) + "\n"
            for branch in t.branches:
                text += render(branch, indent + 1)
            return text
        return render(self).rstrip()
| [
"zacharyzhu@berkeley.edu"
] | zacharyzhu@berkeley.edu |
c95f313ade249ca67f5f1278c59aa0bdc2e5c1de | a5ce0460f7b1823ec04b857810910dca552347b6 | /Multiplier.py | 4a424ad6ff8d395f77ed82babe2a91ba82da5cc9 | [] | no_license | dobbse42/arithmetic | 0550f904ab4e08ded4821425c934b53beea6c66a | d5e2c1e4e79bf8446be7f073080226282fa1999e | refs/heads/master | 2021-11-13T16:48:36.287279 | 2021-10-31T04:43:37 | 2021-10-31T04:43:37 | 242,825,696 | 0 | 0 | null | 2020-02-24T19:36:07 | 2020-02-24T19:36:06 | null | UTF-8 | Python | false | false | 1,874 | py | import cirq
from Control_add import ctrl_add
# testing qubit setup
# circuit.append(cirq.H(q) for q in qubitsA)
# circuit.append(cirq.Z(q) for q in qubitsB)
# circuit.append(cirq.X(q) for q in qubitsOut)
"""
Pseudocode for full multiplier:
//step 1: toffolis
for(int i = 0; i < n; i++)
{
toffoli(qubitsSumnB(0), qubitsSumnA(i), qubitsOut(i);
}
//step 2: ctrladd
for(int i = 0; i < n+1; i++)
{
ctrladd(qubitsSumnB(1), qubitsSumnA(0:i-1), qubitsOut(0:i))
}
for(int i = 0; i < n+1; i++)
{
ctrladd(qubitsSumnB(i), qubitsSumnA, qubitsOut(i:i+n+1)) //ctrl on qubitsSumnB[i], sum mapped to qubitsOut[i:i+n-1],
//the last qubit in qubitsOut(i:i+n) is ancillary. Also it may be n+2 instead of n+1, not sure.
}
"""
# def the following as ctrl_add with ctrl as the ctrl, qubitsSumnA as the bits of one summand, and qubitsSumnB as the
# bits of the other, with the result stored in qubitsSumnB. qubitsSumnA[n] is ancillary, qubitsSumnB[n] = s[4] dot ctrl
# NOTE: The Coreas-Thapliyal paper defines the ancillary bit as A[n+1] and the final sum bit as A[n]. I define them as
# B[n], B[n+1] respectively to stay consistent with the definitions in the multiplier.
class multiplier:
    """Builds a cirq circuit computing out = A * B (schoolbook shift-and-add)."""

    def __init__(self, A, B, out):
        # A and B are the operand qubit registers; out receives the product
        # (plus the ancilla space consumed by the controlled adders).
        self.size = len(A)
        self.A = A
        self.B = B
        self.out = out

    def multiply(self):
        """Return the multiplication circuit; also prints the Toffoli count."""
        circuit = cirq.Circuit()
        # for gate-counting purposes
        toffcount = 0
        # step 1: toffolis -- out[i] ^= B[0] AND A[i] forms the first
        # partial product (multiplication by the lowest bit of B).
        for i in range(0, self.size):
            circuit.append(cirq.decompose(cirq.TOFFOLI(self.B[0], self.A[i], self.out[i])))
            toffcount += 1
        # step 2 (and 3): for each remaining bit of B, conditionally add A
        # into the output register shifted left by i (controlled adder on
        # the slice out[i : i+size+2], whose top qubits are carry/ancilla).
        for i in range(1, self.size):
            circuit += ctrl_add(self.B[i], self.A, self.out[i:i+self.size+2]).construct_circuit()
        print("Toffoli count in multiply: ")
        print(toffcount)
        return circuit;
| [
"dobbse42@gmail.com"
] | dobbse42@gmail.com |
4df672237deb6b434fcf93c6671365f52998df9d | 92f47cdf958f4f3acc1f36972c49175f6a20d3cc | /que65.py | a19831b60733320891df0234f6cdb3378ad6b69c | [] | no_license | MAJEEDKAM/code-kata-problems | 1301490d3b3894316e0744450f60cd18f8a57e1c | 560ec4fafb8d1abb6285721a39db75f35591c86a | refs/heads/master | 2020-05-25T04:02:45.162126 | 2019-05-31T14:36:06 | 2019-05-31T14:36:06 | 187,618,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20 | py | x= input()
print(x)
| [
"noreply@github.com"
] | MAJEEDKAM.noreply@github.com |
5b5cc43e628169bafe2ab5b3918132340f8d47d4 | 66607c695a2e12d7640eef7b4641f79c34054ebd | /screenplay/wsgi.py | c50d453c4ea25b54a0f3b3bd32aeb0645dab4fc0 | [] | no_license | manikhanuja/screenplay-api | add56968cb131c36e8367291d192009daa16add0 | ef228e0cc208f567062688785c9d169897d205e5 | refs/heads/master | 2020-03-24T11:44:39.367493 | 2018-07-29T21:55:09 | 2018-07-29T21:55:09 | 142,694,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for screenplay project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at the project settings before building the application.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "screenplay.settings")

# Module-level WSGI callable imported by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"manikhanuja1981@hotmail.com"
] | manikhanuja1981@hotmail.com |
75607d3d646d6150c82a85472ab943c297ff0728 | 2a02eed888314a8c91cccd86c7f8ec572760d421 | /problem.py | 657cb6467504840aba017313c3faaf884dcb35bb | [] | no_license | uranix/tvd-dg | 46d4512737be6b1375992ff9e7020f9e7b9466b6 | de19c6da18e268331f0ed5ab1dd47fa254cd20e8 | refs/heads/master | 2021-05-06T04:17:51.591053 | 2018-02-21T12:54:21 | 2018-02-21T12:54:21 | 114,929,063 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,435 | py |
class Problem(object):
    """Abstract 1-D hyperbolic problem u_t + F(u)_x = 0.

    Subclasses supply the domain, initial data, medium parameters and the
    flux/eigenstructure used by the solver.

    Fix: the per-method documentation was originally written as
    free-standing string literals placed *before* each @staticmethod, so it
    was evaluated and discarded at class-creation time; the text is now in
    real docstrings attached to the methods.
    """

    # Cell/interface classification codes used by R().
    INNER = 0
    LEFTBC = 1
    RIGHTBC = 2

    @staticmethod
    def T():
        """Termination time."""
        raise NotImplementedError

    @staticmethod
    def L():
        """Domain size."""
        raise NotImplementedError

    @staticmethod
    def u0(x):
        """A function returning the initial state at point x."""
        raise NotImplementedError

    @staticmethod
    def p(x):
        """A function returning the medium parameters at point x."""
        raise NotImplementedError

    @staticmethod
    def F(u, p):
        """Differential problem flux. The equation is u_t + F(u)_x = 0.

        F may also depend on medium parameters p.
        """
        raise NotImplementedError

    @staticmethod
    def Omega(u, p):
        """Return the left eigenvectors matrix."""
        raise NotImplementedError

    @staticmethod
    def invOmega(u, p):
        """Return the inverse of the left eigenvectors matrix."""
        raise NotImplementedError

    @staticmethod
    def lamb(u, p):
        """Return the eigenvalues as a vector."""
        raise NotImplementedError

    @staticmethod
    def aMax(u, p):
        """Return the fastest travelling wave speed."""
        raise NotImplementedError

    @staticmethod
    def R(uL, uR, pL, pR, bctype):
        """Solve the Riemann problem at an interface of type bctype."""
        raise NotImplementedError
| [
"tsybulin@crec.mipt.ru"
] | tsybulin@crec.mipt.ru |
2be03e7acbcba9e13fb012254a8b99a4d5142e65 | ccf4a743d32a1babda3cbca1d0f622340179527f | /leetcode/2017Sep/ReverseInteger.py | 2377043163c8f2f740fc0cb1f3b428bed50a8272 | [] | no_license | zbo/zbodo | 678226a630eb49f2587d3b3cac62745930600188 | 98c7f3f6e76bd769ff0a6ed3f7117a49bbf015cd | refs/heads/master | 2021-05-19T02:57:40.375642 | 2020-04-23T07:35:50 | 2020-04-23T07:35:50 | 17,794,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | '''Reverse digits of an integer.
Example1: x = 123, return 321
Example2: x = -123, return -321
click to show spoilers.
Note:
The input is assumed to be a 32-bit signed integer. Your function should return 0 when the reversed integer overflows.
'''
class Solution(object):
    """LeetCode 7: reverse the decimal digits of a 32-bit signed integer."""

    def reverse(self, x):
        """Return x with its digits reversed, or 0 on 32-bit overflow.

        The sign is preserved; inputs or results outside
        [-2**31, 2**31 - 1] yield 0, as the problem statement requires.

        Improvements over the original: no recursive construction of a
        second Solution for negative inputs, and no manual in-place list
        swap loop -- string slicing reverses in one pass.
        """
        INT_MIN, INT_MAX = -2147483648, 2147483647
        if x < INT_MIN or x > INT_MAX:
            return 0
        sign = -1 if x < 0 else 1
        # int() drops leading zeros of the reversed string (120 -> 21).
        result = sign * int(str(abs(x))[::-1])
        if result < INT_MIN or result > INT_MAX:
            return 0
        return result
if __name__ == "__main__":
    # Fix: `print s.reverse(123)` is Python-2-only syntax and is a
    # SyntaxError under Python 3; the parenthesized call runs on both.
    s = Solution()
    print(s.reverse(123))
| [
"zhu@mac-zhubo.local"
] | zhu@mac-zhubo.local |
258dc0059100be0f2f1b4b7be3d1e49e0f6ea074 | 65078e52efc716e1b427213d6e12056371fa3614 | /lab11/todo/todo-back/todoback/todoback/urls.py | 419446560754bbb2bdda8e134a82991b74019af1 | [] | no_license | Alimur4ikS9/Webtech19_labs | 5d54b0c31e5f3ef205b275ebd32f73789efaa6f3 | c64ff2be3a0fcfac4f0f54587bd725e19aaeacb8 | refs/heads/master | 2022-10-28T00:12:04.320858 | 2019-04-16T07:12:02 | 2019-04-16T07:12:02 | 181,624,153 | 0 | 1 | null | 2022-10-27T04:45:20 | 2019-04-16T05:58:50 | Python | UTF-8 | Python | false | false | 750 | py | """todoback URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"alinur.sabit.9@gmail.com"
] | alinur.sabit.9@gmail.com |
f102c62063372ca015e48a2acf1b4c2a3b0d94e8 | 52bd21812518ffb3af3cb4237cc337e98fbced3c | /Original Example Code/Starter_ z_Original Kaggle Code.py | 197cf68554c124ae90d3d35a22eb50c365b7b378 | [
"MIT"
] | permissive | etv-america/keras-corn | 337efa792a652830144b59938890dd59636063b9 | d65d8c1cbab9364c639b610d4f3aeeda941a772e | refs/heads/master | 2020-06-24T20:11:38.104770 | 2019-08-09T21:41:05 | 2019-08-09T21:41:05 | 199,074,388 | 1 | 5 | null | null | null | null | UTF-8 | Python | false | false | 6,818 | py | #!/usr/bin/env python
# coding: utf-8
# ## Introduction
# Greetings from the Kaggle bot! This is an automatically-generated kernel with starter code demonstrating how to read in the data and begin exploring. Click the blue "Edit Notebook" or "Fork Notebook" button at the top of this kernel to begin editing.
# ## Exploratory Analysis
# To begin this exploratory analysis, first use `matplotlib` to import libraries and define functions for plotting the data. Depending on the data, not all plots will be made. (Hey, I'm just a kerneling bot, not a Kaggle Competitions Grandmaster!)
# In[1]:
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# get_ipython().run_line_magic('matplotlib', 'inline')
# To see our directory
import os
import random
import gc # Garbage collector for cleaning deleted data from memory
# There is 0 csv file in the current version of the dataset:
#
# In[2]:
train_healthy_dir = './input/dataset/train_corn/healthy/'
train_spot_dir = './input/dataset/train_corn/spot/'
train_rust_dir = './input/dataset/train_corn/rust/'
train_blight_dir = './input/dataset/train_corn/blight/'
test_healthy_dir = './input/dataset/test_corn/healthy/'
test_spot_dir = './input/dataset/test_corn/spot/'
test_rust_dir = './input/dataset/test_corn/rust/'
test_blight_dir = './input/dataset/test_corn/blight/'
train_healthy = [train_healthy_dir+'{}'.format(i) for i in os.listdir(train_healthy_dir)]
train_spot = [train_spot_dir+'{}'.format(i) for i in os.listdir(train_spot_dir)]
train_rust = [train_rust_dir+'{}'.format(i) for i in os.listdir(train_rust_dir)]
train_blight = [train_blight_dir+'{}'.format(i) for i in os.listdir(train_blight_dir)]
test_healthy = [test_healthy_dir+'{}'.format(i) for i in os.listdir(test_healthy_dir)]
test_spot = [test_spot_dir+'{}'.format(i) for i in os.listdir(test_spot_dir)]
test_rust = [test_rust_dir+'{}'.format(i) for i in os.listdir(test_rust_dir)]
test_blight = [test_blight_dir+'{}'.format(i) for i in os.listdir(test_blight_dir)]
# The next hidden code cells define functions for plotting data. Click on the "Code" button in the published kernel to reveal the hidden code.
# In[3]:
train_imgs = train_blight[:len(train_blight)-1] + train_healthy[:len(train_blight)-1]
random.shuffle(train_imgs)
gc.collect()
# In[4]:
import matplotlib.image as mpimg
for ima in train_imgs[0:3]:
img = mpimg.imread(ima)
imgplot = plt.imshow(img)
plt.show()
# In[5]:
nrows = 256
ncolumns = 256
channels = 3
# In[6]:
print(train_healthy[:5])
# In[7]:
print(train_imgs[:5])
# In[8]:
def read_and_process_image(list_of_images):
    """Load, resize and label the training images.

    Returns (x, y): x is a list of BGR images resized to
    (nrows, ncolumns); y holds 1 for paths found in the module-level
    train_healthy list and 0 otherwise (presumably blight -- the caller
    only mixes healthy and blight paths; confirm against train_imgs).
    """
    x = []
    y = []
    for image in list_of_images:
        # cv2.imread yields BGR data; INTER_CUBIC preserves detail on resize.
        x.append(cv2.resize(cv2.imread(image, cv2.IMREAD_COLOR), (nrows, ncolumns), interpolation=cv2.INTER_CUBIC))
        # Binary label by membership in the global train_healthy path list.
        if image in train_healthy:
            y.append(1)
        else:
            y.append(0)
    return x, y
x, y = read_and_process_image(train_imgs)
# In[9]:
plt.figure(figsize=(20, 10))
columns = 5
for i in range(columns):
print(i,"is a", y[i])
plt.subplot(5 / columns + 1, columns, i + 1)
plt.imshow(x[i])
# In[10]:
import seaborn as sns
gc.collect()
x = np.array(x)
y = np.array(y)
sns.countplot(y)
plt.title('Labels for Healthy and Blight')
# In[11]:
print("Shape of train images is:", x.shape)
print("Shape of labels is: ", y.shape)
print(x)
# In[12]:
test_imgs = test_blight[:len(test_blight)-1] + test_healthy[:len(test_blight)-1]
random.shuffle(test_imgs)
gc.collect()
# In[13]:
ntrain = len(train_imgs)
nval = len(test_imgs)
batch_size = 32
# In[14]:
import keras
from keras import layers
from keras import models
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import img_to_array, load_img
# In[15]:
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',input_shape=(256, 256, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5)) #Dropout for regularization
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid')) #Sigmoid function at the end because we have just two classes
# In[16]:
model.summary()
# In[17]:
model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4), metrics=['acc'])
# In[18]:
#Lets create the augmentation configuration
#This helps prevent overfitting, since we are using a small dataset
train_datagen = ImageDataGenerator(rescale=1./255, #Scale the image between 0 and 1
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,)
val_datagen = ImageDataGenerator(rescale=1./255) #We do not augment validation data. we only perform rescale
# In[19]:
def read_and_process_image_test(list_of_images):
    """Load, resize and label the validation images.

    Same behavior as read_and_process_image but labels against the
    test_healthy path list: 1 = healthy, 0 = everything else.
    """
    X = []
    Y = []
    for image in list_of_images:
        # Match the preprocessing used for the training set exactly.
        X.append(cv2.resize(cv2.imread(image, cv2.IMREAD_COLOR), (nrows, ncolumns), interpolation=cv2.INTER_CUBIC))
        if image in test_healthy:
            Y.append(1)
        else:
            Y.append(0)
    return X, Y
x_test, y_test = read_and_process_image_test(test_imgs)
x_test = np.array(x_test)
y_test = np.array(y_test)
# In[20]:
#x, y = read_and_process_image_test(train_imgs)
# In[21]:
#Create the image generators
train_generator = train_datagen.flow(x, y, batch_size=batch_size)
val_generator = val_datagen.flow(x_test, y_test, batch_size=batch_size)
# In[22]:
print(x.shape)
# In[23]:
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
# In[24]:
from keras import backend as K
K.tensorflow_backend._get_available_gpus()
# In[25]:
import tensorflow as tf
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# In[26]:
#The training part
#We train for 64 epochs with about 100 steps per epoch
history = model.fit_generator(train_generator,
steps_per_epoch=ntrain // batch_size,
epochs=64,
validation_data=val_generator,
validation_steps=nval // batch_size)
# In[ ]:
# In[ ]:
| [
"ziada@nmsu.edu"
] | ziada@nmsu.edu |
af4fdfa3a9fcb189f8904e1b4439ff0138b37449 | f0e048b2398b42a3c3ec42925ab75f754cd8d214 | /mmdet/datasets/__init__.py | 64f8760cc4d253c5324b32a3ae05f8a9ac1b7094 | [] | no_license | myknowntime/RIDet | c56535f52ccf76e41bd181faf2bceb2f0e8fbd57 | 96bee9a7089a267855d494fbf9d2f2f78064c54e | refs/heads/master | 2023-08-14T23:46:32.849835 | 2021-10-06T14:29:31 | 2021-10-06T14:29:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | from .builder import build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .custom import CustomDataset
from .dataset_wrappers import ConcatDataset, RepeatDataset, ClassBalancedDataset
from .loader import DistributedGroupSampler, GroupSampler, build_dataloader
from .registry import DATASETS
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .dota_obb import DotaOBBDataset
from .hrsc2016 import HRSC2016Dataset
from .gaofen2020 import GF2020PlaneDataset
from .gaofen2020 import GF2020ShipDataset
from .RAChallenge import RAChallengeDataset
from .ucas_aod import UCAS_AODDataset
from .icdar2015 import ICDAR2015Dataset
from .RAChallenge_airCarrier import RAChallengeAirCarrierDataset
from .msra_td500 import MSRA_TD500Dataset
# Fix: the list was missing commas after 'UCAS_AODDataset' and
# 'RAChallengeAirCarrierDataset'; Python's implicit adjacent-string
# concatenation silently merged them with the next entry into bogus names
# ('UCAS_AODDatasetGF2020PlaneDataset', ...), breaking `import *`.
__all__ = [
    'CustomDataset', 'XMLDataset', 'CocoDataset', 'VOCDataset',
    'CityscapesDataset', 'GroupSampler', 'DistributedGroupSampler',
    'build_dataloader', 'ConcatDataset', 'RepeatDataset', 'ClassBalancedDataset',
    'WIDERFaceDataset', 'DATASETS', 'build_dataset', 'DotaOBBDataset', 'UCAS_AODDataset',
    'GF2020PlaneDataset', 'GF2020ShipDataset', 'RAChallengeDataset', 'RAChallengeAirCarrierDataset',
    'ICDAR2015Dataset', 'MSRA_TD500Dataset',
]
| [
"mq_chaser@126.com"
] | mq_chaser@126.com |
b96eabaf1b89b7420a9ccfa64eb37b87da2a2380 | e63969b446eae3e971baae954d0b2b7283b1e20f | /draw_barcode.py | 6a6b4643735a5555fc4061f5f3440d90d01cf4a6 | [] | no_license | jinjiel1994/CIS210 | 7b751453efd662c0118fe344cd7a6cfa61d59f33 | 8cb73a67353a3633e307053f102eb978daf43529 | refs/heads/master | 2021-09-27T22:12:48.484776 | 2018-11-12T06:19:34 | 2018-11-12T06:19:34 | 109,931,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,561 | py | """
draw_barcode.py: Draw barcode representing a ZIP code using Turtle graphics
Authors: Jim Li ID:951532421
Credits: Starter code
CIS 210 assignment 3, part 2, Fall 2015.
"""
import argparse
import time
import turtle
SLEEP_TIME = 30
ENCODINGS = [[1, 1, 0, 0, 0],
[0, 0, 0, 1, 1],
[0, 0, 1, 0, 1],
[0, 0, 1, 1, 0],
[0, 1, 0, 0, 1],
[0, 1, 0, 1, 0],
[0, 1, 1, 0, 0],
[1, 0, 0, 0, 1],
[1, 0, 0, 1, 0],
[1, 0, 1, 0, 0]
]
SINGLE_LENGTH = 25
def compute_check_digit(digits):
    """
    Compute the check digit for use in ZIP barcodes.

    args:
        digits: iterable of digits -- ints, or the characters of a digit
            string (both caller styles appear in this file)
    returns:
        check digit as an integer in 0..9, chosen so that the digit sum
        plus the check digit is a multiple of 10
    """
    # Fixes vs. original: no shadowing of the builtin sum(), no manual
    # index loop, and the `if check_digit == 10: 0` special case is folded
    # into a second modulo.
    total = sum(int(d) for d in digits)
    return (10 - total % 10) % 10
"""
Draw each bar
args:
my_turtle: inport turtle
digit: input binary digit to draw bar
"""
def draw_bar(my_turtle, digit):
my_turtle.left(90)
if digit == 0:
length = SINGLE_LENGTH
else:
length = 2 * SINGLE_LENGTH
my_turtle.forward(length)
my_turtle.up()
my_turtle.backward(length)
my_turtle.right(90)
my_turtle.forward(10)
my_turtle.down()
"""
Encode the zip and compute the check zip to binary digit and then draw the bar
arg:
zip: the zip prepared to be printed by barcode
"""
def draw_zip(my_turtle, zip):
draw_bar(my_turtle, 1) # start bar
binary = []
origin_zip = str(zip) # save the original zip for computing check digit
while zip > 0:
reminder = zip%10 # extract each digit from zip
binary = ENCODINGS[reminder] + binary # encode each digit to binary number
zip = zip//10
binary = binary + ENCODINGS[compute_check_digit(origin_zip)]
for i in range(len(binary)):
draw_bar(my_turtle, binary[i])
draw_bar(my_turtle, 1) # end bar
def main():
    """Parse the ZIP argument, validate its range, and draw its barcode."""
    parser = argparse.ArgumentParser()
    parser.add_argument("ZIP", type=int)
    args = parser.parse_args()
    zip = args.ZIP
    # Valid ZIP codes are at most five decimal digits.
    if zip <= 0 or zip > 99999:
        print("zip must be > 0 and < 100000; you provided", zip)
    else:
        my_turtle = turtle.Turtle()
        draw_zip(my_turtle, zip)
        # Keep the drawing window open briefly before the script exits.
        time.sleep(SLEEP_TIME)


if __name__ == "__main__":
    main()
| [
"jinjiel1994@gmail.com"
] | jinjiel1994@gmail.com |
6174cf4ec932f9d51f5254f410611377765074c3 | 4fde32723e04cbb9c929c54dad55f4333ba48d90 | /tests/test_filesink_delay.py | c9019a01aede25e85021924fd72a1021d0fbf0b0 | [
"MIT"
] | permissive | Delgan/loguru | 29f1b64a4944886559d76e22029fe1e5c88e7fec | 80bcf4e25da9f79617ef23ca0fb29b100caac8f2 | refs/heads/master | 2023-09-03T20:18:46.892038 | 2023-09-03T16:19:15 | 2023-09-03T16:19:15 | 100,401,612 | 15,902 | 775 | MIT | 2023-09-11T14:19:42 | 2017-08-15T17:22:32 | Python | UTF-8 | Python | false | false | 3,435 | py | import datetime
import time
from loguru import logger
from .conftest import check_dir
def test_file_not_delayed(tmp_path):
file = tmp_path / "test.log"
logger.add(file, format="{message}", delay=False)
assert file.read_text() == ""
logger.debug("Not delayed")
assert file.read_text() == "Not delayed\n"
def test_file_delayed(tmp_path):
    # With delay=True the sink must not create the file until the first
    # log record is actually emitted.
    file = tmp_path / "test.log"
    logger.add(file, format="{message}", delay=True)
    assert not file.exists()
    logger.debug("Delayed")
    assert file.read_text() == "Delayed\n"
def test_compression(tmp_path):
i = logger.add(tmp_path / "file.log", compression="gz", delay=True)
logger.debug("a")
logger.remove(i)
check_dir(tmp_path, files=[("file.log.gz", None)])
def test_compression_early_remove(tmp_path):
i = logger.add(tmp_path / "file.log", compression="gz", delay=True)
logger.remove(i)
check_dir(tmp_path, size=0)
def test_retention(tmp_path):
for i in range(5):
tmp_path.joinpath("test.2020-01-01_01-01-%d_000001.log" % i).write_text("test")
i = logger.add(tmp_path / "test.log", retention=0, delay=True)
logger.debug("a")
logger.remove(i)
check_dir(tmp_path, size=0)
def test_retention_early_remove(tmp_path):
for i in range(5):
tmp_path.joinpath("test.2020-01-01_01-01-%d_000001.log" % i).write_text("test")
i = logger.add(tmp_path / "test.log", retention=0, delay=True)
logger.remove(i)
check_dir(tmp_path, size=0)
def test_rotation(tmp_path, freeze_time):
with freeze_time("2001-02-03"):
i = logger.add(tmp_path / "file.log", rotation=0, delay=True, format="{message}")
logger.debug("a")
logger.remove(i)
check_dir(
tmp_path,
files=[
("file.2001-02-03_00-00-00_000000.log", ""),
("file.log", "a\n"),
],
)
def test_rotation_early_remove(tmp_path):
i = logger.add(tmp_path / "file.log", rotation=0, delay=True, format="{message}")
logger.remove(i)
check_dir(tmp_path, size=0)
def test_rotation_and_retention(freeze_time, tmp_path):
    # Each 20-char message pushes the file past the 30-byte rotation
    # threshold, so every write after the first rotates; retention=2 keeps
    # only the two newest rotated files besides the active one.
    with freeze_time("1999-12-12") as frozen:
        filepath = tmp_path / "file.log"
        logger.add(filepath, rotation=30, retention=2, delay=True, format="{message}")
        for i in range(1, 10):
            time.sleep(0.05)  # Retention is based on mtime.
            frozen.tick(datetime.timedelta(seconds=0.05))
            logger.info(str(i) * 20)
    check_dir(
        tmp_path,
        files=[
            ("file.1999-12-12_00-00-00_350000.log", "7" * 20 + "\n"),
            ("file.1999-12-12_00-00-00_400000.log", "8" * 20 + "\n"),
            ("file.log", "9" * 20 + "\n"),
        ],
    )
def test_rotation_and_retention_timed_file(freeze_time, tmp_path):
with freeze_time("1999-12-12") as frozen:
filepath = tmp_path / "file.{time}.log"
logger.add(filepath, rotation=30, retention=2, delay=True, format="{message}")
for i in range(1, 10):
time.sleep(0.05) # Retention is based on mtime.
frozen.tick(datetime.timedelta(seconds=0.05))
logger.info(str(i) * 20)
check_dir(
tmp_path,
files=[
("file.1999-12-12_00-00-00_350000.log", "7" * 20 + "\n"),
("file.1999-12-12_00-00-00_400000.log", "8" * 20 + "\n"),
("file.1999-12-12_00-00-00_450000.log", "9" * 20 + "\n"),
],
)
| [
"delgan.py@gmail.com"
] | delgan.py@gmail.com |
f193f858819d4a0f5eda79045596187844d5468a | 5a1132fac9bc79ee1deeb8ed7d428deeb7a5fb91 | /FuncionesLambda.py | aaedbfbb4731a31036732109ffa27a0467618329 | [] | no_license | mariacamila11/algoritmo-y-estructura-datos | 002e788bb4e39df7fd53e3780586039efaed7897 | 95d2a15a66e870313a63c642f252152fe11ba394 | refs/heads/master | 2023-01-23T04:27:56.221608 | 2020-11-24T19:15:03 | 2020-11-24T19:15:03 | 281,713,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,868 | py | #Muestre en pantalla n veces un string ingresado
potenciador = lambda base, exponente : base ** exponente
print (potenciador (2,4))
#Muestre en pantalla n veces un string ingresado
mostrarPalabra = lambda palabra, cantidad_veces : print (palabra*cantidad_veces)
mostrarPalabra ('Me gusta programar en python \n', 15)
#Muestre en pantalla el máximo número de dos listas ingresadas
encontrarMaximos = lambda lista1, lista2 : print (max(lista1), max (lista2))
edadesGrupoA = [18,29,23,26,27,20,22,19,16]
edadesGrupoB = [18,49,43,26,27,40,22,19,16]
encontrarMaximos (edadesGrupoA,edadesGrupoB)
#Devuelva verdadero o falso dependiendo si un número es par o no
par = lambda valor : valor %2 == 0
print (par(2))
print (par(3))
#Devuelva verdadero o falso dependiendo si un número es impar o no
impar = lambda valor : valor % 2 != 0
print (impar (2))
print (impar (3))
# Que devuelva la unión de dos palabras
unionPalabras = lambda palabra1 , palabra2 : palabra1 + ' ' + palabra2
print (unionPalabras('Hola','amigo'))
#Que dado un nombre salude a la persona ingresada
preguntaNombre = 'Ingrese su nombre por favor : '
nombre = input(preguntaNombre)
saludar = lambda name = '' : print (f'Bienvenido {name} a este programa')
saludar(nombre)
# Que dada una palabra devuelva el largo de la misma
palabraDada = 'Hola'
lenPalabra = lambda palabra : len (palabra)
print (lenPalabra(palabraDada))
#Que utilizando la anterior muestre en pantalla el resultado
showLen = lambda funcion, palabra : print (funcion(palabra))
showLen(lenPalabra, palabraDada)
#Devuelva el área de un triángulo dada su base y altura
areaTriangulo = lambda base, altura : base*altura/2
area = areaTriangulo (8,4)
print (area)
#Calcule el imc sabiendo la altura y el peso (imc = peso / altura^2)
imcCalculator = lambda peso, altura : peso/(altura**2)
imc= imcCalculator(80,1.67)
print (imc)
| [
"herreram.maria@uces.edu.co"
] | herreram.maria@uces.edu.co |
fef8be44fbe71dedb6bac7285358f66e64a58ea5 | e6fac8e0289d9f82369d2eb8e22bc175c6f51b3b | /Arcade/Intro/Level 3/All Longest Strings/code.py | efca5418d8e685435a8919240333d5b39189d5a6 | [] | no_license | Zahidsqldba07/CodeFights-9 | f361c15d24f96afa26de08af273a7f8f507ced4a | 6c5d152b1ad35cf178dd74acbc44ceb5fdcdf139 | refs/heads/master | 2023-03-18T23:52:43.274786 | 2017-05-12T07:28:08 | 2017-05-12T07:28:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | def allLongestStrings(inputArray):
length = len(inputArray[0])
strings = [inputArray[0]]
for string in inputArray[1:]:
if len(string) == length:
strings.append(string)
continue
if len(string) > length:
length = len(string)
strings = [string]
continue
if len(string) < length:
# Nothing to do. Skip.
continue
return strings
| [
"hallosputnik@gmail.com"
] | hallosputnik@gmail.com |
c72c223dd78798e9b6e0be403eff60ba558c15d9 | 9a987c4c30c7dd9c314e89bb335a818500c20b11 | /accounts/Models/TrangThaiCXModel.py | 6fcbe0ce40fde6d14c9d055b1d32a0898a7044b4 | [] | no_license | tandaica0612/BackHTQLCX | 1e7d7306889ac9d16232bd4a7aff3113a6371f28 | 8e56cfb5b54ebfb7e3e6acdfdb2bace2a1e2918b | refs/heads/master | 2023-06-06T13:35:32.046042 | 2019-12-10T08:56:07 | 2019-12-10T08:56:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | from django.db import models
class Trangthaicx(models.Model):
    """Unmanaged mapping of the legacy 'TrangThaiCX' status table."""

    matinhtrang = models.IntegerField(db_column='MaTinhTrang', primary_key=True)  # Field name made lowercase.
    tinhtrang = models.CharField(db_column='TinhTrang', max_length=255, blank=True, null=True)  # Field name made lowercase.
    ghichu = models.TextField(db_column='GhiChu', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # the table is created/maintained outside Django
        db_table = 'TrangThaiCX'

    def __str__(self):
        # Bug fix: the original `return "%d: %s", (...)` returned a tuple
        # (the comma made it a tuple literal); Django requires __str__ to
        # return a str, so use %-formatting.
        return "%d: %s" % (self.matinhtrang, self.tinhtrang)
| [
"ptttien97@gmail.com"
] | ptttien97@gmail.com |
18ea2b4293e7049f258560047dad6b41caf8e47f | 6602f9dd684ab09686703d8d2da6c8f17762a3be | /dataserver.py | d074a2b88e935add962e263d37906400a6fc6c07 | [] | no_license | PradeepJampani/Fault-Tolerance-for-File-operations-Systems- | a0a0da1d9f69f50ab83f93622cb390ac01edc175 | 20e93250687f7eb359925381b59a6168c3aa46eb | refs/heads/master | 2020-09-06T06:57:49.251153 | 2019-11-08T00:59:10 | 2019-11-08T00:59:10 | 220,357,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | #!/usr/bin/env python
import sys, os
from sys import argv, exit
import time
def launch_servers(port_id):
    # Replace the current (child) process with an xterm running a
    # metaserver instance on the given port. execvp's second argument is
    # the full argv, so 'xterm' appears again as argv[0].
    cmd = ['xterm', '-e', 'python' , 'metaserver.py', '--port=%s' % port_id]
    os.execvp('xterm', cmd)
if __name__ == '__main__':
    # Bug fix: the original tested `len(argv) < 1`, which can never be true
    # (argv always contains the script name), so the usage message was
    # unreachable; require at least one port argument instead.
    if len(argv) < 2:
        print("usage: python dataserver.py <port for local instance> <port for data-server2> ..<port for data-server-n>")
        sys.exit(1)
    # Collect every port argument after the script name.
    server_Ids = []
    for id in range(len(argv) - 1):
        print(id)
        server_Ids.append(argv[id + 1])
    print(server_Ids)
    # Fork one child per port; each child exec's into an xterm/metaserver.
    for servers in server_Ids:
        pid = os.fork()
        if pid == 0:
            # Bug fix: the original nested a second redundant
            # `if pid == 0:` inside this branch.
            launch_servers(servers)
            break
| [
"noreply@github.com"
] | PradeepJampani.noreply@github.com |
b10b37f6b93fd5bbad4230577984f72d46c5c7be | 65a709ed1228e014834bb8eae03e8f02ff3086dd | /RPI/webstreaming.py | 800fa0da489be3f7d1e7696e1023a53366d3e7fa | [] | no_license | GUSecLab/privDoorbell | bc9749c77e20beecf4252b5554f152cfb572e18d | ee8ca096259152955f17455a4e8b857d304fc0a5 | refs/heads/master | 2023-03-11T01:40:03.504441 | 2021-02-09T20:17:11 | 2021-02-09T20:17:11 | 285,355,508 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,657 | py | from detectors.motion_detection.singlemotiondetector import SingleMotionDetector
from detectors.face_detection.opencv_detection import OpenCVDetector
from cryptoutils import HMACSHA256, AESCipher
from tokenList import TokenList
from utils.helper import StringHelper
from utils.param import Param
import threading
import argparse
import datetime
import time
from random import SystemRandom
import json
import re
import socket
import imutils
from imutils.video import VideoStream
import cv2
from flask import Response, Flask, render_template, request, abort
import firebase_admin
from firebase_admin import credentials, messaging
from gpiozero import Button
# Multithreading vars
outputFrame = None
stream = True
tokens = TokenList()
outputFrame_lock = threading.Lock()
notification_flag = Param.NOTIFICATION_NONE
notification_lock = threading.Lock()
# GPIO
doorbell_button = Button(23)
# Flask
app = Flask(__name__)
# Static
try:
with open("config.json") as f:
settings = json.load(f)
seed = settings['seed']
except:
print("Seed is not available. Try running init script again.", flush=True)
exit(0)
# Don't forget to remove special chars
HMACMachine = HMACSHA256(seed, "2")
pwd = re.sub("[^A-Za-z0-9]", "", AESCipher.bytesToBase64(HMACMachine.getBinDigest()))
RTMP_ADDR = 'rtmp://127.0.0.1:1935/live/mystream' + '?psk=' + pwd
print("RTMP_ADDR: " + RTMP_ADDR)
# RTMP_ADDR = 'http://127.0.0.1:8000/live?port=1935&app=live&stream=mystream'
DUMMY_PROB = 1e-1
DUMMY_INTERVAL = 5.0
# Firebase authentication
cred = credentials.Certificate("privdoorbell-af796472f9a4.json")
firebase_admin.initialize_app(cred)
# Streaming source
if stream:
vs = cv2.VideoCapture(RTMP_ADDR)
else:
vs = VideoStream(src=0).start()
def send_to_token(token: str, msg_type='face'):
try:
with open("config.json") as f:
settings = json.load(f)
seed = settings['seed']
except:
print("Seed is not available. Try running init script again.", flush=True)
return
HMACMachine = HMACSHA256(seed, "1")
AESMachine = AESCipher(HMACMachine.getBinDigest())
'''
ciphertext, tag = AESMachine.encrypt_base64(msg_type)
timestamp = str(time.time())
'''
timestamp = str(time.time())
plaintext = "type: {}; timestamp: {}".format(msg_type, timestamp)
ciphertext, tag = AESMachine.encrypt_base64(plaintext)
# Restruct:
# message = {
# 'encrypted("type: face; timestamp: timestamp") + tag(24 char) + iv(16 char)' : '',
# }
'''
message = messaging.Message(
data={
'type': ciphertext,
'tag': tag,
'iv': AESMachine.getIV_base64(),
'timestamp': timestamp
},
token=token,
)
'''
message = messaging.Message(
data={
ciphertext+tag+AESMachine.getIV_base64(): '',
},
token=token,
)
response = messaging.send(message)
print("send_to_token(): " + str({
'AESKey': AESCipher.bytesToBase64(HMACMachine.getBinDigest()),
'type': ciphertext + ' (' + msg_type + ')',
'tag': tag,
'iv': AESMachine.getIV_base64(),
'timestamp': timestamp + ' (' + StringHelper.timestamp2Readable(timestamp) + ')'
}))
print('send_to_token(): Attempted to send msg, res:', response, flush=True)
def send_dummy_packet():
global notification_flag
print("send_dummy_packet(): Started dummy packet thread", flush=True)
starttime = time.time()
#cryptogen = SystemRandom()
while True:
#if cryptogen.random() < DUMMY_PROB and not tokens.isEmpty():
if not tokens.isEmpty():
with notification_lock:
if notification_flag == Param.NOTIFICATION_FACE:
for t in tokens.getList():
send_to_token(t, Param.NOTIFICATION_STRING_FACE)
notification_flag = Param.NOTIFICATION_STRING_NONE
elif notification_flag == Param.NOTIFICATION_BELL:
for t in tokens.getList():
send_to_token(t, Param.NOTIFICATION_STRING_BELL)
notification_flag = Param.NOTIFICATION_STRING_NONE
else:
for t in tokens.getList():
send_to_token(t, Param.NOTIFICATION_STRING_NONE)
time.sleep(DUMMY_INTERVAL - ((time.time() - starttime) % DUMMY_INTERVAL))
@app.route("/")
def index():
return render_template("index.html")
def bell_button_callback():
global notification_flag
with notification_lock:
notification_flag = Param.NOTIFICATION_BELL
@app.route("/bell", methods = ['GET'])
def bell():
global notification_flag
with notification_lock:
notification_flag = Param.NOTIFICATION_BELL
return Param.HTTP_DEFAULT_RETURN
@app.route("/manageToken", methods = ['POST', 'GET'])
def manageToken():
global tokens
if request.method == 'GET':
return render_template("token_management.html", tokens = tokens.getDict())
elif request.method == 'POST':
print("manageToken(): " + str(request.form.to_dict()), flush=True)
for k, v in request.form.to_dict().items():
print("manageToken(): " + k)
tokens.delete(k)
return render_template("token_management_confirmed.html")
@app.route("/register", methods = ['GET', 'POST'])
def register():
global tokens
if request.method == 'GET':
return render_template("token_management.html", tokens = tokens.getDict())
print("register(): Start recving post")
data = request.form.to_dict()
print("register(): " + str(data), flush=True)
# Delimiter
delim = "---"
# If either is not available, return an error string
try:
with open("config.json") as f:
settings = json.load(f)
ret_msg = settings['seed'] + delim + settings['onion_hostname'] + delim + settings['onion_auth_cookie']
except:
return Param.HTTP_DEFAULT_RETURN
firebase_token, nickname, device_token = StringHelper.extractFromPassedDict(data)
tokens.insert(firebase_token, time.time(), device_token, nickname)
print("register(): Returned " + ret_msg)
tokens.dump()
return ret_msg
@app.route("/video_feed")
def video_feed():
return Response(generate(), mimetype = "multipart/x-mixed-replace; boundary=frame")
def detect_motion(frameCount):
global vs, outputFrame, outputFrame_lock
md = SingleMotionDetector(accumWeight=0.1)
total = 0
while True:
if stream:
_, frame = vs.read()
else:
frame = vs.read()
frame = imutils.resize(frame, width=400)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
timestamp = datetime.datetime.now()
cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
if total > frameCount:
motion = md.detect(gray)
if motion is not None:
print(motion, flush=True)
(thresh, (minX, minY, maxX, maxY)) = motion
cv2.rectangle(frame, (minX, minY), (maxX, maxY), (0, 0, 255), 2)
else:
#raise Exception('')
pass
md.update(gray)
total += 1
with outputFrame_lock:
outputFrame = frame.copy()
def detect_face(frameCount):
global vs, outputFrame, outputFrame_lock, notification_flag
fd = OpenCVDetector()
total = 0
cur_time = time.time() - 15
print_flag = True
while True:
num_faces = 0
if stream:
_, frame = vs.read()
else:
frame = vs.read()
if not vs.isOpened() and print_flag:
print("Empty capture object.")
print_flag = False
continue
if frame is None and print_flag:
print("Empty frame.")
print_flag = False
continue
frame = imutils.resize(frame, width=400)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
timestamp = datetime.datetime.now()
cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
if total > frameCount:
num_faces, faces = fd.detect(gray)
if num_faces:
#print(num_faces, faces, flush=True)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
else:
#raise Exception('')
pass
total += 1
if num_faces:
if (time.time() - cur_time > 30) and not tokens.isEmpty():
with notification_lock:
notification_flag = Param.NOTIFICATION_FACE
cur_time = time.time()
else:
print("detect_face(): " + str(time.time() - cur_time))
with outputFrame_lock:
outputFrame = frame.copy()
def generate():
global outputFrame, outputFrame_lock
while True:
with outputFrame_lock:
if outputFrame is None:
continue
(flag, encodedImage) = cv2.imencode(".jpg", outputFrame)
if not flag:
continue
yield(b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + bytearray(encodedImage) + b'\r\n')
def simpleTCPPush():
global notification_flag
port = 8090
s = socket.socket()
s.bind(('', port))
s.listen(5)
try:
while True:
c, addr = s.accept()
while True:
data = c.recv(1024)
if not data:
break
c.sendall(notification_flag)
except:
print("Socket Interrupt.")
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--frame-count", type=int, default=32)
ap.add_argument("-d", "--detector", type=str, default='face')
ap.add_argument("-t", "--tcp-push", type=int, default=1)
args = vars(ap.parse_args())
# Multi-thread tasking
# The detector thread
if args['detector'] == 'face':
detector_thread = threading.Thread(target = detect_face, args = (args["frame_count"], ))
else:
detector_thread = threading.Thread(target = detect_motion, args = (args["frame_count"], ))
detector_thread.daemon = True
detector_thread.start()
# The dummy packet thread
dummy_thread = threading.Thread(target = send_dummy_packet)
dummy_thread.daemon = True
dummy_thread.start()
# The TCP push thread
if args['tcp_push']:
tcp_thread = threading.Thread(target = simpleTCPPush)
tcp_thread.daemon = True
tcp_thread.start()
# GPIO
doorbell_button.when_pressed = bell_button_callback
app.run(host = "0.0.0.0", port = 8080, debug=True, threaded=True, use_reloader=False)
if not stream:
vs.stop()
else:
vs.release() | [
"fy92@georgetown.edu"
] | fy92@georgetown.edu |
9206c648fe0456aa59c3334211cc4850c0877167 | 3a5a3e4c1b12c860c8b584606aaea3b3c6c850a0 | /team/models.py | 0a51f1b5f6cfad9685206c6915c9893e2e744848 | [] | no_license | 519389812/teamwork | 4ca5599bb15796c6b5cabfa3b5ad53ed8a4f6689 | 726af6994ef27f12fa630bf9e22047dc54ed32e6 | refs/heads/master | 2022-11-06T14:21:43.752127 | 2020-06-16T03:21:21 | 2020-06-16T03:21:21 | 267,204,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | from django.db import models
from user.models import User
class Team(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name="组名")
member = models.ManyToManyField(User, related_name="member", default=None, blank=True, verbose_name="成员")
class Meta:
verbose_name = "分组"
verbose_name_plural = "分组"
def __str__(self):
return self.name
| [
"45133358@qq.com"
] | 45133358@qq.com |
6bc2c114bd9494ab951507009fe37deac87901f8 | 42cd507cb9be5811a1158b33f94451dbc9b07fa8 | /NMAR.py | 6bed6ee01743593ca09e1154974975b054a5b9d1 | [] | no_license | SuzanaM/Missing_data_imputation | 329e5e48a75624aebdf1514ba5a7bc0a60b8dc54 | 1125bd217dc18034c07b10997ae039699a9105ad | refs/heads/master | 2020-09-16T08:20:07.504935 | 2019-11-24T19:33:06 | 2019-11-24T19:33:06 | 223,710,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,198 | py | import pandas as pd
from scipy.stats import norm
import numpy as np
# Read dataframe with all features, but without ID_Numbers etc.
#df = pd.read_csv(r"C:\Users\Suzana\Desktop\Iris\iris2.csv")
'''
# Get the number of features (#columns)
n_features = df.shape[1]
# For each feature get the threshold value
thresholds = df.quantile(q=0.3, axis=0)
print(thresholds)
# Find std for every features
stds = df.std(axis=0)
print(stds)
# Find probabilities
probabilities = 1 - norm.cdf(thresholds, df, stds)
print(probabilities)
# Generate zeros and ones
m = np.random.binomial(n=1, size=df.shape, p=probabilities)
# print(m)
# Generate nans based on zeroes
full = np.where(m==0, np.nan, df)
# Ukoliko svi atributi imaju nan vrijednosti, onda izabrati random jedan koji ce da ima neku vrijednost:
for i in range(0, full.shape[0]):
if (np.isnan(full[i,:]).all()):
full[i,0] = df.iloc[i,0]
df1 = pd.DataFrame(full, columns=df.columns)'''
df1 = pd.read_csv(r"C:\Users\Suzana\Desktop\Iris\NMAR_30.csv")
print(df1.isna().sum())
print(df1.isna().sum().sum())
print(df1.shape[0]*df1.shape[1]*0.3)
#df1.to_csv(r"C:\Users\Suzana\Desktop\Iris\NMAR_30.csv") | [
"noreply@github.com"
] | SuzanaM.noreply@github.com |
6b8c90c7cb78e6418d1e0d60cd3461eef5e4cab4 | e3c68753f15076d6c158635c8bc80f8da1c4879e | /task1/calc.py | fe383a64bfa05c4cec3b8b87120ad6cd34fdfb5b | [] | no_license | IonianIronist/parallel_programming | fb793e2b06b51f502ad1a30103c727909b9a0784 | b7f044a0c6102cb6f774a9c00b0fdb233344a80a | refs/heads/main | 2023-04-30T09:25:25.088366 | 2021-05-24T17:19:04 | 2021-05-24T17:19:04 | 353,409,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | with open('output.txt', 'r') as output:
lines = [float(line) for line in output.readlines()]
with open('mean.txt', 'w') as mean:
mean.write(str(sum(lines)/len(lines)))
print(len(lines))
| [
"noreply@github.com"
] | IonianIronist.noreply@github.com |
4ef667f542aa3f59abd58194c4353b87fbb4098f | e73ccdc70b13183e786af35ed71444780eb6b79f | /django_oauth_web/dictionary_ui/apps.py | 3692009b6589912478491e1597842305f2a6ed14 | [
"Apache-2.0"
] | permissive | XSEDE/oauth-secured-api-example-web | 66e035cfbbd7669ed7011a0543d068c9a76dc214 | 980c86ad26a791b9ca42641104d053b2ea25bf66 | refs/heads/master | 2022-11-18T18:44:04.320824 | 2020-07-15T01:26:39 | 2020-07-15T01:26:39 | 259,757,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | from django.apps import AppConfig
class ExampleUiConfig(AppConfig):
name = 'dictionary_ui'
| [
"navarro@anl.gov"
] | navarro@anl.gov |
8c4bb5334598d8e75ee55f9f3acc037310ae37ed | 15a5ec2c20de03708228962aee2b86d279778b57 | /test/readPdf.py | 2218777de874b1ae607247d509e9433d4706ae11 | [] | no_license | just4jc/gooseeker-master | c7605b19818254dcde04bceb6deb531aaa332872 | bb346484104f5da90289b745d3ae54f44d9bbc85 | refs/heads/master | 2020-06-07T16:17:01.402655 | 2019-01-28T15:27:46 | 2019-01-28T15:27:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | # -*_coding:utf-8-*-
# 使用GsExtractor类的示例程序
# 访问集搜客论坛,以xslt为模板提取论坛内容
# xslt保存在xslt_bbs.xml中
from urllib import request
from lxml import etree
from gooseeker import GsExtractor
import html
# 访问并读取网页内容
url = "http://im.nju.edu.cn/teachers.do?type=1&mid=4"
conn = request.urlopen(url)
doc = etree.HTML(conn.read())
bbsExtra = GsExtractor() # 生成xsltExtractor对象
bbsExtra.setXsltFromAPI("e346796c93c6ba7441636666e401e5cc", "im.nju.edu.cn")
xs = bbsExtra.getXslt()
result = bbsExtra.extract(doc) # 调用extract方法提取所需内容
# out file
file_name = 'E:/parse_detail_' + '.xml'
open(file_name, "w").write(result)
print(result)
| [
"1368306623@qq.com"
] | 1368306623@qq.com |
fde98085cf1161f3585aeffbfa7e64a80679eb5f | b320e9de9909afad97669603a6597a1ba2e8161a | /wsgi.py | 5a56a5f6d9a052d9efc142c86eaeaca4ba01ca16 | [] | no_license | nuprakruthi/Test-02-Django | 8973fe13a9a6b3facd0d8c36d5b9f329675a3e2f | c77c0cec4834ec0255a3c06aa80549e3c7b33a8f | refs/heads/main | 2023-02-26T20:54:12.913591 | 2021-01-25T10:33:51 | 2021-01-25T10:33:51 | 332,708,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | """
WSGI config for DEMOPROJECT project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DEMOPROJECT.settings')
application = get_wsgi_application()
| [
"noreply@github.com"
] | nuprakruthi.noreply@github.com |
ef86fbfb19dccbac6f9dac33b709f68bf6d90bbd | 61215094b81485cf12e7a131e8b8666cc9f412fa | /yet_another_xor.py | 02d5985ac113d401c5949b80a558f0ec8147ec20 | [] | no_license | nielsen192/NeuralNetworks | 5191d988efa092216f0572e5b2476c80d2821624 | b9e7651ba7328c8e089c8e907466764ea677299f | refs/heads/master | 2021-01-22T05:37:59.179615 | 2017-06-27T18:21:13 | 2017-06-27T18:21:13 | 81,683,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,488 | py | import math
import numpy
import random
# note that this only works for a single layer of depth
INPUT_NODES = 2
OUTPUT_NODES = 1
HIDDEN_NODES = 2
# 15000 iterations is a good point for playing with learning rate
MAX_ITERATIONS = 2000
# setting this too low makes everything change very slowly, but too high
# makes it jump at each and every example and oscillate. I found .5 to be good
LEARNING_RATE = .5
print("Neural Network Program")
class network:
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
self.total_nodes = input_nodes + hidden_nodes + output_nodes
self.learning_rate = learning_rate
# set up the arrays
self.values = numpy.zeros(self.total_nodes)
self.expectedValues = numpy.zeros(self.total_nodes)
self.thresholds = numpy.zeros(self.total_nodes)
# the weight matrix is always square
self.weights = numpy.zeros((self.total_nodes, self.total_nodes))
# set random seed! this is so we can experiment consistently
random.seed(1)
# set initial random values for weights and thresholds
# this is a strictly upper triangular matrix as there is no feedback
# loop and there inputs do not affect other inputs
for i in range(self.input_nodes, self.total_nodes):
self.thresholds[i] = random.random() / random.random()
for j in range(i + 1, self.total_nodes):
self.weights[i][j] = random.random() * 2
def process(self):
# update the hidden nodes
for i in range(self.input_nodes, self.input_nodes + self.hidden_nodes):
# sum weighted input nodes for each hidden node, compare threshold, apply sigmoid
W_i = 0.0
for j in range(self.input_nodes):
W_i += self.weights[j][i] * self.values[j]
W_i -= self.thresholds[i]
self.values[i] = 1 / (1 + math.exp(-W_i))
# update the output nodes
for i in range(self.input_nodes + self.hidden_nodes, self.total_nodes):
# sum weighted hidden nodes for each output node, compare threshold, apply sigmoid
W_i = 0.0
for j in range(self.input_nodes, self.input_nodes + self.hidden_nodes):
W_i += self.weights[j][i] * self.values[j]
W_i -= self.thresholds[i]
self.values[i] = 1 / (1 + math.exp(-W_i))
def processErrors(self):
sumOfSquaredErrors = 0.0
# we only look at the output nodes for error calculation
for i in range(self.input_nodes + self.hidden_nodes, self.total_nodes):
error = self.expectedValues[i] - self.values[i]
#print error
sumOfSquaredErrors += math.pow(error, 2)
outputErrorGradient = self.values[i] * (1 - self.values[i]) * error
#print outputErrorGradient
# now update the weights and thresholds
for j in range(self.input_nodes, self.input_nodes + self.hidden_nodes):
# first update for the hidden nodes to output nodes (1 layer)
delta = self.learning_rate * self.values[j] * outputErrorGradient
#print delta
self.weights[j][i] += delta
hiddenErrorGradient = self.values[j] * (1 - self.values[j]) * outputErrorGradient * self.weights[j][i]
# and then update for the input nodes to hidden nodes
for k in range(self.input_nodes):
delta = self.learning_rate * self.values[k] * hiddenErrorGradient
self.weights[k][j] += delta
# update the thresholds for the hidden nodes
delta = self.learning_rate * -1 * hiddenErrorGradient
#print delta
self.thresholds[j] += delta
# update the thresholds for the output node(s)
delta = self.learning_rate * -1 * outputErrorGradient
self.thresholds[i] += delta
return sumOfSquaredErrors
class sampleMaker:
def __init__(self, network):
self.counter = 0
self.network = network
def setXor(self, x):
if x == 0:
self.network.values[0] = 1
self.network.values[1] = 1
self.network.expectedValues[4] = 0
elif x == 1:
self.network.values[0] = 0
self.network.values[1] = 1
self.network.expectedValues[4] = 1
elif x == 2:
self.network.values[0] = 1
self.network.values[1] = 0
self.network.expectedValues[4] = 1
else:
self.network.values[0] = 0
self.network.values[1] = 0
self.network.expectedValues[4] = 0
def setNextTrainingData(self):
self.setXor(self.counter % 4)
self.counter += 1
# start of main program loop, initialize classes
net = network(INPUT_NODES, HIDDEN_NODES, OUTPUT_NODES, LEARNING_RATE)
samples = sampleMaker(net)
for i in range(MAX_ITERATIONS):
samples.setNextTrainingData()
net.process()
error = net.processErrors()
# prove that we got the right answers(ish)!
if i > (MAX_ITERATIONS - 5):
output = (net.values[0], net.values[1], net.values[4], net.expectedValues[4], error)
print(output)
# display final parameters
print(net.weights)
print(net.thresholds)
| [
"nielsen192@gmail.com"
] | nielsen192@gmail.com |
1a8d233b160af98d9e8e553c481daa6a571c6329 | 795f27d242b9270c97621aadb917eab71fd8bab0 | /localiserparcelle/ui_control.py | cbf146bef352f1cb9dbc6e9e8cbaf7872580f8fd | [] | no_license | MTES-MCT/Localiser_Parcelle_Adresse_BAN | 13aba15612f92f24ff41a9f05cca9a08c1acbe62 | 8b62e459a10d2a7da301b35aad200a336c6bf419 | refs/heads/main | 2023-08-26T03:49:19.669226 | 2023-05-10T12:37:59 | 2023-05-10T12:37:59 | 393,299,204 | 0 | 1 | null | 2023-04-15T16:28:33 | 2021-08-06T07:48:12 | Python | UTF-8 | Python | false | false | 1,534 | py | # -*- coding: utf-8 -*-
from qgis.PyQt.QtCore import QSettings, Qt
from qgis.PyQt.QtWidgets import QApplication, QDialog
from .ui_localise import Ui_Dialog
class ui_control(QDialog, Ui_Dialog):
def __init__(self, parent, fl):
QDialog.__init__(self, parent, fl)
self.setupUi()
def commune_adresse_disable(self, output):
self.adrin.clear()
self.adrout.clear()
self.infracommune.setCurrentIndex(0)
self.infracommune.setEnabled(False)
def commune_adresse_enable(self, output):
self.adrout.clear()
self.infracommune.setEnabled(True)
def affiche_adresse(self, output):
self.adrout.setEnabled(True)
self.adrout.setText("%s" % output)
def efface_adresse(self, output):
self.adrout.clear()
def set_dialog_busy(self, dialogShouldBeBusy=True):
"""fonction qui rend l'interface occupée et l'indique à l'utilisateur"""
#s = QSettings()
#networkTimeout = s.value( "Qgis/networkAndProxy/networkTimeout", "60000" ) # Par defaut, c'est 60000
if dialogShouldBeBusy:
#s.setValue( "Qgis/networkAndProxy/networkTimeout", "10000" ) # Imposer un délai de 10 secondes
# Afficher un sablier à la place du curseur :
self.busyIndicator.setVisible(True)
QApplication.setOverrideCursor(Qt.WaitCursor)
QApplication.processEvents()
else:
#s.setValue( "Qgis/networkAndProxy/networkTimeout", networkTimeout ) # Retablir le parametre d'origine
QApplication.restoreOverrideCursor() # Retablir le curseur d'origine
self.busyIndicator.setVisible(False)
| [
"jdaniel.lomenede@gmail.com"
] | jdaniel.lomenede@gmail.com |
34cb41ee1faf957ad2bf5a821690bd7d68185cf4 | 594fd699d9f8070c867b83b11881ca1f624b417b | /exercicios de tuplas_listas_dicionários/brasileirão.py | 3cca717cafd6222508e6adf35cec7761a7c98208 | [] | no_license | felipmarqs/exerciciospythonbrasil | f140df2c59b933cc0460d5986afc8c6ddd493556 | 6d02e85ae5986d3b20cfd8781174998d871eeb90 | refs/heads/master | 2020-04-04T05:25:23.751175 | 2018-12-12T18:44:38 | 2018-12-12T18:44:38 | 155,745,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | #Crie uma tupla preenchida com os 20 primeiros colocados da tabela do brasileirão na ordem de colocação. DEpois mostre. A)Apenas os 5 primeiros colocados. B)Os últimos 4 colocados. C) Uma lista com os times em ordem alfabética. D)EM que posição da tabeela está o time da chapecoense
tabela = ('Palmeiras','Flamengo','Internacional','Grêmio','São Paulo','Atlético','Atlético-PR','Cruzeiro','Botafogo','Santos','Bahia','Fluminense','Corinthians','Chapecoense','Ceará','Vasco da Gama','Sport Recife','América-MG','EC Vitória','Paraná')
print(f"Lista dos times do brasileirão {tabela}")
print("-="*30)
print(f"Os 5 primeiros colocados são {tabela[0:5]}")
print("-="*30)
print(f"Os últimos 4 são: {tabela[17:21]}")
print("-="*30)
print(f"Os times em ordem alfabética são {sorted(tabela)}")
print("-="*30)
print(f"A Chapecoense está na {tabela.index('Chapecoense')+1}ª posição")
print("-="*30)
print(f"") | [
"noreply@github.com"
] | felipmarqs.noreply@github.com |
4e3b295752c62b6b152833fed4990de0c4d5ac64 | 9ab04c90c1c0536ec5da4e818c8b99f255377f4f | /bloodshare/migrations/0001_initial.py | 22b33099413e147af39fd94d3062566505ea62a3 | [] | no_license | ookapil/Blood-share | e1aae2387e610bbf414309908e909a86e5ccf966 | dc9011b7192240efb91776a6ae5ba35258d4f1f7 | refs/heads/master | 2022-12-22T05:52:10.555717 | 2020-10-01T16:27:15 | 2020-10-01T16:27:15 | 300,345,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,739 | py | # Generated by Django 3.0.8 on 2020-08-26 10:50
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Districts',
fields=[
('gid', models.AutoField(primary_key=True, serialize=False)),
('objectid', models.IntegerField(blank=True, null=True)),
('dcode', models.IntegerField(blank=True, null=True)),
('dist_name', models.CharField(blank=True, max_length=18, null=True)),
('shape_leng', models.DecimalField(blank=True, decimal_places=6, max_digits=6, null=True)),
('shape_area', models.DecimalField(blank=True, decimal_places=6, max_digits=6, null=True)),
('code1', models.SmallIntegerField(blank=True, null=True)),
('geom', django.contrib.gis.db.models.fields.MultiPolygonField(blank=True, null=True, srid=0)),
],
options={
'db_table': 'districts',
'managed': False,
},
),
migrations.CreateModel(
name='blooddata',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='kapil gyawali', max_length=100)),
('age', models.IntegerField(default=5)),
('bloodgroup', models.CharField(max_length=20)),
('address', models.CharField(max_length=100)),
('contact', models.CharField(max_length=50)),
],
),
]
| [
"kapilgyawali2055@gmail.com"
] | kapilgyawali2055@gmail.com |
1d5500a358e0d396c2a9ae091c82bfb7d427fb98 | daf15d76e95211205a93ab382aff4e9d9ca0c1d9 | /utils.py | 1fdb8b3baaea5c9ff5f8b2f608931731f9a37e4f | [] | no_license | wleddy/inventory | 59b032a3a7cbb9006e62912a8544dcb65dc7e143 | 81b616d3f08a25fd503bf94307fbfb6a66c993ed | refs/heads/master | 2022-05-17T14:32:21.260315 | 2022-03-22T23:11:47 | 2022-03-22T23:11:47 | 145,328,308 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | from flask import g
from shotglass2.takeabeltof.utils import cleanRecordID
from inventory.models import Item, Category
"""
Some utility functions
"""
def category_name(id=None):
"""Return the name of the category or None"""
from inventory.models import Category
rec = Category(g.db).select_one(where='id = {}'.format(cleanRecordID(id)))
if rec:
return rec.name
return None
def stock_on_hand(id=None):
"""Return the stock count for the item.id else something else"""
rec = Item(g.db).get(cleanRecordID(id))
if rec:
soh = Item(g.db).stock_on_hand(cleanRecordID(id))
if soh > 0:
if soh >= cleanRecordID(rec.min_stock):
return soh
else:
return "{} Min ({})".format(soh,rec.min_stock)
return "- out of stock -"
def lifo_cost(id=None):
"""Return the LIFO cost as a string for item """
cost = Item(g.db).lifo_cost(cleanRecordID(id))
return str(cost)
def register_inv_filters(app):
# register the filters
app.jinja_env.filters['category_name'] = category_name
app.jinja_env.filters['stock_on_hand'] = stock_on_hand
app.jinja_env.filters['lifo_cost'] = lifo_cost
| [
"bill@williesworkshop.net"
] | bill@williesworkshop.net |
4164bd4ad40235d013e5a99b269c698ea63a77be | 92f94e06f64840b7eb4461b6f877744a2ef7bb92 | /supportbee/client/meta.py | 8b05907e4b0e6e8255b1dae153812e8f0391f35a | [] | no_license | josuemontano/SupportBee-python-wrapper | 9fd7dea1aa91ec039436150f290c9550b110d818 | 8dbcdd8edae710e052d0ed882ad72039f8682514 | refs/heads/master | 2021-01-10T09:22:41.819086 | 2016-02-18T00:08:34 | 2016-02-18T00:08:34 | 51,601,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,027 | py | import requests
import json
from urllib.parse import urlencode
TIMEOUT = 15
class SupportbeeClient(object):
""" Base SupportBee API client
"""
schema = None
root = 'https://{0}.supportbee.com'
def __init__(self, company, api_token, resource):
if api_token is None:
raise Error('You must provide an API token')
self.base_url = self.root.format(company)
self.api_token = api_token
self.resource = resource
def build_get_url(self, **kwargs):
""" Add GET params to base URL
:param kwargs: Dictionary containing requested params to be added
:return: string with updated URL
"""
params = dict(map(bool_to_string, kwargs.items()))
params['auth_token'] = self.api_token
query = '{0}/{1}?'.format(self.base_url, self.resource)
query += urlencode(params)
return query
def get(self, url, schema):
""" Makes a GET request and returns the deserialized response.
"""
try:
ans = requests.get(url, timeout=TIMEOUT)
except (ConnectionError, TimeoutError) as e:
raise e
else:
payload = json.loads(ans.text).get(self.resource)
return schema.load(payload).data
def post(self, url, item, schema):
"""
Makes a POST request. Serializes the given item and posts it
to the given url.
"""
data = schema.dump(item).data
try:
ans = requests.post(url, json=data, timeout=TIMEOUT)
except (ConnectionError, TimeoutError) as e:
raise e
else:
payload = json.loads(ans.text).get('ticket')
return schema.load(payload).data
def bool_to_string(item):
"""
Returns a tuple identical to the given one, unless its second term
is a boolean. In that case this is turned to a lowercase string.
"""
key, value = item
return (key, str(value).lower() if isinstance(value, bool) else value)
| [
"josuemontanoa@gmail.com"
] | josuemontanoa@gmail.com |
ad19cd3c7f8b3f2582e05bb862dcc7fb28c93568 | af4261e05aed9d671274bc2514e943c47c441c43 | /LinearAlgebra_Function/5.07.ConjugateTranspose_H.py | 659508333be5c493587db5bc13ff9c151f689ddb | [] | no_license | HYUNMIN-HWANG/LinearAlgebra_Study | 5f0f226f2b30b17cda433e4c368b1abfcb8b8266 | 6e7bce4ba893f852c14ce81de8f4ab2673c1155e | refs/heads/main | 2023-06-21T08:46:56.620585 | 2021-07-16T16:13:56 | 2021-07-16T16:13:56 | 383,745,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | # conjugate transpose : 켤레복소수를 구한 후, 행과 열 바꾸기
# Hermite (H)
'''numpy.matrix.H'''
import numpy as np
x = np.matrix(np.arange(12).reshape((3,4)))
print(x)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
z = x - 1j*x
print(z)
# [[ 0. +0.j 1. -1.j 2. -2.j 3. -3.j]
# [ 4. -4.j 5. -5.j 6. -6.j 7. -7.j]
# [ 8. -8.j 9. -9.j 10.-10.j 11.-11.j]]
z_H = z.getH()
print(z_H)
# [[ 0. -0.j 4. +4.j 8. +8.j]
# [ 1. +1.j 5. +5.j 9. +9.j]
# [ 2. +2.j 6. +6.j 10.+10.j]
# [ 3. +3.j 7. +7.j 11.+11.j]]
| [
"hwangkei0212@gmail.com"
] | hwangkei0212@gmail.com |
19fcf897de250cba7c485263a8354f23b5219734 | 42c630dbbdb5bbacca816b9f02774111f5b8b18a | /sam.py | 137dc7ca8437511918de8129abec96049ec6ad26 | [] | no_license | samruddhipanda/HacktoberPython | 414600a10dd6bc8e765b0f101ca05e19b9902204 | db1ab2324a698953b166fe67990e398c613c4a1f | refs/heads/main | 2023-09-02T17:58:26.076356 | 2021-10-16T15:00:12 | 2021-10-16T15:00:12 | 416,482,052 | 1 | 0 | null | 2021-10-12T20:25:28 | 2021-10-12T20:08:08 | null | UTF-8 | Python | false | false | 42 | py | def sum(a,b):
c=a+b
return c
| [
"noreply@github.com"
] | samruddhipanda.noreply@github.com |
4a3e6cac5ee4dee5008231855a1c44d3db2bda4d | ca0ffc46bf9d5e23e25955150c47a04eb458bdff | /Restaurant Tables.py | f4a8925eb78fd2a973c1c36eca1f8b2ebea3036b | [] | no_license | Zeref-Draganeel/Esoetric-Code-Wars | 34030353239bf6d99e30381388d1d86a607b2b75 | d46c29089f9cf5f19371230f18a5d5273cdea13b | refs/heads/main | 2023-06-08T16:47:04.192345 | 2021-06-20T02:55:04 | 2021-06-20T02:55:04 | 375,865,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | #https://www.codewars.com/kata/598c1bc6a04cd3b8dd000012/train/python
restaurant=lambda a,e,i:(lambda a=[a],e=[e],d=[0],s=[0]:[[(a.__setitem__(0,a[0]-1)if a[0]else[s.__setitem__(0,s[0]+1)if s[0]<e[0]else[s.__setitem__(0,s[0]-1),e.__setitem__(0, e[0]-1)]if e[0]else d.__setitem__(0,d[0]+1)])if v==1else e.__setitem__(0,e[0]-1)if e[0]>s[0]else d.__setitem__(0,d[0]+2)for v in i],d[0]][1])()
| [
"noreply@github.com"
] | Zeref-Draganeel.noreply@github.com |
21a1c87a06483f8c24a079005b510c3a5860d632 | a8042cb7f6a4daec26b8cea6b7da2cb7cb880a84 | /1985_FindKthLargestIntegerinArray.py | b43eac41293bd4226935a1b8e3988f6d1b93e5f6 | [] | no_license | renukadeshmukh/Leetcode_Solutions | 0108edf6c5849946623a75c2dfd57cbf9bb338e4 | 1211eac167f33084f536007468ea10c1a0ceab08 | refs/heads/master | 2022-11-10T20:48:42.108834 | 2022-10-18T07:24:36 | 2022-10-18T07:24:36 | 80,702,452 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,232 | py | '''
1985. Find the Kth Largest Integer in the Array
You are given an array of strings nums and an integer k. Each string in nums
represents an integer without leading zeros.
Return the string that represents the kth largest integer in nums.
Note: Duplicate numbers should be counted distinctly. For example, if nums is
["1","2","2"], "2" is the first largest integer, "2" is the second-largest integer,
and "1" is the third-largest integer.
Example 1:
Input: nums = ["3","6","7","10"], k = 4
Output: "3"
Explanation:
The numbers in nums sorted in non-decreasing order are ["3","6","7","10"].
The 4th largest integer in nums is "3".
Example 2:
Input: nums = ["2","21","12","1"], k = 3
Output: "2"
Explanation:
The numbers in nums sorted in non-decreasing order are ["1","2","12","21"].
The 3rd largest integer in nums is "2".
Example 3:
Input: nums = ["0","0"], k = 2
Output: "0"
Explanation:
The numbers in nums sorted in non-decreasing order are ["0","0"].
The 2nd largest integer in nums is "0".
Constraints:
1 <= k <= nums.length <= 104
1 <= nums[i].length <= 100
nums[i] consists of only digits.
nums[i] will not have any leading zeros.
'''
'''
ALGORITHM:
1. Since the given numbers are string, we cannot sort the array directly. That
will result in lexicographically sorted string.
2. We cannot convert string to integers because it can lead to integer overflow.
3. We can write a custom comparator and use this to sort the nums array.
4. For 2 nums a and b, if length of a is greater that means a is greater and
vice versa.
5. If length of a and b is same, we can do a direct lexicographic comparison.
RUNTIME COMPLEXITY: O(NLOGN)
SPACE COMPLEXITY: O(N)
'''
import functools
class Solution(object):
def kthLargestNumber(self, nums, k):
"""
:type nums: List[str]
:type k: int
:rtype: str
"""
sorted_nums = sorted(nums, key=functools.cmp_to_key(self.compare))
return sorted_nums[k-1]
def compare(self, x, y):
if len(x) > len(y):
return -1
elif len(y) > len(x):
return 1
elif x == y:
return 0
elif x > y:
return -1
else:
return 1
| [
"renud1988@gmail.com"
] | renud1988@gmail.com |
8825b62ea568f9627ebeacfe785ab05dae3449fa | 74af0a1d517f2d19a307712619d68e35ed0188ee | /Server/app/ServerView/viewFileService/views.py | 47f6b8492e5275b05cbd71b5997e7065d2794ecf | [] | no_license | vaststar/Blog | 64b37ba5e22755e8af2892f95eeb777613316f50 | b0c237425e7147879da4dd336af4c4c31cfdcfde | refs/heads/master | 2022-01-08T17:49:12.750356 | 2019-05-17T07:44:13 | 2019-05-17T07:44:13 | 166,943,297 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,015 | py | from flask import request,send_from_directory,jsonify,make_response
import os
from . import file_blue
from app.ServerConfig import config
from app.ServerView.Common import Common
from app.ServerView.Common.fileApi import FileApi
from app.ServerView.Common.Identify import IdentifyUtil
@file_blue.route("/<path:filePath>",methods=["GET"])
def get_file_Services(filePath):
'''提供下载功能'''
file = os.path.join(config.STATIC_FILE_PATH, filePath)
if os.path.isfile(file):
response = make_response(send_from_directory( os.path.dirname(file), os.path.basename(file), as_attachment=True))
response.headers['Cache-Control']="no-cache"
response.headers['Expires'] = "-1"
response.headers['Pragma'] = "no-cache"
return response
else:
return jsonify(Common.falseReturn(None,'not find file'))
@file_blue.route("/articles/pictures/<fileName>",methods=["POST"])
@IdentifyUtil.login_required
@IdentifyUtil.robot_defend
def post_article_file_Services(fileName):
userid = IdentifyUtil.get_user_id()
if not userid :
return jsonify(Common.falseReturn(None,'user not find'))
'''上传功能'''
refPath = FileApi.generateFilePath(fileName,"articles/pictures/"+userid)
uploadPath = os.path.join(config.STATIC_FILE_PATH, refPath)
saveResult = FileApi.saveRequestFileWithPath(uploadPath,request)
if saveResult['status']:
return jsonify(Common.trueReturn({'filepath':refPath},'up ok'))
else:
return jsonify(saveResult)
@file_blue.route("/avatars/<fileName>",methods=["POST"])
@IdentifyUtil.robot_defend
def post_avatar_file_Services(fileName):
'''上传功能'''
refPath = FileApi.generateFilePath(fileName,"avatars")
uploadPath = os.path.join(config.STATIC_FILE_PATH, refPath)
saveResult = FileApi.saveRequestFileWithPath(uploadPath,request)
if saveResult['status']:
return jsonify(Common.trueReturn({'filepath':refPath},'up ok'))
else:
return jsonify(saveResult)
| [
"47029316@qq.com"
] | 47029316@qq.com |
459a628aa6950737f59cab5e203e43b2ddd0f087 | 72b97af0cfd006674ffc915431394bf3df8bf0e3 | /strings.py | 65411dc076959d2c604d9ce141b33c61b93f3268 | [] | no_license | vjohnk/mycode | 7a8bc6de60811c70ff80cfe2729c8847fa47ad3a | 31993516740ea95e8e45cc7166792432f6f97314 | refs/heads/master | 2020-06-02T22:31:27.151035 | 2019-06-11T10:58:11 | 2019-06-11T10:58:11 | 191,329,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | #!/usr/bin/python2
#servers='amazon'
#num=22
#print("We have serveral %s located at our premises there are %d" %(servers,num))
#from string import ascii_lowercase
#import itertools
#
#def iter_all_strings():
# for size in itertools.count(1):
# for s in itertools.product(ascii_lowercase, repeat=size):
# yield "".join(s)
#for s in iter_all_strings():
# print(s)
# if s == 'bb':
# break
str = raw_input("Enter Your Input: ");
print("Recieved input is : ") , str
| [
"vjohnk@live.com"
] | vjohnk@live.com |
e3429e1f31250ed80a833c551c5c8cfef6f163f5 | bb48536ee0a23e6b2bbc7cefd74066b84aeaa269 | /com/colorDice.py | 98fbb1ad9d9fd9bd1cbfa1cba67f39c3bbc1ba77 | [] | no_license | ArmaniGiles/P | 7b614ba723148a39e13d2db0dba652a0107ad2a8 | 882158e9e6cd86972e9cb3ca81c8ac360aeb09cb | refs/heads/master | 2021-11-04T17:43:37.876942 | 2017-07-02T05:52:52 | 2017-07-02T05:52:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,194 | py | '''
Created on Jun 11, 2017
@author: moni
'''
import random
import com.fireboxtraining.ColorChart as cfc
from tkinter import Button, Toplevel, Tk
import tkinter
count = 0
diceList = [0,0,0,0,0,0]
class colorDice(tkinter.Button):
def __init__(self):
self.button = None
self.g()
self.count=0
def random_generatorForColor(self):
result = random.randint(0, 478)
f = cfc.c.COLORS[result]
return str(f)
def random_generator(self):
result = random.randint(1, 6)
self.numberCounter(result)
strinG = ""
strinG += str(result)
return strinG
def g(self):
global count
if(self.button == None):
result = self.random_generator()
cResult = self.random_generatorForColor()
self.button = Button(root, text = "" + result, bg =""+cResult, height = 3 , width = 10, command = self.g)
self.button.pack()
count+=1
if(count%15==0 and count !=0 ):
import com.fireboxtraining.mom as M
print("gjhghgui")
M.start(diceList)
else:
result = self.random_generator()
cResult = self.random_generatorForColor()
self.button.destroy()
self.button = Button(root, text = "" + result, bg =""+cResult , height = 3 , width = 10, command = self.g)
self.button.pack()
count+=1
if(count%15==0 and count !=0 ):
import com.fireboxtraining.mom as M
print("gjhghgui")
M.start(diceList)
def numberCounter(self, result):
if result == 1:
diceList[0]+=1
elif result == 2:
diceList [1]+=1
elif result == 3:
diceList[2]+=1
elif result == 4:
diceList[3]+=1
elif result == 5:
diceList[4]+=1
else:
diceList[5]+=1
root = Toplevel()
cd = colorDice()
root.mainloop()
| [
"bless727@gmail.com"
] | bless727@gmail.com |
a36d7acc965145842393a1ab069cfeca719024aa | 80233b655cfb55c7d2b9a75287fb47dbfb0b98c7 | /hparam.py | 00233918008f96d78dbb0cbe4966e6669c6d738f | [] | no_license | shawnspace/HRED | 905a848cf67aecdc383b09849fb633e788464369 | 081e6202cd74063d7c7df4f7cca4b002003f57c0 | refs/heads/master | 2021-09-17T06:59:25.950291 | 2018-06-29T03:24:00 | 2018-06-29T03:24:00 | 125,837,684 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,316 | py | import tensorflow as tf
from collections import namedtuple
import os
# Model Parameters
tf.flags.DEFINE_string('word_embed_path','glove.txt','path to word embedding')
tf.flags.DEFINE_string('vocab_path','rg_vocab.txt','vocab path')
tf.flags.DEFINE_integer('vocab_size',18423,'vocab size')
tf.flags.DEFINE_integer("word_dim", 300, "Dimensionality of the embeddings")
tf.flags.DEFINE_integer('word_rnn_num_units', 600, 'Num of rnn cells')
tf.flags.DEFINE_integer('context_rnn_num_units', 1200, 'Num of rnn cells')
tf.flags.DEFINE_integer('decoder_rnn_num_units', 1200, 'Num of rnn cells')
tf.flags.DEFINE_integer('beam_width', 10, 'Num of beam_width')
tf.flags.DEFINE_float('keep_prob', 1.0, 'the keep prob of rnn state')
tf.flags.DEFINE_string('rnn_cell_type', 'GRU', 'the cell type in rnn')
# Pre-trained parameters
tf.flags.DEFINE_integer('max_sentence_length', 25,'the max sentence length')
tf.flags.DEFINE_integer('max_context_length', 20,'the max context length')
# Training Parameters,
# train example 131438 4000 step /epoch
# valid example 3907
# test example 3894
tf.flags.DEFINE_integer("batch_size", 32, "Batch size during training")
tf.flags.DEFINE_integer('num_epochs', 10, 'the number of epochs')
tf.flags.DEFINE_integer('eval_step', 2000, 'eval every n steps')
tf.flags.DEFINE_boolean('shuffle_batch',True, 'whether shuffle the train examples when batch')
tf.flags.DEFINE_float("learning_rate", 0.001, "Learning rate")
tf.flags.DEFINE_integer('summary_save_steps',200,'steps to save summary')
FLAGS = tf.flags.FLAGS
HParams = namedtuple(
"HParams",
[ "eval_step",
"batch_size",
"word_dim",
"learning_rate",
'vocab_size',
"num_epochs",
'word_rnn_num_units',
'context_rnn_num_units',
'decoder_rnn_num_units',
'beam_width',
'keep_prob',
'rnn_cell_type',
'max_sentence_length',
'max_context_length',
'shuffle_batch',
'summary_save_steps',
'clip_norm',
'lambda_l2',
'word_embed_path',
'vocab_path'
])
def create_hparam():
return HParams(
eval_step=FLAGS.eval_step,
batch_size=FLAGS.batch_size,
learning_rate=FLAGS.learning_rate,
word_dim=FLAGS.word_dim,
vocab_size=FLAGS.vocab_size,
num_epochs=FLAGS.num_epochs,
word_rnn_num_units=FLAGS.word_rnn_num_units,
context_rnn_num_units=FLAGS.context_rnn_num_units,
decoder_rnn_num_units=FLAGS.decoder_rnn_num_units,
beam_width=FLAGS.beam_width,
keep_prob=FLAGS.keep_prob,
rnn_cell_type=FLAGS.rnn_cell_type,
max_sentence_length=FLAGS.max_sentence_length,
max_context_length=FLAGS.max_context_length,
shuffle_batch=FLAGS.shuffle_batch,
summary_save_steps=FLAGS.summary_save_steps,
lambda_l2 = 0.001,
clip_norm = 10,
word_embed_path = FLAGS.word_embed_path,
vocab_path = FLAGS.vocab_path
)
def write_hparams_to_file(hp, model_dir):
with open(os.path.join(os.path.abspath(model_dir),'hyper_parameters.txt'), 'w') as f:
f.write('batch_size: {}\n'.format(hp.batch_size))
f.write('learning_rate: {}\n'.format(hp.learning_rate))
f.write('num_epochs: {}\n'.format(hp.num_epochs))
f.write('word_rnn_num_units: {}\n'.format(hp.word_rnn_num_units))
f.write('keep_prob: {}\n'.format(hp.keep_prob))
| [
"noreply@github.com"
] | shawnspace.noreply@github.com |
92a555e5a3e31b736ee8baff1c440dda0fd8e1d2 | 77fc2f58f7cd040f5dc13c63135909e174563409 | /CCC/CCC_2020/Comp/Escape_Room_1.py | 93b1ae3830bd99ac1c2aa3d91ecf4de8ac3cd465 | [] | no_license | JeffersonDing/CompetitiveProgramming | 12cc8062f580589630daa58eb6b2f5b0061687c6 | 73ac3510f61691c4814e4cce76e14d95d352dec9 | refs/heads/master | 2023-06-25T05:04:21.967808 | 2021-07-24T17:50:34 | 2021-07-24T17:50:34 | 297,129,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | import pdb
#pdb.set_trace()
M=3
N=4
factors=[]
flag=False;
aM=[[3,10,8,14],[1,11,12,12],[6,2,3,90]]
def factor(x):
factors=[]
for i in range(1, x + 1):
if x % i == 0:
if(x/i>M or x/i>N or i>M or i>N):
break
else:
factors.append([int(x/i),i])
def findExit(x,y):
posible=[]
for i in range(0,len(aM)):
for p in range(0,len(aM[i])):
if(int(aM[i][p])==x*y and i*p!=x*y):
if(aM[0][0]==x*y):
print('yes')
break
else:
posible.append([i,p])
if(posible==[]):
print('empty no')
return
splitCases(posible)
def splitCases(x):
for i in x:
findExit(i[0],i[1])
print("ran",x)
findExit(M,N)
| [
"tyding2004@outlook.com"
] | tyding2004@outlook.com |
b7854c551d4a579b24e58bcd531521ea50733995 | 49e4556714d81697306acbb9030a4bfb09af1d46 | /uri problem solving/uri_1019.py | a8085054d63ad95c4651ca2fb8d1fb87bcdf9063 | [] | no_license | awalhadi/Problem-solving-python | ae03c20ac2c08deac89a2d9ed9876659ba49641c | ace55e47b759398c59c118d7f39f5256eb696a1a | refs/heads/main | 2023-07-18T04:09:10.004894 | 2021-08-29T01:35:14 | 2021-08-29T01:35:14 | 331,949,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | second = int(input())
h = int(second/3600)
rest = int(second % 3600)
m = int(rest / 60)
s = (rest % 60)
print("{h}:{m}:{s}".format(h=h, m=m, s=s)) | [
"awalhadi5@gmail.com"
] | awalhadi5@gmail.com |
1fa8a37a2e587d652686bd4725c0dfec77735587 | a02d4b3c879c047082ccc295607d6ec8d1130039 | /Katana-SuperTools/ArnoldLightGafferPackage/v1/ArnoldLightGafferUIDelegate.py | 4a627daf735d2b8bfff386d84adcd19ec2576a99 | [
"Apache-2.0"
] | permissive | iceprincefounder/selected-sources | 4fa97e439248d1b4bd9e6e59d0f80a7c33752761 | b225c529e02009b5f44bfb2891ef8f039b9a6d80 | refs/heads/master | 2022-05-22T03:24:45.329292 | 2022-03-13T09:22:12 | 2022-03-13T09:22:12 | 149,724,714 | 17 | 3 | null | null | null | null | UTF-8 | Python | false | false | 27,345 | py | # Copyright (c) 2015 The Foundry Visionmongers Ltd. All Rights Reserved.
from PackageSuperToolAPI import UIDelegate
from PackageSuperToolAPI import NodeUtils as NU
from PackageSuperToolAPI import Packages
from Katana import QT4FormWidgets, FormMaster, Plugins
from ArnoldLightGafferPackage import (
ArnoldSpotLightGafferPackage,
ArnoldSpotLightGafferEditPackage,
ArnoldPointLightGafferPackage,
ArnoldPointLightGafferEditPackage,
ArnoldQuadLightGafferPackage,
ArnoldQuadLightGafferEditPackage,
ArnoldDistantLightGafferPackage,
ArnoldDistantLightGafferEditPackage,
ArnoldDiskLightGafferPackage,
ArnoldDiskLightGafferEditPackage,
ArnoldCylinderLightGafferPackage,
ArnoldCylinderLightGafferEditPackage,
ArnoldMeshLightGafferPackage,
ArnoldMeshLightGafferEditPackage,
ArnoldGoboSpotLightGafferPackage,
ArnoldGoboSpotLightGafferEditPackage,
ArnoldFilterLightGafferPackage,
ArnoldFilterLightGafferEditPackage,
ArnoldSkyDomeLightGafferPackage,
ArnoldSkyDomeLightGafferEditPackage,
)
# Resolve the base classes for our UI delegate classes from the
# PackageSuperToolAPI by looking up the delegates registered for the stock
# GafferThree light packages; every Arnold delegate below subclasses one of
# these two.
GafferThreeAPI = Plugins.GafferThreeAPI
# Delegate base used by "create" (light) packages.
LightUIDelegate = UIDelegate.GetUIDelegateClassForPackageClass(GafferThreeAPI.PackageClasses.LightPackage)
# Delegate base used by "edit" (light-edit) packages.
LightEditUIDelegate = UIDelegate.GetUIDelegateClassForPackageClass(GafferThreeAPI.PackageClasses.LightEditPackage)
class ArnoldLightGafferUIDelegate(LightUIDelegate):
    """
    Base UI delegate shared by the Arnold light Gaffer packages.

    This class is responsible for exposing the parameters on each of the
    parameter tabs. This is done by creating parameter policies attached to
    the parameters on the package's nodes. We can also modify the appearance
    of the parameter tabs by modifying the hints dictionaries on the
    policies.
    """

    # The hash used to uniquely identify the action of creating a package.
    # This was generated using:
    #   import hashlib
    #   print hashlib.md5('ArnoldGafferThreeLight.AddLight').hexdigest()
    AddPackageActionHash = '0ea90ede9526a23ce0a26ba6e65ef5d0'

    # The keyboard shortcut for creating a package
    DefaultShortcut = 'Ctrl+A'

    def getTabPolicy(self, tabName):
        """
        The main method of a UIDelegate. This is responsible for returning a
        policy instance for each tab. The policy will contain other policies
        that should drive the actual package node's parameters.
        """
        if tabName == "Object":
            return self.getObjectTabPolicy()
        elif tabName == "Material":
            return self.getMaterialTabPolicy()
        elif tabName == "Linking":
            return self.getLinkingTabPolicy()
        else:
            # Any other tab falls back to the stock light delegate.
            return LightUIDelegate.getTabPolicy(self, tabName)

    def getObjectTabPolicy(self):
        """
        Returns the policy for the widget that should be displayed under the
        'Object' tab: the light's transform plus its viewer annotation
        controls.
        """
        # Get the create node in the package, which contains the transform
        # parameter.
        packageNode = self.getPackageNode()
        createNode = NU.GetRefNode(packageNode, "create")
        if createNode is None:
            return None

        # Create a root group policy and add some hints on it.
        rootPolicy = QT4FormWidgets.PythonGroupPolicy('object')
        rootPolicy.getWidgetHints()['open'] = True
        rootPolicy.getWidgetHints()['hideTitle'] = True

        # Transform controls come straight from the create node's
        # transform parameter group.
        transformPolicy = QT4FormWidgets.PythonGroupPolicy('transform')
        transformPolicy.getWidgetHints()['open'] = True

        translatePolicy = FormMaster.CreateParameterPolicy(
            None, createNode.getParameter("transform.translate"))
        rotatePolicy = FormMaster.CreateParameterPolicy(
            None, createNode.getParameter("transform.rotate"))
        scalePolicy = FormMaster.CreateParameterPolicy(
            None, createNode.getParameter("transform.scale"))

        transformPolicy.addChildPolicy(translatePolicy)
        transformPolicy.addChildPolicy(rotatePolicy)
        transformPolicy.addChildPolicy(scalePolicy)
        rootPolicy.addChildPolicy(transformPolicy)

        # Annotation controls are driven by the viewerObjectSettings node
        # (collapsed by default).
        viewerObjectSettingsNode = NU.GetRefNode(packageNode, "viewerObjectSettings")

        annotationPolicy = QT4FormWidgets.PythonGroupPolicy('annotation')
        annotationPolicy.getWidgetHints()['open'] = False

        textPolicy = FormMaster.CreateParameterPolicy(
            None, viewerObjectSettingsNode.getParameter('args.viewer.default.annotation.text'))
        colorPolicy = FormMaster.CreateParameterPolicy(
            None, viewerObjectSettingsNode.getParameter('args.viewer.default.annotation.color'))
        previewColor = FormMaster.CreateParameterPolicy(
            None, createNode.getParameter('previewColor'))
        pickablePolicy = FormMaster.CreateParameterPolicy(
            None, viewerObjectSettingsNode.getParameter('args.viewer.default.pickable'))

        annotationPolicy.addChildPolicy(textPolicy)
        annotationPolicy.addChildPolicy(colorPolicy)
        annotationPolicy.addChildPolicy(previewColor)
        annotationPolicy.addChildPolicy(pickablePolicy)
        rootPolicy.addChildPolicy(annotationPolicy)

        return rootPolicy

    def getMaterialTabPolicy(self):
        """
        Returns the policy for the widget that should be displayed under the
        'Material' tab: the (read-only) light shader selection followed by
        its parameter group.
        """
        packageNode = self.getPackageNode()
        materialNode = NU.GetRefNode(packageNode, "material")
        if materialNode:
            # Ensure dynamically-created shader parameters exist before
            # policies are attached to them.
            materialNode.checkDynamicParameters()

            materialPolicy = QT4FormWidgets.PythonGroupPolicy('material')
            materialPolicy.getWidgetHints()['hideTitle'] = True

            # The shader selection is shown but locked on create packages.
            shaderPolicy = FormMaster.CreateParameterPolicy(materialPolicy,
                materialNode.getParameter('shaders.arnoldLightShader'))
            shaderPolicy = QT4FormWidgets.ValuePolicyProxy(shaderPolicy)
            shaderPolicy.setWidgetHints(shaderPolicy.getWidgetHints())
            shaderPolicy.getWidgetHints()['readOnly'] = True
            materialPolicy.addChildPolicy(shaderPolicy)

            paramsPolicy = FormMaster.CreateParameterPolicy(materialPolicy,
                materialNode.getParameter('shaders.arnoldLightParams'))
            paramsPolicy.getWidgetHints()['open'] = True
            materialPolicy.addChildPolicy(paramsPolicy)

            return materialPolicy
        return None

    def getLinkingTabPolicy(self):
        """
        Returns the 'Linking' tab policy by delegating to the base
        LightUIDelegate's private implementation (accessed via its
        name-mangled attribute).
        """
        return self._LightUIDelegate__getLinkingTabPolicy()
class ArnoldLightGafferEditUIDelegate(LightEditUIDelegate):
    """
    Base UI delegate shared by the Arnold light edit packages.
    """

    def getTabPolicy(self, tabName):
        """
        The main method of a UIDelegate. This is responsible for returning a
        Value Policy for each tab. The Value Policy will contain other
        policies that should drive the actual package node's parameters.
        """
        if tabName == "Object":
            return self.getObjectTabPolicy()
        elif tabName == "Material":
            return self.getMaterialTabPolicy()
        elif tabName == "Linking":
            # Route through the public wrapper for consistency with the
            # other tabs (it simply forwards to the private base
            # implementation).
            return self.getLinkingTabPolicy()
        else:
            return LightEditUIDelegate.getTabPolicy(self, tabName)

    def getObjectTabPolicy(self):
        """
        Returns the 'Object' tab policy provided by the base
        LightEditUIDelegate (accessed via its name-mangled private method).
        """
        return self._LightEditUIDelegate__getObjectTabPolicy()

    def getMaterialTabPolicy(self):
        """
        Returns the policy for the widget that should be displayed under the
        'Material' tab: the (editable) light shader selection followed by
        its parameter group.
        """
        packageNode = self.getPackageNode()
        materialNode = NU.GetRefNode(packageNode, "material_edit")
        if materialNode:
            # Only resolve dynamic parameters when the reference node
            # exists; previously this was called unconditionally and raised
            # an AttributeError whenever "material_edit" was missing.
            materialNode.checkDynamicParameters()

            materialPolicy = QT4FormWidgets.PythonGroupPolicy('material')
            materialPolicy.getWidgetHints()['hideTitle'] = True

            # Edit packages allow the shader selection to be changed.
            shaderPolicy = FormMaster.CreateParameterPolicy(materialPolicy,
                materialNode.getParameter('shaders.arnoldLightShader'))
            shaderPolicy = QT4FormWidgets.ValuePolicyProxy(shaderPolicy)
            shaderPolicy.setWidgetHints(shaderPolicy.getWidgetHints())
            shaderPolicy.getWidgetHints()['readOnly'] = False
            materialPolicy.addChildPolicy(shaderPolicy)

            paramsPolicy = FormMaster.CreateParameterPolicy(materialPolicy,
                materialNode.getParameter('shaders.arnoldLightParams'))
            paramsPolicy.getWidgetHints()['open'] = True
            materialPolicy.addChildPolicy(paramsPolicy)

            return materialPolicy
        return None

    def getLinkingTabPolicy(self):
        """
        Returns the 'Linking' tab policy provided by the base
        LightEditUIDelegate.
        """
        return self._LightEditUIDelegate__getLinkingTabPolicy()
class ArnoldSpotLightGafferUIDelegate(ArnoldLightGafferUIDelegate):
    """UI delegate for the Arnold spot light package."""
    # Identifier for this package's "add" action in the GafferThree UI.
    AddPackageActionHash = 'GafferThree-AddSpotLight'
    # Keyboard shortcut bound to the add action.
    DefaultShortcut = 'Ctrl+1'
class ArnoldPointLightGafferUIDelegate(ArnoldLightGafferUIDelegate):
    """UI delegate for the Arnold point light package."""
    # Identifier for this package's "add" action in the GafferThree UI.
    AddPackageActionHash = 'GafferThree-AddPointLight'
    # Keyboard shortcut bound to the add action.
    DefaultShortcut = 'Ctrl+2'
class ArnoldQuadLightGafferUIDelegate(ArnoldLightGafferUIDelegate):
    """UI delegate for the Arnold quad light package."""
    # Identifier for this package's "add" action in the GafferThree UI.
    AddPackageActionHash = 'GafferThree-AddQuadLight'
    # Keyboard shortcut bound to the add action.
    DefaultShortcut = 'Ctrl+3'
class ArnoldDistantLightGafferUIDelegate(ArnoldLightGafferUIDelegate):
    """UI delegate for the Arnold distant light package."""
    # Identifier for this package's "add" action in the GafferThree UI.
    AddPackageActionHash = 'GafferThree-AddDistantLight'
    # Keyboard shortcut bound to the add action.
    DefaultShortcut = 'Ctrl+4'
class ArnoldDiskLightGafferUIDelegate(ArnoldLightGafferUIDelegate):
    """UI delegate for the Arnold disk light package."""
    # Identifier for this package's "add" action in the GafferThree UI.
    AddPackageActionHash = 'GafferThree-AddDiskLight'
    # Keyboard shortcut bound to the add action.
    DefaultShortcut = 'Ctrl+5'
class ArnoldCylinderLightGafferUIDelegate(ArnoldLightGafferUIDelegate):
    """UI delegate for the Arnold cylinder light package."""
    # Identifier for this package's "add" action in the GafferThree UI.
    AddPackageActionHash = 'GafferThree-AddCylinderLight'
    # Keyboard shortcut bound to the add action.
    DefaultShortcut = 'Ctrl+6'
class ArnoldMeshLightGafferUIDelegate(ArnoldLightGafferUIDelegate):
    """UI delegate for the Arnold mesh light package."""
    # Identifier for this package's "add" action in the GafferThree UI.
    AddPackageActionHash = 'GafferThree-AddMeshLight'
    # Keyboard shortcut bound to the add action.
    DefaultShortcut = 'Ctrl+7'
class ArnoldGoboSpotLightGafferUIDelegate(ArnoldLightGafferUIDelegate):
    """UI delegate for the Arnold gobo spot light package."""

    # Identifier for this package's "add" action in the GafferThree UI.
    AddPackageActionHash = 'GafferThree-AddGoboSpotLight'
    # Keyboard shortcut bound to the add action.
    DefaultShortcut = 'Ctrl+8'

    def getMaterialTabPolicy(self):
        """
        Build the 'Material' tab policy: the light, light filter and surface
        shader selections (all read-only), followed by the parameter group
        for each shader.
        """
        materialNode = NU.GetRefNode(self.getPackageNode(), "material")
        if materialNode is None:
            return None

        tabPolicy = QT4FormWidgets.PythonGroupPolicy('material')
        tabPolicy.getWidgetHints()['hideTitle'] = True

        # Shader selections are displayed but locked against editing.
        for shaderParamName in ('shaders.arnoldLightShader',
                                'shaders.arnoldLightFilterShader',
                                'shaders.arnoldSurfaceShader'):
            basePolicy = FormMaster.CreateParameterPolicy(
                tabPolicy, materialNode.getParameter(shaderParamName))
            proxyPolicy = QT4FormWidgets.ValuePolicyProxy(basePolicy)
            proxyPolicy.setWidgetHints(proxyPolicy.getWidgetHints())
            proxyPolicy.getWidgetHints()['readOnly'] = True
            tabPolicy.addChildPolicy(proxyPolicy)

        # One parameter group per shader, expanded by default.
        for paramsGroupName in ('shaders.arnoldLightParams',
                                'shaders.arnoldLightFilterParams',
                                'shaders.arnoldSurfaceParams'):
            groupPolicy = FormMaster.CreateParameterPolicy(
                tabPolicy, materialNode.getParameter(paramsGroupName))
            groupPolicy.getWidgetHints()['open'] = True
            tabPolicy.addChildPolicy(groupPolicy)

        return tabPolicy
class ArnoldGoboSpotLightGafferEditUIDelegate(ArnoldLightGafferEditUIDelegate):
    """UI delegate for the Arnold gobo spot light edit package."""

    def getMaterialTabPolicy(self):
        """
        Build the 'Material' tab policy for the edit package: the light,
        light filter and surface shader selections, followed by the
        parameter group for each shader.
        """
        materialNode = NU.GetRefNode(self.getPackageNode(), "material_edit")
        if materialNode is None:
            return None

        tabPolicy = QT4FormWidgets.PythonGroupPolicy('material')
        tabPolicy.getWidgetHints()['hideTitle'] = True

        # NOTE(review): the shader selections stay read-only even in this
        # edit delegate (other edit delegates in this file use False) --
        # confirm this is intentional before changing it.
        for shaderParamName in ('shaders.arnoldLightShader',
                                'shaders.arnoldLightFilterShader',
                                'shaders.arnoldSurfaceShader'):
            basePolicy = FormMaster.CreateParameterPolicy(
                tabPolicy, materialNode.getParameter(shaderParamName))
            proxyPolicy = QT4FormWidgets.ValuePolicyProxy(basePolicy)
            proxyPolicy.setWidgetHints(proxyPolicy.getWidgetHints())
            proxyPolicy.getWidgetHints()['readOnly'] = True
            tabPolicy.addChildPolicy(proxyPolicy)

        # One parameter group per shader, expanded by default.
        for paramsGroupName in ('shaders.arnoldLightParams',
                                'shaders.arnoldLightFilterParams',
                                'shaders.arnoldSurfaceParams'):
            groupPolicy = FormMaster.CreateParameterPolicy(
                tabPolicy, materialNode.getParameter(paramsGroupName))
            groupPolicy.getWidgetHints()['open'] = True
            tabPolicy.addChildPolicy(groupPolicy)

        return tabPolicy
class ArnoldFilterLightGafferUIDelegate(ArnoldLightGafferUIDelegate):
    """UI delegate for the Arnold filter light package."""

    # Identifier for this package's "add" action in the GafferThree UI.
    # NOTE(review): "Fliter" is a typo but this string may be persisted as
    # the action hash -- keep it byte-identical.
    AddPackageActionHash = 'GafferThree-AddFliterLight'
    # Keyboard shortcut bound to the add action.
    DefaultShortcut = 'Ctrl+9'

    def getMaterialTabPolicy(self):
        """
        Build the 'Material' tab policy: the light and light filter shader
        selections (editable), followed by the parameter group for each.
        """
        materialNode = NU.GetRefNode(self.getPackageNode(), "material")
        if materialNode is None:
            return None

        tabPolicy = QT4FormWidgets.PythonGroupPolicy('material')
        tabPolicy.getWidgetHints()['hideTitle'] = True

        # Shader selections are editable for this package.
        for shaderParamName in ('shaders.arnoldLightShader',
                                'shaders.arnoldLightFilterShader'):
            basePolicy = FormMaster.CreateParameterPolicy(
                tabPolicy, materialNode.getParameter(shaderParamName))
            proxyPolicy = QT4FormWidgets.ValuePolicyProxy(basePolicy)
            proxyPolicy.setWidgetHints(proxyPolicy.getWidgetHints())
            proxyPolicy.getWidgetHints()['readOnly'] = False
            tabPolicy.addChildPolicy(proxyPolicy)

        # One parameter group per shader, expanded by default.
        for paramsGroupName in ('shaders.arnoldLightParams',
                                'shaders.arnoldLightFilterParams'):
            groupPolicy = FormMaster.CreateParameterPolicy(
                tabPolicy, materialNode.getParameter(paramsGroupName))
            groupPolicy.getWidgetHints()['open'] = True
            tabPolicy.addChildPolicy(groupPolicy)

        return tabPolicy
class ArnoldFilterLightGafferEditUIDelegate(ArnoldLightGafferEditUIDelegate):
    """UI delegate for the Arnold filter light edit package."""

    def getMaterialTabPolicy(self):
        """
        Build the 'Material' tab policy for the edit package: the light and
        light filter shader selections (editable), followed by the parameter
        group for each.
        """
        materialNode = NU.GetRefNode(self.getPackageNode(), "material_edit")
        if materialNode is None:
            return None

        tabPolicy = QT4FormWidgets.PythonGroupPolicy('material')
        tabPolicy.getWidgetHints()['hideTitle'] = True

        # Shader selections are editable for this package.
        for shaderParamName in ('shaders.arnoldLightShader',
                                'shaders.arnoldLightFilterShader'):
            basePolicy = FormMaster.CreateParameterPolicy(
                tabPolicy, materialNode.getParameter(shaderParamName))
            proxyPolicy = QT4FormWidgets.ValuePolicyProxy(basePolicy)
            proxyPolicy.setWidgetHints(proxyPolicy.getWidgetHints())
            proxyPolicy.getWidgetHints()['readOnly'] = False
            tabPolicy.addChildPolicy(proxyPolicy)

        # One parameter group per shader, expanded by default.
        for paramsGroupName in ('shaders.arnoldLightParams',
                                'shaders.arnoldLightFilterParams'):
            groupPolicy = FormMaster.CreateParameterPolicy(
                tabPolicy, materialNode.getParameter(paramsGroupName))
            groupPolicy.getWidgetHints()['open'] = True
            tabPolicy.addChildPolicy(groupPolicy)

        return tabPolicy
class ArnoldSkyDomeLightGafferUIDelegate(ArnoldLightGafferUIDelegate):
    """UI delegate for the Arnold sky dome light package."""

    # Identifiers used to wire this package type into the add-package menu.
    AddPackageActionHash = 'GafferThree-AddSkyDomeLight'
    DefaultShortcut = 'Ctrl+0'

    def getObjectTabPolicy(self):
        """
        Returns the widget that should be displayed under the 'Object' tab.
        """
        # Get the create node in the package, which contains the transform parameter
        packageNode = self.getPackageNode()
        createNode = NU.GetRefNode(packageNode, "create")
        if createNode is None:
            return None
        # Create a root group policy and add some hints on it
        rootPolicy = QT4FormWidgets.PythonGroupPolicy('object')
        rootPolicy.getWidgetHints()['open'] = True
        rootPolicy.getWidgetHints()['hideTitle'] = True
        # Group translate/rotate/scale under one expanded "transform" section.
        transformPolicy = QT4FormWidgets.PythonGroupPolicy('transform')
        transformPolicy.getWidgetHints()['open'] = True
        translatePolicy = FormMaster.CreateParameterPolicy(None, createNode.getParameter("transform.translate"))
        rotatePolicy = FormMaster.CreateParameterPolicy(None, createNode.getParameter("transform.rotate"))
        scalePolicy = FormMaster.CreateParameterPolicy(None, createNode.getParameter("transform.scale"))
        transformPolicy.addChildPolicy(translatePolicy)
        transformPolicy.addChildPolicy(rotatePolicy)
        transformPolicy.addChildPolicy(scalePolicy)
        rootPolicy.addChildPolicy(transformPolicy)
        return rootPolicy

    def getMaterialTabPolicy(self):
        """Build the 'Material' tab: read-only shader parameters plus open
        parameter groups for the light and surface shaders."""
        # Create a new material policy that just has the prmanLightParams for
        # each light type
        packageNode = self.getPackageNode()
        materialNode = NU.GetRefNode(packageNode, "material")
        if materialNode is not None:
            materialPolicy = QT4FormWidgets.PythonGroupPolicy('material')
            materialPolicy.getWidgetHints()['hideTitle'] = True
            # Light shader parameter, shown read-only through a proxy.
            lightShaderPolicy = FormMaster.CreateParameterPolicy(materialPolicy,
                materialNode.getParameter('shaders.arnoldLightShader'))
            lightShaderPolicy = QT4FormWidgets.ValuePolicyProxy(lightShaderPolicy)
            lightShaderPolicy.setWidgetHints(lightShaderPolicy.getWidgetHints())
            lightShaderPolicy.getWidgetHints()['readOnly'] = True
            materialPolicy.addChildPolicy(lightShaderPolicy)
            # Surface shader parameter, also read-only.
            imageShaderPolicy = FormMaster.CreateParameterPolicy(materialPolicy,
                materialNode.getParameter('shaders.arnoldSurfaceShader'))
            imageShaderPolicy = QT4FormWidgets.ValuePolicyProxy(imageShaderPolicy)
            imageShaderPolicy.setWidgetHints(imageShaderPolicy.getWidgetHints())
            imageShaderPolicy.getWidgetHints()['readOnly'] = True
            materialPolicy.addChildPolicy(imageShaderPolicy)
            # Parameter groups for both shaders, expanded ('open') by default.
            params1Policy = FormMaster.CreateParameterPolicy(materialPolicy,
                materialNode.getParameter('shaders.arnoldLightParams'))
            params1Policy.getWidgetHints()['open'] = True
            params2Policy = FormMaster.CreateParameterPolicy(materialPolicy,
                materialNode.getParameter('shaders.arnoldSurfaceParams'))
            params2Policy.getWidgetHints()['open'] = True
            materialPolicy.addChildPolicy(params1Policy)
            materialPolicy.addChildPolicy(params2Policy)
            return materialPolicy
        return None

    def getLinkingTabPolicy(self):
        """Build the light-linking tab from the package's linking nodes."""
        return LightUIDelegate.GetLightLinkingTabPolicy(
            self.getReferencedNode("node_lightLink_illumination"),
            self.getReferencedNode("node_lightLink_shadow"),
            self.getReferencedNode("node_lightListEdit"))
class ArnoldSkyDomeLightGafferEditUIDelegate(ArnoldLightGafferEditUIDelegate):
    """UI delegate for the *edit* package of an Arnold sky dome light.

    Same material tab as ArnoldSkyDomeLightGafferUIDelegate, but it resolves
    the edit package's "material_edit" reference instead of "material".
    """

    def getMaterialTabPolicy(self):
        # Create a new material policy that just has the prmanLightParams for
        # each light type
        packageNode = self.getPackageNode()
        materialNode = NU.GetRefNode(packageNode, "material_edit")
        if materialNode is not None:
            materialPolicy = QT4FormWidgets.PythonGroupPolicy('material')
            materialPolicy.getWidgetHints()['hideTitle'] = True
            # Light shader parameter, shown read-only through a proxy.
            lightShaderPolicy = FormMaster.CreateParameterPolicy(materialPolicy,
                materialNode.getParameter('shaders.arnoldLightShader'))
            lightShaderPolicy = QT4FormWidgets.ValuePolicyProxy(lightShaderPolicy)
            lightShaderPolicy.setWidgetHints(lightShaderPolicy.getWidgetHints())
            lightShaderPolicy.getWidgetHints()['readOnly'] = True
            materialPolicy.addChildPolicy(lightShaderPolicy)
            # Surface shader parameter, also read-only.
            imageShaderPolicy = FormMaster.CreateParameterPolicy(materialPolicy,
                materialNode.getParameter('shaders.arnoldSurfaceShader'))
            imageShaderPolicy = QT4FormWidgets.ValuePolicyProxy(imageShaderPolicy)
            imageShaderPolicy.setWidgetHints(imageShaderPolicy.getWidgetHints())
            imageShaderPolicy.getWidgetHints()['readOnly'] = True
            materialPolicy.addChildPolicy(imageShaderPolicy)
            # Parameter groups for both shaders, expanded ('open') by default.
            params1Policy = FormMaster.CreateParameterPolicy(materialPolicy,
                materialNode.getParameter('shaders.arnoldLightParams'))
            params1Policy.getWidgetHints()['open'] = True
            params2Policy = FormMaster.CreateParameterPolicy(materialPolicy,
                materialNode.getParameter('shaders.arnoldSurfaceParams'))
            params2Policy.getWidgetHints()['open'] = True
            materialPolicy.addChildPolicy(params1Policy)
            materialPolicy.addChildPolicy(params2Policy)
            return materialPolicy
        return None
# Register the UI delegates
# Every Gaffer package class is paired with the delegate class that builds
# its parameter tabs.  Most edit packages share the generic
# ArnoldLightGafferEditUIDelegate; gobo, filter and sky-dome lights have
# specialised edit delegates of their own (defined above).
UIDelegate.RegisterUIDelegateClass(ArnoldSpotLightGafferPackage, ArnoldSpotLightGafferUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldSpotLightGafferEditPackage, ArnoldLightGafferEditUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldPointLightGafferPackage, ArnoldPointLightGafferUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldPointLightGafferEditPackage, ArnoldLightGafferEditUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldQuadLightGafferPackage, ArnoldQuadLightGafferUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldQuadLightGafferEditPackage, ArnoldLightGafferEditUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldDistantLightGafferPackage, ArnoldDistantLightGafferUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldDistantLightGafferEditPackage, ArnoldLightGafferEditUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldDiskLightGafferPackage, ArnoldDiskLightGafferUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldDiskLightGafferEditPackage, ArnoldLightGafferEditUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldCylinderLightGafferPackage, ArnoldCylinderLightGafferUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldCylinderLightGafferEditPackage, ArnoldLightGafferEditUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldMeshLightGafferPackage, ArnoldMeshLightGafferUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldMeshLightGafferEditPackage, ArnoldLightGafferEditUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldGoboSpotLightGafferPackage, ArnoldGoboSpotLightGafferUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldGoboSpotLightGafferEditPackage, ArnoldGoboSpotLightGafferEditUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldFilterLightGafferPackage, ArnoldFilterLightGafferUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldFilterLightGafferEditPackage, ArnoldFilterLightGafferEditUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldSkyDomeLightGafferPackage, ArnoldSkyDomeLightGafferUIDelegate)
UIDelegate.RegisterUIDelegateClass(ArnoldSkyDomeLightGafferEditPackage, ArnoldSkyDomeLightGafferEditUIDelegate)
| [
"iceprincefounder@qq.com"
] | iceprincefounder@qq.com |
16c45496b754d067a48d6cd52d2a82516dd693eb | 4ac92f2661a8a949725bc1246aff330cf7286038 | /test_process_voltage.py | 9d6d8e7bf203d4beb3639d8a02ceb8edcbfb4469 | [
"MIT"
] | permissive | matthew-huber/bme590hrm | 7aebf9ff91724927e4640bb7dc6842093d6a35fb | 8ecfd49fb2f374733e85ec262b4a7d9fa650f5e1 | refs/heads/master | 2020-04-01T03:15:12.107378 | 2018-10-30T22:12:53 | 2018-10-30T22:12:53 | 152,815,275 | 0 | 0 | MIT | 2018-10-30T22:12:54 | 2018-10-12T22:52:22 | Python | UTF-8 | Python | false | false | 543 | py |
from analyze_data import process_voltage
import pytest
from fileReader import load_csv
# Module-level fixture data, loaded once at import time.
# NOTE(review): times/voltages are not referenced by the test below --
# possibly leftover from an earlier version; confirm before removing.
times, voltages = load_csv('./test_data/test_data3.csv')
@pytest.mark.parametrize("voltage_array,expected", [
    ([-0.19, -0.205, -0.21, -0.2, -0.195, -0.21, -0.23, -0.235, -0.245],
     [1, 0.357142857, 0.142857143, 0.571428571, 0.785714286, 0.142857143,
      -0.714285714, -0.928571429, -1.357142857]),
])
def test_process_voltage(voltage_array, expected):
    """Check process_voltage against hand-computed expected values.

    pytest.approx is used because the expected values are rounded floats.
    """
    voltage = process_voltage(voltage_array)
    assert voltage == pytest.approx(expected)
| [
"matthew.huber@duke.edu"
] | matthew.huber@duke.edu |
43fdc62ab7e5d11cfed8f1e4e2ccc8a2e1759c9d | cf197880ad947b1706ae2ca19fa7010cc2dd12b8 | /GUI/Tkinter/part_manager/db.py | 450b9aac89ce37d337ad3746245cc883560823c6 | [] | no_license | KevinMichaelCamp/Python-HardWay | 9b8adb5be31729da8f52c956b4d0638a79715013 | 25f21f4fb8934edb13649fea3d5d15033332a7eb | refs/heads/master | 2020-03-26T12:59:15.938322 | 2020-01-02T01:27:37 | 2020-01-02T01:27:37 | 144,917,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,097 | py | import sqlite3
class Database:
    """Thin sqlite3 wrapper around a single ``parts`` table.

    The table is created on first use; ``id`` is an auto-assigned
    INTEGER PRIMARY KEY and the remaining columns are stored as text.
    """

    def __init__(self, db):
        """Open (or create) the database file and ensure the table exists."""
        self.conn = sqlite3.connect(db)
        self.cur = self.conn.cursor()
        self.cur.execute(
            "CREATE TABLE IF NOT EXISTS parts "
            "(id INTEGER PRIMARY KEY, part text, customer text, "
            "retailer text, price text)"
        )
        self.conn.commit()

    def fetch(self):
        """Return every row in the table as a list of tuples."""
        return self.cur.execute("SELECT * FROM parts").fetchall()

    def insert(self, part, customer, retailer, price):
        """Add a new part record; the id is assigned automatically."""
        new_row = (part, customer, retailer, price)
        self.cur.execute("INSERT INTO parts VALUES (NULL, ?, ?, ?, ?)", new_row)
        self.conn.commit()

    def remove(self, id):
        """Delete the record with the given id (no-op if it does not exist)."""
        self.cur.execute("DELETE FROM parts WHERE id=?", (id,))
        self.conn.commit()

    def update(self, id, part, customer, retailer, price):
        """Overwrite every field of the record with the given id."""
        new_values = (part, customer, retailer, price, id)
        self.cur.execute(
            "UPDATE parts SET part=?, customer=?, retailer=?, price=? WHERE id=?",
            new_values,
        )
        self.conn.commit()

    def __del__(self):
        # Best-effort close when the wrapper is garbage-collected.
        self.conn.close()
# db = Database('store.db')
# db.insert("4GB DDR4 Ram","John Doe","Microcenter","160")
| [
"kevinmichaelcamp@gmail.com"
] | kevinmichaelcamp@gmail.com |
b61b8d2cef0a56b2308755723d5c850c1c678bab | 06f7c5c8d7fcd24fa578f63c45bd88b54b60d1be | /63 Powerful digit counts.py | fdbdf8b5a37b7e6e4bff88b744fc0f20aeddbccd | [] | no_license | jericsinger/Euler | ffc39f30e75d4147c97c9b7f63b6c55d15b7c1a2 | 3aed12ddcc68b66677acd11aab77a3d93a098cf9 | refs/heads/master | 2021-01-20T11:31:28.844084 | 2014-02-20T02:05:37 | 2014-02-20T02:05:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | candidates = {}
# Project Euler 63: count the n-digit positive integers that are also an
# nth power.  For each length n we collect every base b with b**n having
# exactly n digits, then print the findings and the total count.
#
# Fix: the original used Python-2-only `print x` statements; the call form
# `print(x)` below behaves identically on both Python 2 and Python 3.
candidates = {}
count = 0
# 10**n always has n+1 digits, so only bases 1..9 can ever qualify, and for
# large n even 9**n falls short of n digits; 1..24 safely covers all cases.
for length in range(1, 25):
    candidates[length] = []
    base = 1
    result = 1
    # Walk bases upward until the power overshoots `length` digits.
    while len(str(result)) <= length:
        if len(str(result)) == length:
            candidates[length].append("%s = %s^%s" % (result, base, length))
            count += 1
        base += 1
        result = base ** length
# Report every match, grouped by exponent, then the grand total.
for length in candidates:
    for entry in candidates[length]:
        print(entry)
print(count)
"justin.e.singer@gmail.com"
] | justin.e.singer@gmail.com |
857be5adf6d32f3427f0abd128e6aa8eb807e25f | 56f57ce1d70cf9678ac1de48521c5020dcff2a1c | /test.py | 288cb2713db5e792292c9a76cbbcd611667c2564 | [] | no_license | Alton1998/Public-key-generation | 5dde21ebf707fb8d7557a16d29c567a166972506 | 550b3c501a69bbb2063f9764b333e506bd065dc7 | refs/heads/master | 2022-07-17T15:49:05.066813 | 2020-03-13T18:05:46 | 2020-03-13T18:05:46 | 240,910,825 | 0 | 0 | null | 2022-06-22T01:21:59 | 2020-02-16T15:01:08 | Python | UTF-8 | Python | false | false | 818 | py | import unittest
from public_key import PublicKeyCrypto
import numpy as np
# Class to test the PublicKeyCrypto Class
# Class to test the PublicKeyCrypto Class
class TestPublickeyCypto(unittest.TestCase):
    """Smoke tests for PublicKeyCrypto's private helpers.

    The helpers are private, so they are reached through their name-mangled
    form (_PublicKeyCrypto__*); only basic shape/non-None checks are made.
    """

    # Test the generator matrix
    def test_generator_matrix_func(self):
        # Only the 90x90 shape is checked, not the matrix contents.
        test_matrix = np.random.randint(1,10,size=(90,90))
        p=PublicKeyCrypto()
        matrix=p._PublicKeyCrypto__generate_matrix()
        self.assertEqual(test_matrix.shape,matrix.shape)

    # Test the select field
    def test_select_field_func(self):
        p=PublicKeyCrypto()
        self.assertIsNotNone(p._PublicKeyCrypto__select_field())

    # Test the private key
    def test_generate_private_key(self):
        p=PublicKeyCrypto()
        self.assertIsNotNone(p._PublicKeyCrypto__generate_private_key())
if __name__=='__main__':
    # Allow running this test module directly via the unittest runner.
    unittest.main()
| [
"altondsouza02@gmail.com"
] | altondsouza02@gmail.com |
f8afbc43f94b8d61637d0976391cc1a1036b1a02 | b9a23d1947f5f6328ca13c7e652499173f64da47 | /s_234/s_234.pyde | dd39037d773be3646c55f4750ee93eecf710933c | [] | no_license | berinhard/sketches | 96414a14ec40ca1281dcd8b2fec2c50db1d76e9a | f0e4be211397f205bcc6bd2c8b053b920a26bb62 | refs/heads/master | 2021-06-09T07:49:59.220785 | 2020-12-08T04:14:55 | 2020-12-08T04:23:43 | 137,092,663 | 41 | 15 | null | 2021-03-20T00:41:39 | 2018-06-12T15:34:49 | JavaScript | UTF-8 | Python | false | false | 1,251 | pyde | # Author: Berin
# Sketches repo: https://github.com/berinhard/sketches
# berin lib: https://github.com/berinhard/berin/
from berin.coords import draw_at_center, polar_coordinate
from berin.grids import BaseGrid
from berin.palettes import get_color_palette
from berin.save_frames import save_video_frames
from berin.shapes import regular_polygon, draw_shape, lines_intersection, IntersectionLine
from berin import easings
from random import choice
class Grid(BaseGrid):
    """Grid that draws one rectangle per cell, sized from the cell indices."""

    def draw(self, *f_args, **f_kwargs):
        """Translate to each cell's origin and delegate to draw_elem."""
        for grid_elem in self.get_grid_positions():
            with pushMatrix():
                translate(grid_elem.x, grid_elem.y)
                self.draw_elem(grid_elem, *f_args, **f_kwargs)

    def draw_elem(self, grid_elem, *f_args, **f_kwargs):
        """Draw one cell: nothing on the diagonal, otherwise a rect whose
        proportions depend on which side of the diagonal the cell is on."""
        x, y = grid_elem.x, grid_elem.y
        i, j = grid_elem.i, grid_elem.j
        # NOTE(review): draw() already translates to (x, y), and rect() is
        # offset by (x, y) again here, doubling the offset -- confirm this
        # is an intentional part of the composition.
        if i == j:
            # Leave the diagonal empty.
            return
        elif i > j:
            # Below the diagonal: width grows with sqrt(i), height with j.
            rect(x, y, 10 * (sqrt(i + 1)), 10 * j)
        else:
            # Above the diagonal: mirrored proportions.
            rect(x, y, 10 * i, 10 * (sqrt(j + 1)), )
def setup():
    """Processing setup: canvas size, background and stroke styling."""
    size(900, 900)
    noFill()
    background(240)
    stroke(27, 27, 27, 45)  # translucent near-black strokes
    strokeWeight(2)
def draw():
    """Render one frame: draw the grid, save the image, stop the loop."""
    grid = Grid(0, 0, width / 10, 10)
    grid.draw()
    saveFrame("cover.png")
    noLoop()  # single still image -- no animation needed
"bernardoxhc@gmail.com"
] | bernardoxhc@gmail.com |
355e6320238decdd731882bf7977ec35bcd9ec96 | cce6364dd85b62782671cd8048873eede2045137 | /hot/300_lengthOfLIS.py | 2df58fe7fe491209d5fbddbd0f2e9ec832227ea2 | [] | no_license | gmt710/leetcode_python | ed647958440f66583b8717dae7bca49c516984da | 441623afee3713506b702c5fd462c7ba84b48442 | refs/heads/master | 2020-03-28T05:11:02.851792 | 2019-04-17T09:14:51 | 2019-04-17T09:14:51 | 147,761,046 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | class Solution:
def lengthOfLIS(self, nums: List[int]) -> int:
if len(nums) == 0:
return 0
dp = [1] * len(nums)
for i in range(len(nums)-1):
for j in range(0,i+1):
if nums[i+1] > nums[j]:
dp[i+1] = max(dp[i+1], dp[j]+1)
return max(dp[:])
# nums1 = input()
# print(Solution().lengthOfLIS(nums1))
| [
"noreply@github.com"
] | gmt710.noreply@github.com |
5faf980f7d12cf4bd18d8a24960480f7183138bd | 209bbccf90fb9dd403b3f296ec96e1f666704eda | /src/interaction.py | 1b6d558135fe98212c5bd55a35cfe714dd555b7e | [] | no_license | Kkun84/Practice-pytest | ae6dc5f3158090825695f145b7fea8c55a755e8c | 7b6bea8b583e6677aa10e50f873ce2637f89bf1e | refs/heads/master | 2022-08-21T21:28:05.573335 | 2020-05-25T01:19:24 | 2020-05-25T01:19:24 | 266,654,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | def send(message: str):
ok = receive(message)
if ok:
return 'success'
return 'failure'
def receive(message: str) -> bool:
    """Print the incoming message and acknowledge it with True."""
    received_line = 'received: {}'.format(message)
    print(received_line)
    return True
| [
"kesuike84@gmail.com"
] | kesuike84@gmail.com |
b878b8374c7724ad0fe32cbcec554bed0fb6b28c | 46f0a175038041824e168b3fce53ae3477e31738 | /simplemapplot/__init__.py | 76ae4abf3232724548400738beae84860dace6b7 | [] | no_license | jleesnow/Tweet-Map | 815f6b3561cf21bf99d2e6a9cfa94984a70bba03 | bbcb5447a6b8d97a37f47dd1bbf8d88ef62575a0 | refs/heads/master | 2021-01-01T06:17:04.771473 | 2015-09-22T15:26:12 | 2015-09-22T15:26:12 | 42,899,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | from .simplemapplot import make_us_state_map
| [
"snowj3@students.wwu.edu"
] | snowj3@students.wwu.edu |
b177e6262b7577b0562e2d466f330451470a6895 | dbbf9fd571975c67684836a9507ffb0dec6b49bf | /tests/test_metrics.py | 4a22f3dc5eb72c0509d1111cbee721cea56e5177 | [
"MIT"
] | permissive | emuhedo/hisser | 22c2064d7b4aed99a045875461c8a85f6b3bf8fd | b4b963092a38a73713939f18505a90d9c8aedaa3 | refs/heads/master | 2020-03-31T16:40:43.363339 | 2018-09-18T20:12:05 | 2018-09-18T20:12:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,487 | py | from hisser import metrics as api
from hisser.utils import make_key
def test_make_tree():
    """make_tree yields (parent, child) edges; b'.' is the tree root and
    each dotted metric name contributes one edge per path segment."""
    result = list(api.make_tree([b'boo', b'foo']))
    assert result == [(b'.', b'boo'), (b'.', b'foo')]
    # A single deep name expands into the full chain of edges.
    result = list(api.make_tree([b'boo.foo.bar']))
    assert result == [(b'.', b'boo'),
                      (b'boo', b'foo'),
                      (b'boo.foo', b'bar')]
    # Siblings under a common parent share the parent edge.
    result = list(api.make_tree([b'boo.bar', b'boo.foo']))
    assert result == [(b'.', b'boo'),
                      (b'boo', b'bar'),
                      (b'boo', b'foo')]
    result = list(api.make_tree([b'app1.inst1.boo', b'app1.inst1.foo', b'app1.inst2.foo']))
    assert result == [(b'.', b'app1'),
                      (b'app1', b'inst1'),
                      (b'app1.inst1', b'boo'),
                      (b'app1.inst1', b'foo'),
                      (b'app1', b'inst2'),
                      (b'app1.inst2', b'foo')]
def test_query_parts():
    """query_parts splits a query into the literal dotted prefix (bytes) and
    the remaining segments; the prefix is empty when the very first segment
    already contains glob syntax."""
    assert api.query_parts('localhost.boo.*.boo.foo') == (b'localhost.boo', [b'*', b'boo', b'foo'])
    assert api.query_parts('[abc].boo.foo') == (b'', [b'[abc]', b'boo', b'foo'])
def test_simple_find(tmpdir):
    """Flat metric names can be listed via iter_tree and matched with globs."""
    fname = str(tmpdir.join('metrics.db'))
    mi = api.MetricIndex(fname)
    mi.add([b'boo', b'foo'])
    result = list(mi.iter_tree())
    assert result == [(b'.', b'boo'), (b'.', b'foo')]
    # find_metrics_many maps each query string to its matching metric names.
    result = mi.find_metrics_many(['*'])
    assert result == {'*': [b'boo', b'foo']}
    result = mi.find_metrics_many(['b*'])
    assert result == {'b*': [b'boo']}
def test_prefix(tmpdir):
    """Glob patterns can match any dotted path segment, not just the last."""
    fname = str(tmpdir.join('metrics.db'))
    mi = api.MetricIndex(fname)
    mi.add([b'app1.inst1.boo', b'app1.inst1.foo', b'app1.inst2.foo'])
    # '*' in the middle of a query matches a whole segment.
    result = mi.find_metrics('app1.*.foo')
    assert result == [b'app1.inst1.foo', b'app1.inst2.foo']
    result = mi.find_metrics('app1.inst1.*')
    assert result == [b'app1.inst1.boo', b'app1.inst1.foo']
    # Partial-segment globs work too.
    result = mi.find_metrics('app1.*.f*')
    assert result == [b'app1.inst1.foo', b'app1.inst2.foo']
    result = mi.find_metrics('*.*.f*')
    assert result == [b'app1.inst1.foo', b'app1.inst2.foo']
    # find_tree yields (is_leaf, path) pairs for the matched level: full
    # metric names are flagged True, intermediate branches False.
    result = mi.find_tree('app1.*')
    assert result == [(False, b'app1.inst1'), (False, b'app1.inst2')]
    result = mi.find_tree('*')
    assert result == [(False, b'app1')]
    result = mi.find_tree('app1.inst1.*')
    assert result == [(True, b'app1.inst1.boo'), (True, b'app1.inst1.foo')]
def test_tags(tmpdir):
    """Tagged metrics (name;tag=value;...) can be enumerated and matched.

    Covers all four match_by_tags operators: '=' exact, '!=' negated exact,
    '=~' pattern (':a,b' list, '!glob' glob, plain regex) and its negation
    '!=~'.  Metrics are added twice to check that re-adding does not create
    duplicate index entries.
    """
    fname = str(tmpdir.join('metrics.db'))
    boo = b'boo;dc=prod'
    foo = b'foo;dc=test;host=alpha'
    mi = api.MetricIndex(fname)
    mi.add([boo, foo])
    mi.add([boo, foo])
    # The bare metric name is exposed as the implicit 'name' tag.
    assert list(mi.iter_tags()) == [
        (b'dc', b'prod'), (b'dc', b'test'), (b'host', b'alpha'),
        (b'name', b'boo'), (b'name', b'foo')]
    assert sorted(mi.iter_tag_names()) == [
        (b'boo', boo),
        (b'dc=prod', boo),
        (b'dc=test', foo),
        (b'foo', foo),
        (b'host=alpha', foo)
    ]
    assert mi.get_tags() == [b'dc', b'host', b'name']
    assert mi.get_tag_values('dc') == [b'prod', b'test']
    assert mi.get_tag_values('host') == [b'alpha']
    assert mi.get_tag_values('foo') == []
    # '=' op: exact tag-value match.
    result = mi.match_by_tags([('dc', '=', 'prod')])
    assert set(result) == {boo}
    result = mi.match_by_tags([('name', '=', 'foo')])
    assert set(result) == {foo}
    # '!=' op: exact mismatch; multiple conditions are ANDed together.
    result = mi.match_by_tags([('dc', '!=', 'prod')])
    assert set(result) == {foo}
    result = mi.match_by_tags([('dc', '!=', 'prod'), ('host', '=', 'alpha')])
    assert set(result) == {foo}
    # '=~' op: ':a,b' value list, '!glob' glob pattern, or a plain regex.
    result = mi.match_by_tags([('dc', '=~', ':prod,test')])
    assert set(result) == {boo, foo}
    result = mi.match_by_tags([('dc', '=~', ':stable')])
    assert set(result) == set()
    result = mi.match_by_tags([('name', '=~', '!bo*')])
    assert set(result) == {boo}
    result = mi.match_by_tags([('name', '=~', '!oo*')])
    assert set(result) == set()
    result = mi.match_by_tags([('dc', '=~', '(prod|test)')])
    assert set(result) == {boo, foo}
    # '!=~' op: negation of every '=~' form above.
    result = mi.match_by_tags([('dc', '!=~', ':prod,test')])
    assert set(result) == set()
    result = mi.match_by_tags([('dc', '!=~', ':stable')])
    assert set(result) == {boo, foo}
    result = mi.match_by_tags([('name', '!=~', '!bo*')])
    assert set(result) == {foo}
    result = mi.match_by_tags([('name', '!=~', '!oo*')])
    assert set(result) == {boo, foo}
    result = mi.match_by_tags([('dc', '!=~', '(prod|test)')])
    assert set(result) == set()
| [
"baverman@gmail.com"
] | baverman@gmail.com |
a5c2a40abf352134456c61fce4a5b70db609744b | 8e167be7bbd0677cc51976826d5575d1ad6637f4 | /legacy/json_display.py | 07e8585c47557c6d9223a6bf757221fafb64de8c | [
"MIT"
] | permissive | kprasadvnsi/apicula | 04509e86402f5a570b096d34f792ee761962894c | 517c94c7cd9e19dd55a3a372a723400121cd41b3 | refs/heads/master | 2020-11-28T03:59:51.866180 | 2019-12-22T15:02:57 | 2019-12-22T15:02:57 | 229,697,807 | 1 | 0 | MIT | 2019-12-23T07:14:10 | 2019-12-23T07:14:10 | null | UTF-8 | Python | false | false | 528 | py | import sys
import json
import numpy as np
from bslib import read_bitstream
from PIL import Image
image = np.zeros([712, 2840], dtype="byte")
for fname in sys.argv[1:]:
print(fname)
with open(fname) as f:
try:
data = json.load(f)
except json.decoder.JSONDecodeError:
continue
for x, y in data:
image[x][y] += 1
print(np.nonzero(image > 1))
im = Image.frombytes(mode='1', size=image.shape[::-1], data=np.packbits(image))
#im.show()
im.save("bitmap.png","PNG")
| [
"pepijndevos@gmail.com"
] | pepijndevos@gmail.com |
17e65a05b9703447c6328c5ba85426b62aadec9f | c97cac88118ebd0814dec123e164dc74fef5773e | /omero_python_libs/omero_model_FilesetAnnotationLink_ice.py | 4afbd0ce4fb9ae127851099fd3ebb4f377fb0661 | [
"Apache-2.0"
] | permissive | nseyedtalebi/django-uwsgi-nginx | 285a7ed2c66b0ca2f25dd4fc79018f8deac1472d | 3163e9c7a88ed1298312d6a69a0d9eaf2f007e97 | refs/heads/master | 2020-07-08T22:01:42.082634 | 2019-08-22T16:37:18 | 2019-08-22T16:37:18 | 203,791,310 | 0 | 0 | null | 2019-08-22T12:33:28 | 2019-08-22T12:33:28 | null | UTF-8 | Python | false | false | 9,901 | py | # -*- coding: utf-8 -*-
# **********************************************************************
#
# Copyright (c) 2003-2017 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
#
# Ice version 3.6.4
#
# <auto-generated>
#
# Generated from file `FilesetAnnotationLink.ice'
#
# Warning: do not edit this file.
#
# </auto-generated>
#
from sys import version_info as _version_info_
import Ice, IcePy
import omero_model_IObject_ice
import omero_RTypes_ice
import omero_model_RTypes_ice
import omero_System_ice
import omero_Collections_ice
# Included module omero
_M_omero = Ice.openModule('omero')
# Included module omero.model
_M_omero.model = Ice.openModule('omero.model')
# Included module Ice
_M_Ice = Ice.openModule('Ice')
# Included module omero.sys
_M_omero.sys = Ice.openModule('omero.sys')
# Included module omero.api
_M_omero.api = Ice.openModule('omero.api')
# Start of module omero
__name__ = 'omero'
# Start of module omero.model
__name__ = 'omero.model'
if 'Fileset' not in _M_omero.model.__dict__:
_M_omero.model._t_Fileset = IcePy.declareClass('::omero::model::Fileset')
_M_omero.model._t_FilesetPrx = IcePy.declareProxy('::omero::model::Fileset')
if 'Annotation' not in _M_omero.model.__dict__:
_M_omero.model._t_Annotation = IcePy.declareClass('::omero::model::Annotation')
_M_omero.model._t_AnnotationPrx = IcePy.declareProxy('::omero::model::Annotation')
if 'Details' not in _M_omero.model.__dict__:
_M_omero.model._t_Details = IcePy.declareClass('::omero::model::Details')
_M_omero.model._t_DetailsPrx = IcePy.declareProxy('::omero::model::Details')
if 'FilesetAnnotationLink' not in _M_omero.model.__dict__:
_M_omero.model.FilesetAnnotationLink = Ice.createTempClass()
class FilesetAnnotationLink(_M_omero.model.IObject):
def __init__(self, _id=None, _details=None, _loaded=False, _version=None, _parent=None, _child=None):
if Ice.getType(self) == _M_omero.model.FilesetAnnotationLink:
raise RuntimeError('omero.model.FilesetAnnotationLink is an abstract class')
_M_omero.model.IObject.__init__(self, _id, _details, _loaded)
self._version = _version
self._parent = _parent
self._child = _child
def ice_ids(self, current=None):
return ('::Ice::Object', '::omero::model::FilesetAnnotationLink', '::omero::model::IObject')
def ice_id(self, current=None):
return '::omero::model::FilesetAnnotationLink'
def ice_staticId():
return '::omero::model::FilesetAnnotationLink'
ice_staticId = staticmethod(ice_staticId)
def getVersion(self, current=None):
pass
def setVersion(self, theVersion, current=None):
pass
def getParent(self, current=None):
pass
def setParent(self, theParent, current=None):
pass
def getChild(self, current=None):
pass
def setChild(self, theChild, current=None):
pass
def link(self, theParent, theChild, current=None):
pass
def __str__(self):
return IcePy.stringify(self, _M_omero.model._t_FilesetAnnotationLink)
__repr__ = __str__
_M_omero.model.FilesetAnnotationLinkPrx = Ice.createTempClass()
class FilesetAnnotationLinkPrx(_M_omero.model.IObjectPrx):
def getVersion(self, _ctx=None):
return _M_omero.model.FilesetAnnotationLink._op_getVersion.invoke(self, ((), _ctx))
def begin_getVersion(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.FilesetAnnotationLink._op_getVersion.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getVersion(self, _r):
return _M_omero.model.FilesetAnnotationLink._op_getVersion.end(self, _r)
def setVersion(self, theVersion, _ctx=None):
return _M_omero.model.FilesetAnnotationLink._op_setVersion.invoke(self, ((theVersion, ), _ctx))
def begin_setVersion(self, theVersion, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.FilesetAnnotationLink._op_setVersion.begin(self, ((theVersion, ), _response, _ex, _sent, _ctx))
def end_setVersion(self, _r):
return _M_omero.model.FilesetAnnotationLink._op_setVersion.end(self, _r)
def getParent(self, _ctx=None):
return _M_omero.model.FilesetAnnotationLink._op_getParent.invoke(self, ((), _ctx))
def begin_getParent(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.FilesetAnnotationLink._op_getParent.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getParent(self, _r):
return _M_omero.model.FilesetAnnotationLink._op_getParent.end(self, _r)
def setParent(self, theParent, _ctx=None):
return _M_omero.model.FilesetAnnotationLink._op_setParent.invoke(self, ((theParent, ), _ctx))
def begin_setParent(self, theParent, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.FilesetAnnotationLink._op_setParent.begin(self, ((theParent, ), _response, _ex, _sent, _ctx))
def end_setParent(self, _r):
return _M_omero.model.FilesetAnnotationLink._op_setParent.end(self, _r)
def getChild(self, _ctx=None):
return _M_omero.model.FilesetAnnotationLink._op_getChild.invoke(self, ((), _ctx))
def begin_getChild(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.FilesetAnnotationLink._op_getChild.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getChild(self, _r):
return _M_omero.model.FilesetAnnotationLink._op_getChild.end(self, _r)
def setChild(self, theChild, _ctx=None):
return _M_omero.model.FilesetAnnotationLink._op_setChild.invoke(self, ((theChild, ), _ctx))
def begin_setChild(self, theChild, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.FilesetAnnotationLink._op_setChild.begin(self, ((theChild, ), _response, _ex, _sent, _ctx))
def end_setChild(self, _r):
return _M_omero.model.FilesetAnnotationLink._op_setChild.end(self, _r)
def link(self, theParent, theChild, _ctx=None):
return _M_omero.model.FilesetAnnotationLink._op_link.invoke(self, ((theParent, theChild), _ctx))
def begin_link(self, theParent, theChild, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_omero.model.FilesetAnnotationLink._op_link.begin(self, ((theParent, theChild), _response, _ex, _sent, _ctx))
def end_link(self, _r):
return _M_omero.model.FilesetAnnotationLink._op_link.end(self, _r)
def checkedCast(proxy, facetOrCtx=None, _ctx=None):
return _M_omero.model.FilesetAnnotationLinkPrx.ice_checkedCast(proxy, '::omero::model::FilesetAnnotationLink', facetOrCtx, _ctx)
checkedCast = staticmethod(checkedCast)
def uncheckedCast(proxy, facet=None):
return _M_omero.model.FilesetAnnotationLinkPrx.ice_uncheckedCast(proxy, facet)
uncheckedCast = staticmethod(uncheckedCast)
def ice_staticId():
return '::omero::model::FilesetAnnotationLink'
ice_staticId = staticmethod(ice_staticId)
_M_omero.model._t_FilesetAnnotationLinkPrx = IcePy.defineProxy('::omero::model::FilesetAnnotationLink', FilesetAnnotationLinkPrx)
_M_omero.model._t_FilesetAnnotationLink = IcePy.declareClass('::omero::model::FilesetAnnotationLink')
_M_omero.model._t_FilesetAnnotationLink = IcePy.defineClass('::omero::model::FilesetAnnotationLink', FilesetAnnotationLink, -1, (), True, False, _M_omero.model._t_IObject, (), (
('_version', (), _M_omero._t_RInt, False, 0),
('_parent', (), _M_omero.model._t_Fileset, False, 0),
('_child', (), _M_omero.model._t_Annotation, False, 0)
))
FilesetAnnotationLink._ice_type = _M_omero.model._t_FilesetAnnotationLink
FilesetAnnotationLink._op_getVersion = IcePy.Operation('getVersion', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, None, (), (), (), ((), _M_omero._t_RInt, False, 0), ())
FilesetAnnotationLink._op_setVersion = IcePy.Operation('setVersion', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, None, (), (((), _M_omero._t_RInt, False, 0),), (), None, ())
FilesetAnnotationLink._op_getParent = IcePy.Operation('getParent', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, None, (), (), (), ((), _M_omero.model._t_Fileset, False, 0), ())
FilesetAnnotationLink._op_setParent = IcePy.Operation('setParent', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, None, (), (((), _M_omero.model._t_Fileset, False, 0),), (), None, ())
FilesetAnnotationLink._op_getChild = IcePy.Operation('getChild', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, None, (), (), (), ((), _M_omero.model._t_Annotation, False, 0), ())
FilesetAnnotationLink._op_setChild = IcePy.Operation('setChild', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, None, (), (((), _M_omero.model._t_Annotation, False, 0),), (), None, ())
FilesetAnnotationLink._op_link = IcePy.Operation('link', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, None, (), (((), _M_omero.model._t_Fileset, False, 0), ((), _M_omero.model._t_Annotation, False, 0)), (), None, ())
_M_omero.model.FilesetAnnotationLink = FilesetAnnotationLink
del FilesetAnnotationLink
_M_omero.model.FilesetAnnotationLinkPrx = FilesetAnnotationLinkPrx
del FilesetAnnotationLinkPrx
# End of module omero.model
__name__ = 'omero'
# End of module omero
| [
"nima.seyedtalebi@uky.edu"
] | nima.seyedtalebi@uky.edu |
56faaf1025413066c3e894299e4ff857a737b8fd | c0eba907151b17d867cc079c4da85c904fd96f6e | /opencv_threshold.py | 08217424e5859063a6eaf4f2313cc32e6cea711c | [] | no_license | Inamdarpushkar/opencv | 5d11e7363e4379a3a23eacc0047f52bbcad6c4fa | 69c847660deb603f2877813cc12aad14ac933460 | refs/heads/master | 2020-04-04T01:08:33.524062 | 2018-11-28T04:31:00 | 2018-11-28T04:31:00 | 155,669,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | import cv2
import numpy as np
#importing an image
img=cv2.imread('bookpage.jpg')
#Setting a threshold of 12 (or some other value based on the brightness)
# as the image has low light threshold value is low. So any value above 12 is converted to hight
# DM value, 255 is max DM value.
retval,threshold=cv2.threshold(img,12,255,cv2.THRESH_BINARY)
#converting to grayscale image
grayscaled=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
retval,threshold2=cv2.threshold(grayscaled,12,255,cv2.THRESH_BINARY)
#adaptive threshold (adjust threshold based on the global DM values)
gaus=cv2.adaptiveThreshold(grayscaled,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,155,1)
#
retval,otsu=cv2.threshold(grayscaled,125,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
#displaying images
cv2.imshow('original',img)
cv2.imshow('threshold',threshold)
cv2.imshow('threshold2',threshold2)
cv2.imshow('Gaussian',gaus)
cv2.imshow('otsu',otsu)
#keyboard connection press any key to destroyAllWindows
cv2.waitKey(0)
cv2.destroysallwindows()
| [
"inamdarpushkar27@gmail.com"
] | inamdarpushkar27@gmail.com |
7d09590beebb9c945f1bcf5efbd6f72df73221a0 | 364f60e80b25251b57c79ec3c5c88065731320be | /resdk/tests/unit/test_decorators.py | 28ffe6332ad4275a9a551939065bebeacfd62dd8 | [
"Apache-2.0"
] | permissive | tristanbrown/resolwe-bio-py | 2c9f4463b1c82005cec209c1374ed4ac673658e7 | c911defde8a5e7e902ad1adf4f9e480f17002c18 | refs/heads/master | 2018-10-14T06:20:46.734255 | 2018-09-19T21:57:10 | 2018-09-19T21:57:10 | 110,161,960 | 0 | 0 | Apache-2.0 | 2018-09-19T21:57:11 | 2017-11-09T20:19:59 | Python | UTF-8 | Python | false | false | 719 | py | """
Unit tests for resdk/utils/decorators.py file.
"""
# pylint: disable=missing-docstring, protected-access
import unittest
from resdk.utils.decorators import return_first_element
class TestDecorators(unittest.TestCase):
def test_return_first_element(self):
@return_first_element
def test_function():
return [1]
self.assertEqual(test_function(), 1)
@return_first_element
def test_function_2():
return [1, 2]
with self.assertRaises(RuntimeError):
test_function_2()
@return_first_element
def test_function_3():
return 1
with self.assertRaises(TypeError):
test_function_3()
| [
"domen@blenkus.com"
] | domen@blenkus.com |
e92c33170af2ea34bb7c4563777a9a051b82ae01 | 8a6617724716ba8cfeb82ec1fa69f60a3e67f280 | /chapter7.regex/tasks/version_of_strip.py | dbd3dc77c1bafe9cc9ce279f6c913c46d1a9c8a2 | [] | no_license | Jump1556/automatetheboringstuff | 6443d90abfa0c096c99ccf7b6294f8a55d74d5c9 | 71f6c92822549b55ba5ddc248d9373dbff1a813c | refs/heads/master | 2021-07-18T19:19:51.396608 | 2017-10-27T07:58:31 | 2017-10-27T07:58:31 | 105,357,804 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | #! /usr/bin/env python3.
# version_of_strip.py - Check if the password string is strong enough.
import re
def stripRegex(s, toStrip='' ):
if toStrip=='':
toStrip = '\s'
pattern= re.compile(r'^{0}|{0}$'.format(toStrip))
return pattern.sub('', s)
print(stripRegex("hh hgts hh", "hh"))
| [
"anastasiia.kostashenko@gmail.com"
] | anastasiia.kostashenko@gmail.com |
36c233913754da01984f01cdeba0db5add042e73 | ec1ce8cc6133cc92ed48631f4c93e6768986894b | /venv_pycmd/bin/python-config | 13b4afa9cc3cd436d5f0405907b2a0e20c5632cf | [] | no_license | khankth/pycmd | b861377c92b521d590ba1134b056e61132f3128a | 5d5118e1c67364fbd24b547af560a08b9ca0a78e | refs/heads/master | 2020-03-08T09:46:54.900992 | 2018-06-18T08:04:04 | 2018-06-18T08:04:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,357 | #!/home/adnan/MyProjects/pycmd/venv_pycmd/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"cn_adnan@hotmail.com"
] | cn_adnan@hotmail.com | |
103b6265ea51a32f709f6921cdce54653589b647 | 634e8b48ef6e989920ac97c92ed0b01de76b06e9 | /.venv/Lib/site-packages/stdnum/do/rnc.py | ddc6c3b6fb3824b8fac5afc7bd9ec1b77b84b833 | [] | permissive | heltonteixeira92/Proj_teste | 7ff89f7382d307c7bb9778f7dfed7252ac300441 | cf819aa737cd0572f591e1136df9831267d46815 | refs/heads/master | 2023-05-15T07:40:51.291345 | 2021-06-13T22:06:46 | 2021-06-13T22:06:46 | 336,849,472 | 0 | 0 | MIT | 2021-02-09T16:02:12 | 2021-02-07T17:42:01 | Python | UTF-8 | Python | false | false | 6,947 | py | # rnc.py - functions for handling Dominican Republic tax registration
# coding: utf-8
#
# Copyright (C) 2015-2018 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
# Development of this functionality was funded by iterativo | https://iterativo.do
"""RNC (Registro Nacional del Contribuyente, Dominican Republic tax number).
The RNC is the Dominican Republic taxpayer registration number for
institutions. The number consists of 9 digits.
>>> validate('1-01-85004-3')
'101850043'
>>> validate('1018A0043')
Traceback (most recent call last):
...
InvalidFormat: ...
>>> validate('101850042')
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('131246796')
'1-31-24679-6'
"""
import json
from stdnum.exceptions import *
from stdnum.util import clean, get_soap_client, isdigits
# list of RNCs that do not match the checksum but are nonetheless valid
whitelist = set('''
101581601 101582245 101595422 101595785 10233317 131188691 401007374
501341601 501378067 501620371 501651319 501651823 501651845 501651926
501656006 501658167 501670785 501676936 501680158 504654542 504680029
504681442 505038691
'''.split())
dgii_wsdl = 'https://www.dgii.gov.do/wsMovilDGII/WSMovilDGII.asmx?WSDL'
"""The WSDL URL of DGII validation service."""
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, ' -').strip()
def calc_check_digit(number):
"""Calculate the check digit."""
weights = (7, 9, 8, 6, 5, 4, 3, 2)
check = sum(w * int(n) for w, n in zip(weights, number)) % 11
return str((10 - check) % 9 + 1)
def validate(number):
"""Check if the number provided is a valid RNC."""
number = compact(number)
if not isdigits(number):
raise InvalidFormat()
if number in whitelist:
return number
if len(number) != 9:
raise InvalidLength()
if calc_check_digit(number[:-1]) != number[-1]:
raise InvalidChecksum()
return number
def is_valid(number):
"""Check if the number provided is a valid RNC."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the number to the standard presentation format."""
number = compact(number)
return '-'.join((number[:1], number[1:3], number[3:-1], number[-1]))
def _convert_result(result): # pragma: no cover
"""Translate SOAP result entries into dicts."""
translation = {
'RGE_RUC': 'rnc',
'RGE_NOMBRE': 'name',
'NOMBRE_COMERCIAL': 'commercial_name',
'CATEGORIA': 'category',
'REGIMEN_PAGOS': 'payment_regime',
'ESTATUS': 'status',
'RNUM': 'result_number',
}
return dict(
(translation.get(key, key), value)
for key, value in json.loads(result.replace('\n', '\\n').replace('\t', '\\t')).items())
def check_dgii(number, timeout=30): # pragma: no cover
"""Lookup the number using the DGII online web service.
This uses the validation service run by the the Dirección General de
Impuestos Internos, the Dominican Republic tax department to lookup
registration information for the number. The timeout is in seconds.
Returns a dict with the following structure::
{
'rnc': '123456789', # The requested number
'name': 'The registered name',
'commercial_name': 'An additional commercial name',
'status': '2', # 1: inactive, 2: active
'category': '0', # always 0?
'payment_regime': '2', # 1: N/D, 2: NORMAL, 3: PST
}
Will return None if the number is invalid or unknown."""
# this function isn't automatically tested because it would require
# network access for the tests and unnecessarily load the online service
number = compact(number)
client = get_soap_client(dgii_wsdl, timeout)
result = client.GetContribuyentes(
value=number,
patronBusqueda=0, # search type: 0=by number, 1=by name
inicioFilas=1, # start result (1-based)
filaFilas=1, # end result
IMEI='')
if result and 'GetContribuyentesResult' in result:
result = result['GetContribuyentesResult'] # PySimpleSOAP only
if result == '0':
return
result = [x for x in result.split('@@@')]
return _convert_result(result[0])
def search_dgii(keyword, end_at=10, start_at=1, timeout=30): # pragma: no cover
"""Search the DGII online web service using the keyword.
This uses the validation service run by the the Dirección General de
Impuestos Internos, the Dominican Republic tax department to search the
registration information using the keyword.
The number of entries returned can be tuned with the `end_at` and
`start_at` arguments. The timeout is in seconds.
Returns a list of dicts with the following structure::
[
{
'rnc': '123456789', # The found number
'name': 'The registered name',
'commercial_name': 'An additional commercial name',
'status': '2', # 1: inactive, 2: active
'category': '0', # always 0?
'payment_regime': '2', # 1: N/D, 2: NORMAL, 3: PST
'result_number': '1', # index of the result
},
...
]
Will return an empty list if the number is invalid or unknown."""
# this function isn't automatically tested because it would require
# network access for the tests and unnecessarily load the online service
client = get_soap_client(dgii_wsdl, timeout)
results = client.GetContribuyentes(
value=keyword,
patronBusqueda=1, # search type: 0=by number, 1=by name
inicioFilas=start_at, # start result (1-based)
filaFilas=end_at, # end result
IMEI='')
if results and 'GetContribuyentesResult' in results:
results = results['GetContribuyentesResult'] # PySimpleSOAP only
if results == '0':
return []
return [_convert_result(result) for result in results.split('@@@')]
| [
"heltonteixeiradesouza@hotmail.com"
] | heltonteixeiradesouza@hotmail.com |
f0c1e0f1abc82a92b48b4fa94f5cb6e2a65dda5b | a0a839332d41621056567d2082b55899228ffd51 | /taskI.py | 6b61445e07c58a9323d397a2f090c25e00ba391b | [] | no_license | Oxbech/F2020_RES_project_handin | 44970ef01b808cf2cc216aa3af26ae9d85c98423 | 78c51617897bd4b8cbb851106eb76cd25678d24a | refs/heads/main | 2023-02-23T13:44:17.323976 | 2021-01-27T22:13:29 | 2021-01-27T22:13:29 | 333,574,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,277 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 14:04:30 2020
@author: Anders
"""
## ## ## ## ## ## ## ## IMPORTING PACKAGES ## ## ## ## ## ## ## ##
import pypsa
import pandas as pd
import matplotlib.pyplot as plt
## ## ## ## ## ## ## ## IMPORTING DATA ## ## ## ## ## ## ## ##
# electricity demand in MWh
data_elec = pd.read_csv("../data/electricity_demand.csv", sep=";", index_col=0)
# onshore wind generation capacity factors
data_wind = pd.read_csv("../data/CF_onshore_wind.csv", sep=";", index_col=0)
# PV capacity factors
data_solar = pd.read_csv("../data/CF_pv_optimal.csv", sep=";", index_col=0)
# Hydro inflow in GWh
data_hydro = pd.read_csv("../data/hydro_inflow.csv", sep=",")
# creating a column of dates, by concatenating the three first columns
data_hydro["date"] = data_hydro["Year"].astype(str) + "-" + \
+ data_hydro["Month"].astype(str) + "-" + data_hydro["Day"].astype(str)
# converts the date column to pandas datetime-type
data_hydro["date"] = pd.to_datetime(data_hydro["date"])
# removes the extraneous columns
data_hydro = pd.DataFrame({"date": data_hydro["date"],
"inflow": data_hydro["Inflow [GWh]"]})
# adding a "Not A Number" row to get data resampled for the 31st of december
# of the final year of data, 2012
data_hydro.loc[len(data_hydro.index)] = [pd.to_datetime("2013-01-01T00:00")
,float("NaN")]
# sets the date column to be the index of the DataFrame
data_hydro = data_hydro.set_index("date")
# divides the inflow with 24 hours to get the inflow per hour, i.e. as power
# in MW
data_hydro["inflow"] = data_hydro["inflow"]/24*1e3
# resamples the data into 1 hour intervals using the foward fill method, i.e.
# the last valid observation will be propagated forward
data_hydro = data_hydro.resample("H",convention="end").ffill()
# removes all rows with no data, i.e. the final value, which was required to
# get values for all hours of the 31st of december 2012
data_hydro = data_hydro.dropna(how="any",axis=0)
# creates a series from the DataFrame for the last non-leap year inflow data
# is available for, 2011 [MW]
hydro_inflow = data_hydro.loc["2011-01-01":"2011-12-31","inflow"]
## ## ## ## ## ## ## ## FUNCTIONS ## ## ## ## ## ## ## ##
def annuity(n,r):
"""Calculate the annuity factor for an asset with a liftime of n years and
discount rate of r, e.g. annuity(20,0.05)*20 = 1.6"""
if r > 0:
return r/( 1. - 1. / (1. + r)**n )
else:
return 1/n
## ## ## ## ## ## ## ## CREATE NETWORK ## ## ## ## ## ## ## ##
# naming the network "network"
network = pypsa.Network()
# creating series of hours to be included in the calculation
hours_in_2015 = pd.date_range("2015-01-01T00:00Z","2015-12-31T23:00Z",
freq="H")
# seting the timestamps to calulate for
network.set_snapshots(hours_in_2015)
# adding the electricity system/bus, name: "electricity bus"
network.add("Bus","electricity bus")
# electricity demand in MWh for Austria
demand_elec = data_elec["AUT"]
# adding the load to the electricty bus, i.e. the electricity demand.
# Name: "load"
network.add("Load","load",bus="electricity bus",p_set=demand_elec)
## ## ## ## ## ## ## ## ADD GENERATORS ## ## ## ## ## ## ## ##
# onshore wind generation capacity factors
CF_wind = data_wind["AUT"]\
[[hour.strftime("%Y-%m-%dT%H:%M:%SZ") for hour in network.snapshots]]
# PV capacity factors
CF_solar = data_solar["AUT"]\
[[hour.strftime("%Y-%m-%dT%H:%M:%SZ") for hour in network.snapshots]]
# changing the index of the hydro inflow series to match the dates of the
# network snapshots.
hydro_inflow = hydro_inflow.reset_index(drop=True)
hydro_inflow = pd.DataFrame({"date": network.snapshots, "inflow": hydro_inflow})
hydro_inflow = hydro_inflow.set_index("date")
hydro_inflow = hydro_inflow.loc[:,"inflow"]
# adding the different energy carriers, only gas emits CO2
network.add("Carrier","gas",co2_emissions=0.19) # ton_CO2/MWh_th
network.add("Carrier","onshorewind")
network.add("Carrier","solar")
network.add("Carrier","hydro")
network.add("Carrier","electricity")
network.add("Carrier","hydrogen")
network.add("Carrier","nuclear")
# annualizing capital (overnight and FOM) costs for onshore wind over 30 years
# with a discount rate of 0.07
capital_cost_onshorewind = annuity(30,0.07)*910e3*(1 + 0.033) # €/MW
# adding onshore wind generator
network.add("Generator",
"onshorewind",
bus="electricity bus",
p_nom_extendable=True, # the capacity can be extended
carrier="onshorewind",
capital_cost=capital_cost_onshorewind,
marginal_cost=0, # no fuel costs
p_max_pu=CF_wind
)
# annualizing capital (overnight and FOM) costs for utility scale solar
# generation over 25 years with a discount rate of 0.07
capital_cost_solar = annuity(25,0.07)*425e3*(1 + 0.03) # €/MW
# adding ultility scale solar power generator
network.add("Generator",
"solar",
bus="electricity bus",
p_nom_extendable=True, # the capacity can be extended
carrier="solar",
capital_cost=capital_cost_solar,
marginal_cost=0, # no fuel costs
p_max_pu=CF_solar
)
# annualizing capital (overnight and FOM) costs for OCGT, Open Cycle Gas
# Turbine which serves as backup-power, over 25 years with a discount rate of
# 0.07
capital_cost_OCGT = annuity(25,0.07)*560e3*(1 + 0.033) # €/MW
# fuel cost of gas for OCGT in €/MWh_th
fuel_cost_OCGT = 21.6
# efficiency of OCGT
efficiency_OCGT = 0.39
# marginal (fuel) cost for OCGT per MWh electricity produced, €/MWh_el
marginal_cost_OCGT = fuel_cost_OCGT / efficiency_OCGT
# adding OCGT (backup) power generator
network.add("Generator",
"OCGT",
bus="electricity bus",
p_nom_extendable=True, # the capacity can be extended
carrier="gas",
capital_cost=capital_cost_OCGT,
marginal_cost=marginal_cost_OCGT,
efficiency=efficiency_OCGT
)
# fuel cost of uranium in €/MWh_el
fuel_cost_nuclear = 3.2
# marginal cost for nuclear per MWh electricity produced, €/MWh_el
marginal_cost_nuclear = fuel_cost_nuclear / 0.14 # fuel cost is 14 % of the total
# adding OCGT (backup) power generator
network.add("Generator",
"nuclear",
bus="electricity bus",
p_nom=692, # Zwentendorf Nuclear Power Plant [MW]
p_nom_extendable=False, # the capacity can be extended
carrier="nuclear",
capital_cost=0,
marginal_cost=marginal_cost_nuclear
)
# annualizing capital (overnight and FOM) costs for run-of-river hydro
# generation over 80 years with a discount rate of 0.07
capital_cost_hydro = annuity(80,0.07)*2000e3*(1 + 0.01) # €/MW
# adding hydro generation as a storage unit
network.add("StorageUnit",
"hydro",
bus="electricity bus",
p_nom=13427.4, # from Wagner et al. (total)
p_nom_extendable=False,
carrier="hydro",
capital_cost=capital_cost_hydro,
marginal_cost=0, # no fuel costs
efficiency_store=0.87,
efficiency_dispatch=0.87,
inflow=hydro_inflow,
p_min_pu=-0.5844, # equivalent to the installed power capacity
max_hours=3.5065, # equivalent to the installed storage capacity
cyclic_state_of_charge=True # intial SoC = final SoC
)
## ## ## ## ## ## ## ## CONSTRAIN THE NETWORK ## ## ## ## ## ## ## ##
# ton CO2 equivalents emitted from energy in 1990
co2_1990 = 14e6
# 5% of 1990 emissions allowed
co2_percentage = 0.05
# calculating the equivalent limits on CO2 emissions in ton CO2 equivalents
co2_limit = co2_percentage*co2_1990 # tonCO2e
network.add("GlobalConstraint",
"co2_limit",
type="primary_energy",
carrier_attribute="co2_emissions",
sense="<=",
constant=co2_limit
)
## ## ## ## ## ## ## ## ADD BATTERIES ## ## ## ## ## ## ## ##
# annualizing capital (overnight and FOM) costs for battery (inverter) power
# capacity over 20 years with a discount rate of 0.07
capital_cost_inverter = annuity(20,0.07)*310e3*(1 + 0.01) # €/MW
# capital cost of batteries for every MWh storage capacity, annualized over
# 15 years with a discount rate of 0.07
capital_cost_batteries = annuity(20,0.07)*144.6e3*(1 + 0) # €/MWh
# battery inverter efficiency
efficiency_inverter = 0.9
# adding a battery bus
network.add("Bus",
"battery bus",
carrier="DC")
# adding a link to charge and discharge the batteries from the grid
network.add("Link",
"inverter",
bus0="battery bus",
bus1="electricity bus",
p_nom_extendable=True,
p_min_pu=-1, # link is reversible
capital_cost=capital_cost_inverter*efficiency_inverter,
marginal_cost=0,
efficiency=efficiency_inverter
)
# adding a store to store the electricity in the batteries
network.add("Store",
"batteries",
bus="battery bus",
e_nom_extendable=True,
capital_cost=capital_cost_batteries,
e_cyclic=True,
)
## ## ## ## ## ## ## ## ADD HYDROGEN STORAGE ## ## ## ## ## ## ## ##
# annualizing capital (overnight and FOM) costs for hydrogen power capacity,
# e.g. electrolysis and fuel cells, annualized over 18 and 20 years
# respectively with a discount rate of 0.07
capital_cost_fuelCell = annuity(20,0.07)*339e3*(1 + 0.03) # €/MW
capital_cost_electrolysis = annuity(18,0.07)*350e3*(1 + 0.04) # €/MW
# capital cost of hydrogen storage, for every MWh storage capacity, annualized
# over 20 years with a discount rate of 0.07
capital_cost_hydrogen = annuity(20,0.07)*8.4e3*(1 + 0) # €/MWh
# fuel cell (hydrogen to AC) efficiency
efficiency_fuelCell = 0.58
# electrolysis (AC to hydrogen) efficiency
efficiency_electrolysis = 0.8
# adding a battery bus
network.add("Bus",
"hydrogen bus",
carrier="hydrogen")
# adding a link to act as the fuel cell
network.add("Link",
"fuel cell",
bus0="hydrogen bus",
bus1="electricity bus",
p_nom_extendable=True,
capital_cost=capital_cost_fuelCell*efficiency_fuelCell,
efficiency=efficiency_fuelCell
)
# adding a link to act as the elctrolysis
network.add("Link",
"electrolysis",
bus0="electricity bus",
bus1="hydrogen bus",
p_nom_extendable=True,
capital_cost=capital_cost_electrolysis*efficiency_electrolysis,
efficiency=efficiency_electrolysis
)
# adding a store to store the hydrogen
network.add("Store",
"hydrogen storage",
bus="hydrogen bus",
e_nom_extendable=True,
capital_cost=capital_cost_hydrogen,
e_cyclic=True
)
## ## ## ## ## ## ## ## SOLVE THE NETWORK ## ## ## ## ## ## ## ##
network.lopf(network.snapshots,solver_name="gurobi")
#%%
## ## ## ## ## ## ## ## DATA-PROCESSING ## ## ## ## ## ## ## ##
print("\nNominal wind power capacity: {:4.0f} MW".format(
network.generators.p_nom_opt["onshorewind"]))
print("Nominal solar power capacity: {:4.0f} MW".format(
network.generators.p_nom_opt["solar"]))
print("Nominal OCGT power capacity: {:4.0f} MW".format(
network.generators.p_nom_opt["OCGT"]))
print("Nominal hydro power capacity: {:4.0f} MW".format(
network.storage_units.p_nom_opt["hydro"]))
print("Nominal nuclear power capacity: {:4.0f} MW".format(
network.generators.p_nom_opt["nuclear"]))
print("\nNominal battery power capacity: {:4.0f} MW".format(
network.links.p_nom_opt["inverter"]))
print("Nominal battery storage capacity: {:4.0f} MWh".format(
network.stores.e_nom_opt["batteries"]))
print("\nNominal electrolysis power capacity: {:4.0f} MW".format(
network.links.p_nom_opt["electrolysis"]))
print("Nominal fuel cell power capacity: {:4.0f} MW".format(
network.links.p_nom_opt["fuel cell"]))
print("Nominal hydrogen storage capacity: {:4.0f} MWh".format(
network.stores.e_nom_opt["hydrogen storage"]))
print("\nTotal system cost: {:4.0f} million €".format(
network.objective*1e-6))
print("Marginal system cost: {:2.1f} €/MWh".format(
network.objective/network.loads_t.p["load"].sum()))
print("\nCO2-limit: {:4.0f} kt".format(
network.global_constraints.constant["co2_limit"]*1e-3))
print("Relative CO2-emissions: {:2.1f}%".format(
network.global_constraints.constant["co2_limit"]*1e-6 / 14*100))
print("CO2-price: {:4.0f} €".format(
network.global_constraints.mu["co2_limit"]))
# timeperiod of dispatch time series, three days in winter
startdate = "2015-06-07"
enddate = "2015-06-10"
# plotting the electricity dispatch time series
fig = plt.figure()
plt.plot(network.loads_t.p["load"].loc[startdate:enddate],
color="black", linestyle="dashed", label="demand")
plt.plot(network.storage_units_t.p["hydro"].loc[startdate:enddate],
color="blue", label="hydro")
plt.plot(network.generators_t.p["onshorewind"].loc[startdate:enddate],
color="black", label="onshore wind")
plt.plot(network.generators_t.p["solar"].loc[startdate:enddate],
color="orange", label="solar")
plt.plot(network.generators_t.p["OCGT"].loc[startdate:enddate],
color="brown", label="gas (OCGT)")
plt.plot(network.links_t.p1["inverter"].loc[startdate:enddate],
color="cyan", label="batteries")
plt.plot((-network.links_t.p1["fuel cell"]
-network.links_t.p0["electrolysis"]).loc[startdate:enddate],
color="purple", label="hydrogen")
plt.plot(network.generators_t.p["nuclear"].loc[startdate:enddate],
color="grey", label="nuclear")
fig.autofmt_xdate()
plt.xlim([pd.Timestamp(startdate), pd.Timestamp(enddate)])
plt.ylim([-10000,20000])
plt.ylabel("MW electricity")
plt.grid(True)
plt.legend(loc="center", bbox_to_anchor=(.5,1.1), frameon=False , ncol=4)
plt.tight_layout()
plt.savefig("../LaTeX/figures/I_disp.eps")
# plotting the annual electricity mix
fig = plt.figure()
labels = ["hydro","onshore wind","solar","gas (OCGT)","nuclear"]
sizes = [network.storage_units_t.p["hydro"].sum(),
network.generators_t.p["onshorewind"].sum(),
network.generators_t.p["solar"].sum(),
network.generators_t.p["OCGT"].sum(),
network.generators_t.p["nuclear"].sum()]
colors=["blue","black","orange","brown","grey"]
plt.pie(sizes,
colors=colors,
labels=labels,
wedgeprops={"linewidth":0})
plt.axis("equal")
plt.tight_layout()
plt.savefig("../LaTeX/figures/I_mix.eps")
| [
"noreply@github.com"
] | Oxbech.noreply@github.com |
e0926f4cf8c7dee547f37e59744ced9841b8d659 | ca1bd3e57699329b533d70789b607a8fc6d3c73d | /tree/inorderTraversal.py | ea0ba6088b15a814cfc655f8b992b4ea687fb6bf | [] | no_license | msps9341012/leetcode | 6719054e049b51c89fd8dab97f25109645e79805 | 2647ac891501f479ee31b223405e209b0347fac0 | refs/heads/master | 2020-05-30T03:54:28.420608 | 2019-10-04T04:03:28 | 2019-10-04T04:03:28 | 189,524,341 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | def inorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
ans = []
stack = []
while stack or root:
if root:
stack.append(root)
root = root.left
else:
tmpNode = stack.pop()
ans.append(tmpNode.val)
root = tmpNode.right
return ans
| [
"a0113130@gmail.com"
] | a0113130@gmail.com |
ff8475b3faf9faea915e08d1cb1f7e285a8bed96 | 84b68b5dc1df9a0d88c38f7f4de3dbe371b08358 | /cars/migrations/0005_auto_20210616_1921.py | 286875a7eb9f80ae8a27a795ea879f720944045d | [] | no_license | alisamadzadeh46/car | e489c548343096ca9c547c693c5339569f221b4e | 057ea3d35cf86d44530683e06c963a54771aa386 | refs/heads/main | 2023-05-28T10:44:36.574886 | 2021-06-16T16:58:09 | 2021-06-16T16:58:09 | 374,656,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,677 | py | # Generated by Django 3.2.4 on 2021-06-16 14:51
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cars', '0004_alter_cars_features'),
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=200, verbose_name='first name')),
('last_name', models.CharField(max_length=200, verbose_name='last name')),
('car', models.IntegerField(verbose_name='car id')),
('customer_need', models.CharField(max_length=200, verbose_name='customer need')),
('car_title', models.CharField(max_length=200, verbose_name='car title')),
('city', models.CharField(max_length=200, verbose_name='city')),
('state', models.CharField(max_length=200, verbose_name='state')),
('email', models.EmailField(max_length=200, verbose_name='email')),
('phone', models.CharField(max_length=200, verbose_name='phone')),
('message', models.TextField(max_length=200, verbose_name='message')),
('user_id', models.IntegerField(blank=True, verbose_name='user id')),
('create_data', models.DateTimeField(default=datetime.datetime.now)),
],
),
migrations.AlterField(
model_name='cars',
name='vin_no',
field=models.CharField(max_length=150, verbose_name='vin'),
),
]
| [
"alisamadzadeh46@gmail.com"
] | alisamadzadeh46@gmail.com |
e4360a25775d75b4dc56801c0c489f5de3649459 | 9039324600f488ce75e29c023095f321fac339a2 | /main.py | 73e1476eb80db1616e68b39a9fc6e19965fb906f | [] | no_license | shubham25121999/Intro-Bot | d29af209d4b327f4eb7a5d00ad8ce70b013885de | 24c825700a548f42138c8c361d868728666634e2 | refs/heads/main | 2023-08-13T23:16:57.419155 | 2021-09-15T12:06:01 | 2021-09-15T12:06:01 | 406,742,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,272 | py | import discord
from discord.ext import commands
import os
from keep_alive import keep_alive
intents = discord.Intents.default()
intents.members = True
client = commands.Bot(command_prefix="!", intents=intents)
my_secret = os.environ['TOKEN']
@client.event
async def on_ready():
print("We are logged in as {0.user}".format(client))
@client.command()
async def server(ctx):
name = str(ctx.guild.name)
description = str(ctx.guild.description)
owner = str(ctx.guild.owner)
id = str(ctx.guild.id)
region = str(ctx.guild.region)
memberCount = str(ctx.guild.member_count)
icon = str(ctx.guild.icon_url)
embed = discord.Embed(
title = name + " Server Information",
description = description,
color = discord.Color.blue()
)
embed.set_thumbnail(url=icon)
embed.add_field(name="Owner", value=owner, inline=True)
embed.add_field(name="Server ID", value=id, inline=True)
embed.add_field(name="Region", value=region, inline=True)
embed.add_field(name="Member Count", value=memberCount, inline=True)
embed.add_field(name = "GREETINGS!", value = name + " warmly welcomes you!! Hope you enjoy your stay here!!", inline = False)
await ctx.send(embed=embed)
keep_alive()
client.run(my_secret) | [
"shubhamchauhan058@gmail.com"
] | shubhamchauhan058@gmail.com |
57e736dab3fabc755212d3a77678288b744088d2 | 0359deb1a798cb50934612d35df087d489ebe423 | /average_2.py | a08f32e58b484bd8af86e36335ee36ff9ed852ca | [] | no_license | lcdlima/python_projects | 42595f07db74a5adc1ccc05f996d21138ff9ece6 | b571d764902fdcab6fe6250623b079b637929e42 | refs/heads/master | 2021-01-02T07:26:49.817364 | 2020-10-02T20:45:24 | 2020-10-02T20:45:24 | 239,547,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | # Read 3 values, in this case, variables A, B and C, which are the three grades of a student. Next, calculate the student's average, knowing that grade A has weight 2, grade B has weight 3 and grade C has weight 5. Consider that each grade can range from 0 to 10.0, always to one decimal place.
A = float (input ())
B = float (input ())
C = float (input ())
MEDIA = (A*2 + B*3 + C*5) / 10
print ('MEDIA = {:.1f}'.format (MEDIA)) | [
"leticiacristinaduartelima@Leticias-MacBook-Air.local"
] | leticiacristinaduartelima@Leticias-MacBook-Air.local |
07ed30f499f2a8a1684498705f5d91aafd7471af | 829de50fb2355053e4f142d77eb1e7b6794e25ed | /markus/turtle_from.py | 25812782074d6aee5ac3e577c613997fd7a7fb90 | [] | no_license | cp-helsinge/2020-2 | 9ab25df29ce3a718575bcda667dd158fd202d4f6 | 21249c2f31f644aa4736e4bcc3790eb4a31197ca | refs/heads/master | 2023-01-22T12:33:34.877191 | 2020-11-24T16:52:31 | 2020-11-24T16:52:31 | 291,752,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | import turtle
from opg4 import sol
turtle.hideturtle()
turtle.speed(0)
sol(50, 75, 100)
turtle.exitonclick() | [
"PythonKursus@f.hoth.dk"
] | PythonKursus@f.hoth.dk |
7bef4d422e98a9753b624259790937db203bcc73 | 7b6ed28d1f1c4966e5dca086ceb3c20ac4beea47 | /run.py | d2a02cf78b47b200e310acce0d9c7f02de17316f | [
"MIT"
] | permissive | snobu/raw-mqtt-to-azure-iot-hub | 818d60e551941dff49079f17884678fb426616e7 | b8013732d880ae734d8f24d1f1e40a0bc29ca2bf | refs/heads/master | 2023-02-09T22:11:01.787118 | 2023-01-28T15:00:20 | 2023-01-28T15:00:20 | 206,485,647 | 4 | 1 | MIT | 2021-11-05T19:17:16 | 2019-09-05T05:55:51 | Python | UTF-8 | Python | false | false | 5,776 | py | #!/usr/bin/env python3
import sys
import ssl
import certifi
import time
import json
import re
import paho.mqtt.client as mqtt
from colorama import init
init()
from colorama import Fore, Back, Style
# Set Device ID, should be the same as in the SAS token
DEVICE_ID = 'RawMqttDevice'

# Generate the SAS token with azure-cli
# https://github.com/azure/azure-iot-cli-extension#installation
# Example:
# az iot hub generate-sas-token \
#   --hub-name YOUR_IOT_HUB_NAME \
#   --device-id YOUR_DEVICE_ID \
#   --key-type primary \
#   --duration 3600 \
#   --output tsv
try:
    # Use a context manager so the handle is closed on every path;
    # the original left the file object open.
    with open("sas.token", "r") as f:
        # Token lives on the first line; strip the trailing newline.
        SAS_TOKEN = f.readline().rstrip('\n\r')
except FileNotFoundError:
    print('File "sas.token" not found.\nCreate it and place the SAS token on the first line.')
    sys.exit(404)
class DirectMethod:
    """Parsed direct-method invocation taken from an MQTT topic string."""
    # Method name, e.g. 'reboot' in .../POST/reboot/?$rid=1
    name = None
    # Request id ($rid); must be echoed back on the response topic.
    rid = None


def extract_direct_method(msg):
    """Parse the method name and request id from a direct-method topic.

    IoT Hub delivers direct methods on topics shaped like
    ``$iothub/methods/POST/{method name}/?$rid={request id}``.

    Returns a DirectMethod with ``name`` and ``rid`` populated.
    Raises ValueError if the topic does not match that shape.
    """
    direct_method = DirectMethod()
    # Raw strings: '\/', '\$' and '\w' in plain literals are invalid
    # escape sequences (DeprecationWarning, future SyntaxError).
    name_match = re.search(r'/POST/(\w+)', msg.topic)
    rid_match = re.search(r'\$rid=(\w+)', msg.topic)
    if name_match is None or rid_match is None:
        # Clearer than the IndexError that findall(...)[0] used to raise.
        raise ValueError(f'Not a direct-method topic: {msg.topic!r}')
    direct_method.name = name_match.group(1)
    direct_method.rid = rid_match.group(1)
    return direct_method
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
#client.subscribe("$SYS/#")
print('Subscribing to device specific message topic...')
client.subscribe(f'devices/{DEVICE_ID}/messages/devicebound/#')
print('Subscribing to direct method topic...')
client.subscribe('$iothub/methods/POST/#')
# To respond, the device sends a message with a valid JSON or empty body
# to the topic $iothub/methods/res/{status}/?$rid={request id}.
# In this message, the request ID must match the one in the request message,
# and status must be an integer.
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
# Handle cloud to device messages
if f'devices/{DEVICE_ID}/messages/devicebound/' in msg.topic:
print(Back.CYAN +
'*** Received Cloud to Device Message:' +
Style.RESET_ALL + '\n' +
f' Topic: {msg.topic}\n' +
f' Payload: {msg.payload}\n' +
f' QoS: {msg.qos}\n')
# Handle direct methods
if '$iothub/methods/' in msg.topic:
direct_method = extract_direct_method(msg)
print(Back.YELLOW +
'*** Received Direct Method invocation:' +
Style.RESET_ALL + '\n' +
f' Topic: {msg.topic}\n' +
f' Method name: {direct_method.name}\n' +
f' Request id: {direct_method.rid}\n')
# Execute direct method logic then reply with status
time.sleep(2)
response_code = 200
response_payload = {
'all_good': True,
'battery': 'good'
}
client.publish(f'$iothub/methods/res/{response_code}/?$rid={direct_method.rid}',
payload=json.dumps(response_payload),
qos=0,
retain=False)
print('Sent direct method response.')
print('Parsing msg.payload -')
try:
p = json.loads(str(msg.payload, 'utf-8'))
payload = json.dumps(p, indent=4)
print(f'Payload: \n{payload}')
except:
print(Fore.RED + 'Unable to deserialize msg.payload as JSON.')
print(Style.RESET_ALL)
def on_publish(client, userdata, result):
print("\nData published.\n")
# Always use MQTT v3.1.1 with Azure IoT Hub.
# MQTT v3.1 won't be able to connect at all (Paho error 3 or 5).
client = mqtt.Client(client_id=DEVICE_ID, clean_session=True, userdata=None, protocol=mqtt.MQTTv311, transport='tcp')
client.on_connect = on_connect
client.on_message = on_message
client.on_publish = on_publish
# Connect to Azure IoT Hub
ca_bundle = certifi.where()
print(f'Using CA bundle provided by certifi, version {certifi.__version__}.')
# If you don't specify your own CA bundle PEM file, Python will
# attempt to use the default CA on the system.
client.tls_set(ca_certs=ca_bundle, tls_version=ssl.PROTOCOL_TLSv1_2)
client.tls_insecure_set(False)
# DEBUG
# print(f'USERNAME: poorlyfundedskynet.azure-devices.net/{DEVICE_ID}/?api-version=2021-04-12')
# print(f'PASSWORD: {SAS_TOKEN}')
client.username_pw_set(username=f'poorlyfundedskynet.azure-devices.net/{DEVICE_ID}/?api-version=2021-04-12', password=SAS_TOKEN)
#client._ssl_context.load_verify_locations(cafile='badssl.crl.pem')
#client._ssl_context.load_verify_locations(cafile='microsoftca4.crl.pem')
#client._ssl_context.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
client.connect('poorlyfundedskynet.azure-devices.net', 8883, 60)
#client.connect('revoked.badssl.com', 443, 60)
print('------------ TLS session info ----------')
print(client._ssl_context.protocol)
print('----------------------------------------')
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
#client.loop_forever()
client.loop_start()
SLEEP_DELAY = 10
while True:
print('Publishing message to broker..')
message = {
"DeviceId": f"{DEVICE_ID}",
"Message": "Hey, this is a telemetry message sent to IoT Hub.",
"SentAt": f"{time.strftime('%Y-%m-%d %H:%M:%S')}"
}
json_message= json.dumps(message)
client.publish(f'devices/{DEVICE_ID}/messages/events/',
payload=json_message, qos=1, retain=False)
print(f'Sleeping for {SLEEP_DELAY} seconds.')
time.sleep(SLEEP_DELAY)
| [
"foo@snobu.org"
] | foo@snobu.org |
d41e913021a7c3a702cecbb85707e0cc13298952 | 3754976626969ea80dddf7461492124a980afdf5 | /ch03_polls/polls/urls.py | 4eb5d298c7f69b6fae39e3874f78b685a8f675cf | [] | no_license | sujinkim123/bookmark | 1b9b3e533acd8f1ae9eaa4b561c85b7e75b01e4f | 63c9cd53b355e8acadf29115c8f6debd985cc949 | refs/heads/master | 2022-11-07T04:47:22.272239 | 2020-06-25T19:09:49 | 2020-06-25T19:09:49 | 261,143,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | from django.urls import path
from . import views
app_name = 'polls' # 앱의 이름공간을 지정
urlpatterns = [
# ex: /polls/
# path('', views.index, name='index'),
path('', views.IndexView.as_view(), name='index'),
# ex: /polls/5/
# path('<int:question_id>/', views.detail, name='detail'),
path('<int:pk>/', views.DetailView.as_view(), name='detail'),
# ex: /polls/5/results/
# path('<int:question_id>/results/', views.results, name='results'),
path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
# ex: /polls/5/vote/
path('<int:question_id>/vote/', views.vote, name='vote'),
] | [
"alicekim0429@naver.com"
] | alicekim0429@naver.com |
20fb46422e84edef2c7809df4a941b1caec43a31 | 93e8d6dbadd1dff81169c9c1ede11b604c8614f0 | /Prob_2.py | 50b8717b062c85188b97e3504a240deac50ee203 | [] | no_license | nabendu96/assignment_4 | 2f40449163da91df11689e42a6fe867093b173af | c3faaa061dca4ea8bcd5863642f9cb3992d12133 | refs/heads/master | 2022-09-18T17:19:07.196021 | 2020-06-02T18:41:52 | 2020-06-02T18:41:52 | 268,880,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | # -*- coding: utf-8 -*-
"""
Created on Sun May 24 07:55:01 2020
@author: nabendu
"""
import numpy as np
import matplotlib.pyplot as plt
n=10000
numpy_random=np.random.rand(n) #generating uniformly distributed random numbers between 0 and 1 using numpy.random.rand()
plt.hist(numpy_random,range=(0.0,1.0),density=True,label=r'density histrogram of numpy randoms') #density histogram of the generated numbers
plt.plot(np.linspace(0,1,n),np.ones(n),lw=3,color='r',label=r'uniform pdf') #plotting uniform pdf
plt.xlabel(r'$x_i$',fontsize=20)
plt.ylabel(r'PDF',fontsize=20)
plt.legend()
plt.show()
| [
"noreply@github.com"
] | nabendu96.noreply@github.com |
01950a3e9d1df0514903d2214dca8d8a31a4269a | 72e598db1227386679c6b67e5244bbdf0db38574 | /index.py | 1c9f7bd93fbccadd2cba3a85f100be3b2c218e86 | [] | no_license | Neetika23/Liver-Patient-Prediction | 3c4e49e39395908a2db2ddca2d64564e46d5b888 | 8ad09470b3e538b517e3f6bc3c5c013bcf124b2b | refs/heads/master | 2022-11-18T16:03:32.181911 | 2020-07-01T08:28:17 | 2020-07-01T08:28:17 | 276,318,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,948 | py | # Necessary Imports
from sklearn.metrics import accuracy_score()
form sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
form sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.ensemble import VotingClassifier
# Load the dataset(link is in the READ_ME)
# splitting
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state=SEED)
# Set seed for reproducibility
SEED=1
# Instantiate lr
lr = LogisticRegression(random_state=SEED)
# Instantiate knn
knn = KNeighborsClassifier(n_neighbors=27)
# Instantiate dt
dt = DecisionTreeClassifier(min_samples_leaf=0.13, random_state=SEED)
# Define the list classifiers
classifiers = [('Logistic Regression', lr), ('K Nearest Neighbours', knn), ('Classification Tree', dt)]
# Iterate over the pre-defined list of classifiers
for clf_name, clf in classifiers:
# Fit clf to the training set
clf.fit(X_train,y_train)
# Predict y_pred
y_pred = clf.predict(X_test)
# Calculate accuracy
accuracy = accuracy_score(y_test,y_pred)
# Evaluate clf's accuracy on the test set
print('{:s} : {:.3f}'.format(clf_name, accuracy))
# Logistic Regression : 0.747
# K Nearest Neighbours : 0.724
# Classification Tree : 0.730
# Import VotingClassifier from sklearn.ensemble
from sklearn.ensemble import VotingClassifier
# Instantiate a VotingClassifier vc
vc = VotingClassifier(estimators=classifiers)
# Fit vc to the training set
vc.fit(X_train,y_train)
# Evaluate the test set predictions
y_pred = vc.predict(X_test)
# Calculate accuracy score
accuracy = accuracy_score(y_test,y_pred)
print('Voting Classifier: {:.3f}'.format(accuracy))
# Voting Classifier : 0.753
# the voting classifier achieves a test set accuracy of 75.3%. This value is greater than that achieved by LogisticRegression.
| [
"noreply@github.com"
] | Neetika23.noreply@github.com |
76f52ded389704cc1f03f9da619f641d2905a688 | 28be9f00d885ec8db4ad0bf9b5ae024f66739723 | /Circle Turtle.py | 213c8a084555cae43183c807f9dc0887f6407709 | [] | no_license | Annahariprasad/Python-Programs | 5e6be33e487e6bae9de3d12d13e22af7d697e7df | 8f025dc687eb68b0feeaf6c729ae38cb098e9da3 | refs/heads/main | 2023-03-11T12:38:06.466675 | 2021-03-04T07:16:43 | 2021-03-04T07:16:43 | 334,987,047 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | import turtle,math
def circle(t,l,a,n):
for i in range(n):
t.fd(l)
t.lt(a)
t = turtle.Turtle()
t.color('red','blue')
t.pensize(2)
r = int(input('Enter radius : '))
n = int(input('Enter no. of parts : '))
cir = (2*math.pi*r)
l = cir/n
a = 360/n
t.begin_fill()
circle(t,l,a,n)
t.end_fill()
turtle.mainloop()
| [
"noreply@github.com"
] | Annahariprasad.noreply@github.com |
28298fdfa04aed081723678be28c5cd0d2073613 | 8db482694e8574a7a6481eb0862c3992f91b76ee | /smiconverter.py | b572d6305758382afcc3d024806053c5d521a4ad | [] | no_license | mlacayoemery/pointpatternanalyst | e0b6d16100634811f513a3ed019085fc7ffae208 | 3c2fad93818a93ada0efde7518d1fc1a5d5e1be3 | refs/heads/master | 2021-03-12T23:36:26.207083 | 2018-06-20T06:48:53 | 2018-06-20T06:48:53 | 32,144,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | import sys
import os
inFileName = sys.argv[1]
outFileName = sys.argv[2]
inFile = open(inFileName)
outFile = open(outFileName, 'w')
line = ""
while line != "##":
line = inFile.readline().strip()
#header
inFile.readline()
#message
start = int(inFile.readline().strip().split("\t")[0])
outFile.write("\n" * 18)
outFile.write("\nTimestamp\tStimuliName\tAoiNames\tGazePointX\tGazePointY\tFixationDuration")
for line in inFile:
line = line.strip().split("\t")
outFile.write("\n"+"\t".join([line[0],line[11],line[7],line[3],line[4],str(int(line[0])-start)]))
start = int(line[0])
inFile.close()
outFile.close()
| [
"mlacayoemery@gmail.com"
] | mlacayoemery@gmail.com |
be642c1f6326fcf18c8e37129bcb078a5b410449 | 9fe65faced0555d02114f1f54fc008c7e1da21a4 | /hw-AES-Protected/TVLA.py | d1f748c050bc4f36495c611531c8ef09333781a3 | [] | no_license | yuan9/Boostrap-TVLA | 97baa115dd82d3b49b7c145edbaa23cdf8e06e86 | ef40ac2551bc0a2effc6b097997b2bb15f64178a | refs/heads/master | 2023-03-16T04:49:16.074787 | 2020-04-16T07:05:03 | 2020-04-16T07:05:03 | 215,585,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,531 | py | from __future__ import absolute_import, division, print_function, with_statement
import numpy as np
import os
import re
import struct
import functools
import time
import matplotlib
import matplotlib.pyplot as plt
#import pandas as pd
#import tensorflow as tf
#import keras
#import aes_internals as aise
import dwdb_reader
import numpy as np
import array
from scipy import stats
from scipy.stats import chi2
from scipy.stats import chi2_contingency
from scipy.stats import norm
#import seaborn as sns
import random
import util.dwdb_reader as io
import util.tests as tests
tracenum = 50000
#Yuan: Note that the basic range is from (40,180)
sample_start = 40
sample_end = 180
#sample_start = 0
#sample_end = 3
dsr = io.dwdb_reader('/Users/yaoyuan/Desktop/Boostrap-TVLA/hw-AES-Protected/RawTraces_new.dwdb', '/Users/yaoyuan/Desktop/Boostrap-TVLA/hw-AES-Protected/')
data_batch, meta_batch = dsr.read_batch(tracenum, sample_start, sample_end)
data_np = np.asarray(data_batch)
print(len(data_np[0]))
#processing of classifiers
classifiers = [s.split('=')[1] for m in meta_batch for s in m['other'].split() if s.startswith('s=')]
classifiers= np.asarray(classifiers) # 2D numpy array of classifier
print("finish readin traces")
t_evolution = []
# initializing the rand and fix dataset
rand = []
fix = []
for i in range(0, tracenum):
if classifiers[i] == '1':
rand.append(data_np[i])
if classifiers[i] == '0':
fix.append(data_np[i])
#print("rand:{}".format(rand))
#print("fix:{}".format(fix))
rand_np = np.asarray(rand)
rand_trans = rand_np.T
#print (rand_trans)
fix_np = np.asarray(fix)
fix_trans = fix_np.T
#print (fix_trans)
# t-statistics
t_value =[]
for i in range(0, sample_end - sample_start):
t, p = stats.ttest_ind(rand_trans[i], fix_trans[i], equal_var = False)
t_value.append(abs(t))
# data = np.ones_like(data_np[0])
# data_label = np.ones_like(classifiers[0])
# print (data)
# print (data_label)
#print (classifiers)
#print (data_batch[1])
font = {'family' : 'normal', 'size' : 30}
plt.rc('font', **font)
plt.rcParams['figure.facecolor'] = 'white'
plt.xlabel('Time');
plt.ylabel('t-statistics');
print(t_value)
plt.plot(t_value,linewidth=2, linestyle='-', color = 'navy')
plt.axhline(y=4.5, color='r', linewidth=2)
plt.ylim(top = 10 )
# plt.plot(t_evolution[0])
# plt.plot(t_evolution[1])
# plt.plot(t_evolution[2])
# plt.axhline(y=4.5, color='r')
# plt.axvline(x=83, color='b')
# #plt.legend(['10000','20000', '30000'], loc='upper left')
# plt.legend(legend, loc='upper left')
plt.show()
| [
"yuan9@vt.edu"
] | yuan9@vt.edu |
10e1c06dfbfef952c8b3cecace20bcce32eb19a6 | 0dd5f2e1ce43439183e3ee0b9014973ad3b5de90 | /apps/funcionarios/models.py | 8383e13f4fe627ab83630ae4ebece978bdf26478 | [] | no_license | giusepper11/Gestao_RH | 0c4b997f6d0a589c79874cc490ee28cf16f12832 | 58694635e7b7c5b202319ff47b0cc00702da91b1 | refs/heads/master | 2020-04-10T20:22:41.063922 | 2019-01-23T02:59:04 | 2019-01-23T02:59:04 | 161,265,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | from django.contrib.auth.models import User
from django.db import models
from apps.departamentos.models import Departamento
from apps.empresas.models import Empresa
# Create your models here.
class Funcionario(models.Model):
nome = models.CharField(max_length=100)
user = models.OneToOneField(User, on_delete=models.PROTECT)
departamentos = models.ManyToManyField(Departamento)
empresa = models.ForeignKey(Empresa, on_delete=models.PROTECT, null=True, blank=True)
def __str__(self):
return self.nome
| [
"giusepper11@gmail.com"
] | giusepper11@gmail.com |
636dcdbc6cfa15bf949d0c6be7d420e7d67a25cb | 80b7f2a10506f70477d8720e229d7530da2eff5d | /ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/fabricpath_template.py | 94c5673175e4b1c21d8fc7ffb4ec27e628dacbc8 | [
"MIT"
] | permissive | OpenIxia/ixnetwork_restpy | 00fdc305901aa7e4b26e4000b133655e2d0e346a | c8ecc779421bffbc27c906c1ea51af3756d83398 | refs/heads/master | 2023-08-10T02:21:38.207252 | 2023-07-19T14:14:57 | 2023-07-19T14:14:57 | 174,170,555 | 26 | 16 | MIT | 2023-02-02T07:02:43 | 2019-03-06T15:27:20 | Python | UTF-8 | Python | false | false | 10,057 | py | from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Fabricpath(Base):
__slots__ = ()
_SDM_NAME = "fabricpath"
_SDM_ATT_MAP = {
"OuterdestinationAddressMacformat": "fabricpath.fabricpathHeader.outerdestinationAddress.macformat-1",
"DestaddEndnodeid1": "fabricpath.fabricpathHeader.outerdestinationAddress.destadd.endnodeid1-2",
"DestaddUbit": "fabricpath.fabricpathHeader.outerdestinationAddress.destadd.ubit-3",
"DestaddIbit": "fabricpath.fabricpathHeader.outerdestinationAddress.destadd.ibit-4",
"DestaddEndnodeid2": "fabricpath.fabricpathHeader.outerdestinationAddress.destadd.endnodeid2-5",
"DestaddResv": "fabricpath.fabricpathHeader.outerdestinationAddress.destadd.resv-6",
"DestaddOoo_dl_bit": "fabricpath.fabricpathHeader.outerdestinationAddress.destadd.ooo_dl_bit-7",
"DestaddSwitchid": "fabricpath.fabricpathHeader.outerdestinationAddress.destadd.switchid-8",
"DestaddSubswitchid": "fabricpath.fabricpathHeader.outerdestinationAddress.destadd.subswitchid-9",
"DestaddPortid": "fabricpath.fabricpathHeader.outerdestinationAddress.destadd.portid-10",
"OutersrcAddressMacformat": "fabricpath.fabricpathHeader.outersrcAddress.macformat-11",
"SrcaddEndnodeid1": "fabricpath.fabricpathHeader.outersrcAddress.srcadd.endnodeid1-12",
"SrcaddUbit": "fabricpath.fabricpathHeader.outersrcAddress.srcadd.ubit-13",
"SrcaddIbit": "fabricpath.fabricpathHeader.outersrcAddress.srcadd.ibit-14",
"SrcaddEndnodeid2": "fabricpath.fabricpathHeader.outersrcAddress.srcadd.endnodeid2-15",
"SrcaddResv": "fabricpath.fabricpathHeader.outersrcAddress.srcadd.resv-16",
"SrcaddOoo_dl_bit": "fabricpath.fabricpathHeader.outersrcAddress.srcadd.ooo_dl_bit-17",
"SrcaddSwitchid": "fabricpath.fabricpathHeader.outersrcAddress.srcadd.switchid-18",
"SrcaddSubswitchid": "fabricpath.fabricpathHeader.outersrcAddress.srcadd.subswitchid-19",
"SrcaddPortid": "fabricpath.fabricpathHeader.outersrcAddress.srcadd.portid-20",
"FptagEtherType": "fabricpath.fabricpathHeader.fptag.etherType-21",
"FptagFtag": "fabricpath.fabricpathHeader.fptag.ftag-22",
"FptagTtl": "fabricpath.fabricpathHeader.fptag.ttl-23",
}
def __init__(self, parent, list_op=False):
super(Fabricpath, self).__init__(parent, list_op)
@property
def OuterdestinationAddressMacformat(self):
"""
Display Name: DA Mac Format
Default Value: 00:00:00:00:00:00
Value Format: mAC
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self,
self._get_attribute(self._SDM_ATT_MAP["OuterdestinationAddressMacformat"]),
)
@property
def DestaddEndnodeid1(self):
"""
Display Name: EndNodeId-[5:0]
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["DestaddEndnodeid1"])
)
@property
def DestaddUbit(self):
"""
Display Name: U/L bit
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["DestaddUbit"]))
@property
def DestaddIbit(self):
"""
Display Name: I/G Bit
Default Value: 0
Value Format: decimal
Available enum values: Individual Address, 0, Group Address, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["DestaddIbit"]))
@property
def DestaddEndnodeid2(self):
"""
Display Name: EndNodeId-[7:6]
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["DestaddEndnodeid2"])
)
@property
def DestaddResv(self):
"""
Display Name: RSVD
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["DestaddResv"]))
@property
def DestaddOoo_dl_bit(self):
"""
Display Name: OOO/DL Bit
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["DestaddOoo_dl_bit"])
)
@property
def DestaddSwitchid(self):
"""
Display Name: Switch ID
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["DestaddSwitchid"])
)
@property
def DestaddSubswitchid(self):
"""
Display Name: Sub Switch ID
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["DestaddSubswitchid"])
)
@property
def DestaddPortid(self):
"""
Display Name: Port ID
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["DestaddPortid"]))
@property
def OutersrcAddressMacformat(self):
"""
Display Name: SA Mac Format
Default Value: 00:00:00:00:00:00
Value Format: mAC
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["OutersrcAddressMacformat"])
)
@property
def SrcaddEndnodeid1(self):
"""
Display Name: EndNodeId-[5:0]
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["SrcaddEndnodeid1"])
)
@property
def SrcaddUbit(self):
"""
Display Name: U/L bit
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["SrcaddUbit"]))
@property
def SrcaddIbit(self):
"""
Display Name: I/G Bit
Default Value: 0
Value Format: decimal
Available enum values: IndividualAddress, 0, GroupAddress, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["SrcaddIbit"]))
@property
def SrcaddEndnodeid2(self):
"""
Display Name: EndNodeId-[7:6]
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["SrcaddEndnodeid2"])
)
@property
def SrcaddResv(self):
"""
Display Name: RSVD
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["SrcaddResv"]))
@property
def SrcaddOoo_dl_bit(self):
"""
Display Name: OOO/DL Bit
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["SrcaddOoo_dl_bit"])
)
@property
def SrcaddSwitchid(self):
"""
Display Name: Switch ID
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["SrcaddSwitchid"])
)
@property
def SrcaddSubswitchid(self):
"""
Display Name: Sub Switch ID
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["SrcaddSubswitchid"])
)
@property
def SrcaddPortid(self):
"""
Display Name: Port ID
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["SrcaddPortid"]))
@property
def FptagEtherType(self):
"""
Display Name: EtherType
Default Value: 0x8903
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["FptagEtherType"])
)
@property
def FptagFtag(self):
"""
Display Name: FTag
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["FptagFtag"]))
@property
def FptagTtl(self):
"""
Display Name: TTL
Default Value: 32
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["FptagTtl"]))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| [
"andy.balogh@keysight.com"
] | andy.balogh@keysight.com |
dcafe79375adc524d12bb7e48c9d8ba0ff2ce6c1 | ffae452a114d892e40ecba0302f82745492342c3 | /blog/migrations/0012_auto_20180721_1913.py | 55ad4a2754544a949c919e44ea6a44c1ac571d67 | [] | no_license | Shoyee/blogproject | 6f26066fe75628c30620c3111687c9fbcdbac2af | 5e2787123d3362a497b811ad930fc018ee6e9514 | refs/heads/master | 2020-03-23T14:41:06.886517 | 2018-08-03T12:23:13 | 2018-08-03T12:23:13 | 141,690,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-07-21 11:13
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('blog', '0011_auto_20180721_1844'),
]
operations = [
migrations.AlterField(
model_name='post',
name='created_time',
field=models.DateTimeField(default=datetime.datetime(2018, 7, 21, 11, 13, 40, 135629, tzinfo=utc), help_text='创建时间'),
),
migrations.AlterField(
model_name='post',
name='modified_time',
field=models.DateTimeField(default=datetime.datetime(2018, 7, 21, 11, 13, 40, 135668, tzinfo=utc), help_text='最后修改时间'),
),
]
| [
"shuoyu_2008@163.com"
] | shuoyu_2008@163.com |
c892f85ea5a0f83de3f24946486aae49a94a3bdb | 14dc9cabea198c64b83c8d08e41d22b34bd56ac9 | /woran/settings/production.py | 5e4b8ad8b4a6aea41da3d881c5dfb89018e9df66 | [] | no_license | Ibrokola/Woran | ae62501dd32f1137128de12e3de5c9417808c678 | 31c32ebfb106c0218e62fe4133db46378d17578e | refs/heads/master | 2022-12-11T07:28:05.398690 | 2019-10-18T19:46:07 | 2019-10-18T19:46:07 | 93,820,295 | 0 | 0 | null | 2022-12-08T00:00:31 | 2017-06-09T04:56:57 | CSS | UTF-8 | Python | false | false | 4,837 | py | import os
from woran.aws.conf import *
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['woran.herokuapp.com']
FULL_DOMAIN_NAME = 'http://www.woran.com'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'crispy_forms',
'storages',
'videos',
'analytics',
'billing',
'comments',
'notifications',
'accounts',
'series',
'categories',
'search',
]
SITE_ID = 1
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 7
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_EMAIL_VERIFICATION = 'none'
SOCIALACCOUNT_EMAIL_VERIFICATION = ACCOUNT_EMAIL_VERIFICATION
LOGIN_REDIRECT_URL = '/'
CRISPY_TEMPLATE_PACK = "bootstrap3"
AUTH_USER_MODEL = 'accounts.MyUser'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'woran.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'woran.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
import dj_database_url
db_from_env = dj_database_url.config()
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
STATIC_ROOT = os.path.join(BASE_DIR, "live-static", "static-root")
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, "live-static", "media-root")
SOCIALACCOUNT_PROVIDERS = {
'facebook': {
'METHOD': 'oauth2',
'SCOPE': ['email', 'public_profile', 'user_friends'],
'AUTH_PARAMS': {'auth_type': 'reauthenticate'},
'INIT_PARAMS': {'cookie': True},
'FIELDS': [
'id',
'email',
'name',
'first_name',
'last_name',
'verified',
'locale',
'timezone',
'link',
'gender',
'updated_time',
],
'EXCHANGE_TOKEN': True,
'LOCALE_FUNC': lambda request: 'en_US',
'VERIFIED_EMAIL': True,
'VERSION': 'v2.4',
}
} | [
"babskolawole@gmail.com"
] | babskolawole@gmail.com |
7d943eea910d9da73e0ee05cfa01110474597de7 | 875ffe8a76a80c9fe221f6c750cd1051115d190d | /plugins/lib/scope_data/__init__.py | cef03f564b2c51e5aaacd24c8f3dbe5e02248f09 | [
"MIT"
] | permissive | templarundead/PackageDevPlus | 5f74434fe46b94557b64dbe4420520fb15099111 | 6b6aeb803784a6060624fb17999f87fb1cab5ab0 | refs/heads/master | 2023-05-11T17:44:39.436195 | 2023-05-08T15:36:38 | 2023-05-08T15:36:38 | 128,805,121 | 0 | 0 | MIT | 2023-05-10T04:54:16 | 2018-04-09T16:52:46 | Python | UTF-8 | Python | false | false | 3,835 | py | import logging
from .data import DATA
__all__ = ["COMPILED_NODES", "COMPILED_HEADS", "completions_from_prefix"]
logger = logging.getLogger(__name__)
class NodeSet(set):
"""
Methods:
* find(name)
* find_all(name)
* to_completion()
"""
def find(self, name):
for node in self:
if node == name:
return node
return None
def find_all(self, name):
res = NodeSet()
for node in self:
if node == name:
res.add(node)
return res
def to_completion(self):
# return zip(self, self)
return list(sorted((n.name + "\tconvention", n.name) for n in self))
class ScopeNode(object):
"""
Attributes:
* name
* parent
* children
* level | unused
Methods:
* add_child(child)
* tree()
"""
def __init__(self, name, parent=None, children=None):
self.name = name
self.parent = parent
self.children = children or NodeSet()
self.level = parent and parent.level + 1 or 1
def __hash__(self):
return hash(str(self))
def add_child(self, child):
self.children.add(child)
def tree(self):
if self.parent:
return self.name + '.' + self.parent.tree()
else:
return self.name
def __eq__(self, other):
if isinstance(other, str):
return str(self) == other
elif isinstance(other, ScopeNode):
return (self.name == other.name
and self.parent == other.parent
and self.children == other.children)
def __str__(self):
return self.name
def __repr__(self):
ret = self.name
if self.children:
ret += " {%s}" % ' '.join(map(repr, self.children))
return ret
#######################################
# output values
COMPILED_NODES = NodeSet()
COMPILED_HEADS = NodeSet()
# parse the DATA string
lines = DATA.split("\n")
# some variables
indent = " " * 4
indent_level = 0
indents = {}
# process lines
# Note: expects sane indentation (such as only indent by 1 `indent` at a time)
for line in lines:
if line.isspace() or not len(line):
# skip blank lines
continue
if line.startswith(indent * (indent_level + 1)):
# indent increased
indent_level += 1
if not line.startswith(indent * indent_level):
# indent decreased
for level in range(indent_level - 1, 0, -1):
if line.startswith(indent * level):
indent_level = level
break
parent = indents[indent_level - 1] if indent_level - 1 in indents else None
node = ScopeNode(line.strip(), parent)
indents[indent_level] = node
if parent:
parent.add_child(node)
else:
COMPILED_HEADS.add(node)
COMPILED_NODES.add(node)
# Tokenize the current selector
def completions_from_prefix(prefix):
"""Build completions from a given scope prefix (including dots)."""
tokens = prefix.split(".")
if len(tokens) <= 1:
# No work to be done here, just return the heads
return COMPILED_HEADS.to_completion()
# Browse the nodes and their children
nodes = COMPILED_HEADS
for i, token in enumerate(tokens[:-1]):
node = nodes.find(token)
if not node:
logger.info("`%s` not found in scope naming conventions", '.'.join(tokens[:i + 1]))
break
nodes = node.children
if not nodes:
logger.info("No nodes available in scope naming conventions after `%s`",
'.'.join(tokens[:-1]))
break
else:
# Offer to complete from conventions or base scope
return nodes.to_completion()
return []
| [
"fichtefoll2@googlemail.com"
] | fichtefoll2@googlemail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.