blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
54b0660369b4623c61a1343991020fb29be5e3b5
|
9b93b28428865e4d800e40ed8922b7ade5f92273
|
/multiprocess_bucket.py
|
2567ef2116692013eecb934024619def019a675f
|
[] |
no_license
|
Minjire/s3-bucket
|
c7701f485bcca86366a3816427bd605c7ce03fc4
|
6bb6b02c53b85ff12f59a220d0f4050d192abfa0
|
refs/heads/master
| 2023-02-05T18:56:52.934855
| 2020-12-28T06:28:51
| 2020-12-28T06:28:51
| 286,675,594
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,475
|
py
|
import boto3
from boto3 import session
from botocore.client import Config
import multiprocessing
import logging
from pathlib import Path
# DigitalOcean Spaces (S3-compatible) credentials and endpoint.
ACCESS_ID = ''
ACCESS_KEY = ''
REGION = ''
URL = 'https://sfo2.digitaloceanspaces.com'
PATH = ''    # local destination root for downloaded files
PATHS = []   # object keys collected by get_paths(), consumed by the pool
s3 = boto3.resource('s3', region_name=REGION, endpoint_url=URL, aws_access_key_id=ACCESS_ID,
                    aws_secret_access_key=ACCESS_KEY)
my_bucket = s3.Bucket('capital')
# my_bucket = s3.Bucket('ai-images')
def get_paths():
    """Collect into the module-level PATHS list the keys of every bucket
    object under the vehicle-pictures prefix."""
    wanted = 'var/www/capitalalliance/public/files/vehicles/vehicle_pics'
    for s3_object in my_bucket.objects.all():
        if wanted in s3_object.key:
            PATHS.append(s3_object.key)
def download_all_files(path):
    """Download one S3 object to PATH/<parent-dir>/<filename>.

    path: full object key, e.g. '.../vehicle_pics/<dir>/<file>'.
    Errors are logged with traceback and printed, never raised, so one
    failed download does not abort the multiprocessing pool.
    """
    print(path)
    # Third-from-last path component (the containing folder).
    # Renamed from `dir`, which shadowed the builtin.
    sub_dir = path.rsplit('/', 3)[-3]
    sub_dir += '/'
    file_name = path.rsplit('/', 1)[-1]
    try:
        print(f"\ndownloading...{path}\n")
        Path(PATH + sub_dir).mkdir(parents=True, exist_ok=True)
        my_bucket.download_file(path, PATH + sub_dir + file_name)
        print("Success\n")
    except Exception as e:
        logging.error(f'FilePath: {path}', exc_info=True)
        print("Error encountered: %s" % e)
if __name__ == '__main__':
    get_paths()
    print(PATHS)
    # Pool as a context manager guarantees worker cleanup even if map()
    # raises (the original close()/join() was skipped on error).
    with multiprocessing.Pool(multiprocessing.cpu_count()) as p:
        result = p.map(download_all_files, PATHS)
        print(result)
|
[
"noreply@github.com"
] |
noreply@github.com
|
cb78188c2f44df68d2563faf03f37a360a8ec3db
|
494955a18b5f7d25c275c4d454b4ae9c8db3976e
|
/Student-Information-Chatbot-System/mappings.py
|
54737b424e55d4c838a9c25df4b732f68fd9c139
|
[] |
no_license
|
Prashanth8143/ChatBot
|
ee343b547de3da7c3a0ef34c64c9d06fa50a3f67
|
7461099763b533f87b6c85ad6464d9245c581c27
|
refs/heads/main
| 2023-08-10T15:44:22.845222
| 2021-09-27T07:46:50
| 2021-09-27T07:46:50
| 410,763,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
# -*- coding: utf-8 -*-
# Maps spelled-out cardinal ("one") and ordinal ("first") number words
# to their integer values, for the digits 0-9.
map_keys = {
    'one': 1, 'first': 1,
    'two': 2, 'second': 2,
    'three': 3, 'third': 3,
    'four': 4, 'fourth': 4,
    'five': 5, 'fifth': 5,
    'six': 6, 'sixth': 6,
    'seven': 7, 'seventh': 7,
    'eight': 8, 'eighth': 8,
    'nine': 9, 'ninth': 9,
    'zero': 0, 'zeroth': 0,
}

# The digit string '2' is never a key, so this prints nothing.
if '2' in map_keys:
    print(map_keys['five'])
|
[
"noreply@github.com"
] |
noreply@github.com
|
95085f0f6148d3aeba523e3cba43e37d56a4cc60
|
908336e941d7d95d2ff168f8d132bf5656b87752
|
/datasets/weibo_senti_100k/parse.py
|
ad104fde40447e6e0371f6df42df4903898a6e89
|
[] |
no_license
|
cyy0523xc/ChineseNlpCorpus
|
364437b5662bc0a138281afc817b375c50a7fecf
|
a027225e9caf963d0d4e38d96b402ce515505850
|
refs/heads/master
| 2020-03-22T05:45:09.343135
| 2018-12-18T02:14:08
| 2018-12-18T02:14:08
| 139,587,654
| 2
| 0
| null | 2018-07-03T13:29:28
| 2018-07-03T13:29:27
| null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
# -*- coding: utf-8 -*-
#
#
# Author: alex
# Created Time: 2018年07月03日 星期二 21时51分54秒
import csv
# Split the weibo_senti_100k sentiment corpus into one file of positive
# reviews (label == '1') and one of negative reviews, one review per line.
with open('./weibo_senti_100k.csv', encoding='utf8') as r, \
        open('../../format_datasets/weibo_senti_100k/weibo_senti_100k_pos.txt', 'w', encoding='utf8') as pos, \
        open('../../format_datasets/weibo_senti_100k/weibo_senti_100k_neg.txt', 'w', encoding='utf8') as neg:
    for row in csv.DictReader(r):
        # Flatten embedded newlines so each review stays on a single line.
        content = row['review'].replace("\n", ' ').strip() + "\n"
        if row['label'] == '1':
            pos.write(content)
        else:
            neg.write(content)
print('ok')
|
[
"cyy0523xc@gmail.com"
] |
cyy0523xc@gmail.com
|
8d665015be357a2f1bb710b9ce38053140191537
|
a9eac49babafe2b38b331bf166b9d590dc398fa0
|
/ex5.py
|
feda93de50f3c2f9e6b7c30a1296410d25c2371a
|
[] |
no_license
|
codedsun/LearnPythonTheHardWay
|
7bb07af8ae4a64d96c43c17de6040cfc31be79d7
|
bba6c046b690ad2172872f64defe6cc5a5bb0ca7
|
refs/heads/master
| 2021-04-03T10:10:24.477405
| 2018-03-20T10:37:54
| 2018-03-20T10:37:54
| 124,868,279
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
# Variables describing a person, printed via %-style string formatting.
my_name = 'Suneet'
my_age = 20  # this is good
my_height = '5.9'  # feet
my_weight = 50
my_eyes = "Black"
my_teeth = "White"
my_hair = "White"  # sounds weird
print("Lets talk about %s" % my_name)
# Fixed: the placeholders were written as '$s' (TypeError at runtime) and
# the last '%' operator was typed as '$' (SyntaxError).
print("He's %s tall" % my_height)
print("He's %s heavy" % my_weight)
|
[
"suneetbond91@gmail.com"
] |
suneetbond91@gmail.com
|
173e55ba70e34759a282ab0079f115c666248ba0
|
d0cfc5e43170208ce5680a9bd6e39acfc2501470
|
/networks.py
|
59e1dbcac6964609cc9d946444d7346b9d139ea7
|
[] |
no_license
|
yzes/pix2pix-pytorch-master
|
d60a836d9a537b1d189104177a690dce2b719458
|
96f3192fe022f138de92e1cc29623cb4362c2f9a
|
refs/heads/master
| 2022-12-19T08:59:42.464980
| 2020-09-29T08:19:55
| 2020-09-29T08:19:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,810
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : networks.py
# @Author: Jehovah
# @Date : 18-6-4
# @Desc :
import utils, torch, time, os, pickle
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
class Generator(nn.Module):
    """pix2pix U-Net generator.

    Eight stride-2 conv encoder stages take a 256x256 input down to 1x1;
    eight transposed-conv decoder stages upsample back.  The output of each
    encoder stage is concatenated onto the matching decoder output (skip
    connections), which is why de2-de5 expect `ngf * 8 * 2` input channels.

    input_nc / output_nc: image channel counts; ngf: base filter width.
    """
    def __init__(self, input_nc, output_nc, ngf=64):
        super(Generator, self).__init__()
        # 256*256 input; every encoder stage halves the spatial size.
        self.en1 = nn.Sequential(
            nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2, True)
        )
        self.en2 = nn.Sequential(
            nn.Conv2d(ngf, ngf*2, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf*2),
            nn.LeakyReLU(0.2, True)
        )
        self.en3 = nn.Sequential(
            nn.Conv2d(ngf*2, ngf * 4, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 4),
            nn.LeakyReLU(0.2, True)
        )
        self.en4 = nn.Sequential(
            nn.Conv2d(ngf * 4, ngf * 8, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 8),
            nn.LeakyReLU(0.2, True)
        )
        self.en5 = nn.Sequential(
            nn.Conv2d(ngf * 8, ngf * 8, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 8),
            nn.LeakyReLU(0.2, True)
        )
        self.en6 = nn.Sequential(
            nn.Conv2d(ngf * 8, ngf * 8, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 8),
            nn.LeakyReLU(0.2, True)
        )
        self.en7 = nn.Sequential(
            nn.Conv2d(ngf * 8, ngf * 8, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 8),
            nn.LeakyReLU(0.2, True)
        )
        # Innermost stage (1x1 spatial): no batch norm, plain ReLU.
        self.en8 = nn.Sequential(
            nn.Conv2d(ngf * 8, ngf * 8, kernel_size=4, stride=2, padding=1),
            nn.ReLU(True)
        )
        # Decoder stages; each doubles the spatial size.
        self.de1 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 8,ngf * 8, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 8),
            # nn.Dropout(0.5),
            nn.ReLU(True)
        )
        self.de2 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 8 * 2, ngf * 8, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 8),
            nn.Dropout(0.5),
            nn.ReLU(True)
        )
        self.de3 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 8 * 2, ngf * 8, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 8),
            nn.Dropout(0.5),
            nn.ReLU(True)
        )
        self.de4 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 8 * 2, ngf * 8, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 8),
            nn.Dropout(0.5),
            nn.ReLU(True)
        )
        self.de5 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 8 * 2, ngf * 4, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True)
        )
        self.de6 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 8, ngf * 2, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True)
        )
        self.de7 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 4, ngf, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True)
        )
        # Final stage maps back to output_nc channels; Tanh bounds the
        # output to [-1, 1].
        self.de8 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 2, output_nc,
                               kernel_size=4, stride=2,
                               padding=1),
            nn.Tanh()
        )

    def forward(self, x):
        """Encode x through en1-en8, then decode with skip connections.

        Returns a tensor with output_nc channels at the input resolution.
        """
        # encoder
        out_en1 = self.en1(x)
        out_en2 = self.en2(out_en1)
        out_en3 = self.en3(out_en2)
        out_en4 = self.en4(out_en3)
        out_en5 = self.en5(out_en4)
        out_en6 = self.en6(out_en5)
        out_en7 = self.en7(out_en6)
        out_en8 = self.en8(out_en7)
        # decoder: concatenate the mirror encoder output after every stage
        out_de1 = self.de1(out_en8)
        out_de1 = torch.cat((out_de1, out_en7), 1)
        out_de2 = self.de2(out_de1)
        out_de2 = torch.cat((out_de2, out_en6), 1)
        out_de3 = self.de3(out_de2)
        out_de3 = torch.cat((out_de3, out_en5), 1)
        out_de4 = self.de4(out_de3)
        out_de4 = torch.cat((out_de4, out_en4), 1)
        out_de5 = self.de5(out_de4)
        out_de5 = torch.cat((out_de5, out_en3), 1)
        out_de6 = self.de6(out_de5)
        out_de6 = torch.cat((out_de6, out_en2), 1)
        out_de7 = self.de7(out_de6)
        out_de7 = torch.cat((out_de7, out_en1), 1)
        out_de8 = self.de8(out_de7)
        return out_de8
class Discriminator(nn.Module):
    """pix2pix PatchGAN discriminator.

    Takes the input and output images concatenated along the channel axis
    (input_nc + output_nc channels) and produces a single-channel map of
    per-patch real/fake probabilities (Sigmoid output).
    """
    def __init__(self, input_nc, output_nc, ndf=64):
        super(Discriminator, self).__init__()
        # Five 4x4 conv stages: the first three halve the spatial size
        # (stride 2), the last two keep it roughly constant (stride 1).
        self.cov1 = nn.Sequential(
            nn.Conv2d(input_nc + output_nc, ndf, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2, True),
        )
        self.cov2 = nn.Sequential(
            nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, True),
        )
        self.cov3 = nn.Sequential(
            nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, True),
        )
        self.cov4 = nn.Sequential(
            nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=1, padding=1),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, True),
        )
        self.cov5 = nn.Sequential(
            nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=1, padding=1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Run the five conv stages in order on the concatenated pair x."""
        out = x
        for stage in (self.cov1, self.cov2, self.cov3, self.cov4, self.cov5):
            out = stage(out)
        return out
|
[
"52454904+Ausiden@users.noreply.github.com"
] |
52454904+Ausiden@users.noreply.github.com
|
7797d8e412d4ea33bba9c8cebcecd1cf9377cc15
|
abb9cc66d4b86485c3e24ab75f5b04e6e052b710
|
/migrations/0007_auto_20181022_1132.py
|
a665706863f0188f9dfe1031a55fead3c22288c7
|
[] |
no_license
|
0-afflatus/diary
|
0dab6ed1d2856d3ef79d234737a7e746bb04fa73
|
13045ecfd430c7cbbcd1f673ecbec1b98de7a7c6
|
refs/heads/master
| 2020-04-15T11:47:31.360212
| 2019-01-08T12:17:56
| 2019-01-08T12:17:56
| 164,644,711
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
# Generated by Django 2.0.5 on 2018-10-22 11:32
from django.db import migrations, models
class Migration(migrations.Migration):
    # Alters Event.end_time to a TimeField defaulting to midnight and
    # labelled "closing time" in forms/admin.
    dependencies = [
        ('diary', '0006_event_percentage'),
    ]
    operations = [
        migrations.AlterField(
            model_name='event',
            name='end_time',
            field=models.TimeField(default='00:00:00', verbose_name='closing time'),
        ),
    ]
|
[
"afflatus@pathilorra.co.uk"
] |
afflatus@pathilorra.co.uk
|
717a8ed939260d470b84aab80fa2b1e6d9af0e94
|
69a93ce7bb58ab73d438b5aa9f218fc18b90869a
|
/word_count/views.py
|
ac7335473bd41f1850768a57c9a6b2294904cad7
|
[] |
no_license
|
wignes/word_count
|
b8be2447f7a583d33ebb2b3d8968187d97f745cf
|
3bcf79fbc6429836c8adda97da07a82f5e2422ad
|
refs/heads/master
| 2023-04-15T21:28:21.065677
| 2021-04-26T17:31:54
| 2021-04-26T17:31:54
| 361,839,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 744
|
py
|
import operator
from django.shortcuts import render
def home_page(request):
    """Render the landing page; 'given_val' is placeholder text shown
    before any text has been submitted for counting."""
    return render(request, 'home.html', {'given_val': "Given value is empty"})
def count(request):
    """Count the words in the submitted 'fulltext' GET parameter and render
    the results page with per-word frequencies, most frequent first."""
    given_word = request.GET['fulltext']
    tokens = given_word.split()
    word_count = len(tokens)
    word_dic = {}
    for word in tokens:
        word_dic[word] = word_dic.get(word, 0) + 1
    # Sort (word, count) pairs by count, descending.
    sorted_word_list = sorted(word_dic.items(), key=operator.itemgetter(1), reverse=True)
    return render(request, 'count.html', dict(word_count=word_count, your_text=given_word, word_dict=sorted_word_list))
def about(request):
    """Render the static about page."""
    return render(request, 'about.html')
|
[
"wignes3010@gmail.com"
] |
wignes3010@gmail.com
|
10cd21ed4c84590a3dc0fe3c13de00f496e38eb1
|
24f9c4329537ed2278fae65125091c6179e4e12e
|
/commonlib/use_xml.py
|
8fe3dc3ce85a4ced2746b76b62be969860cd18c7
|
[
"Apache-2.0"
] |
permissive
|
semoren/learn_python
|
e439b1e4f51f13bdeb112447843c9d2a8bfae10b
|
d3140c3d4cb2bdb674d9c072c4195ebcfe686be3
|
refs/heads/master
| 2020-06-16T21:05:05.321097
| 2016-12-09T06:20:35
| 2016-12-09T06:20:35
| 75,066,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 736
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from xml.parsers.expat import ParserCreate
class DefaultSaxHandle(object):
    """Expat SAX callbacks that simply echo each parse event to stdout."""

    def start_element(self, name, attrs):
        msg = 'sax:start_element: %s, attrs: %s' % (name, str(attrs))
        print(msg)

    def end_element(self, name):
        msg = 'sax:end_element: %s' % name
        print(msg)

    def char_date(self, text):
        # NOTE(review): the name looks like a typo for char_data, but the
        # module wires it up under this name, so it is kept.
        msg = 'sax:char_data: %s' % text
        print(msg)
# Sample document fed to the expat parser below.
xml = r'''<?xml version="1.0"?>
<ol>
<li><a href="/python">Python</a></li>
<li><a href="/ruby">Ruby</a></li>
</ol>
'''
handler = DefaultSaxHandle()
parser = ParserCreate()
# Wire the handler's methods into expat's three callback slots, then parse.
parser.StartElementHandler = handler.start_element
parser.EndElementHandler = handler.end_element
parser.CharacterDataHandler = handler.char_date
parser.Parse(xml)
|
[
"renqing50@gmail.com"
] |
renqing50@gmail.com
|
3a1b01762b7a75f9736a2e02d8011a390201f429
|
f05a08881b606d593bb76fa725d62187fb8e6cc0
|
/cache_ensembl/dump_orthology.py
|
c1284e4b8abd80999d38f73341bbc01cce44305f
|
[] |
no_license
|
bunbun/cache-ensembl
|
6cf109dd0a9f6dad15744d4583ab701f7bda5a35
|
02ce50016321fecb5f9f784c63ce4f8e5066d74b
|
refs/heads/master
| 2021-01-23T13:58:36.493124
| 2011-12-06T21:45:04
| 2011-12-06T21:45:04
| 32,793,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,678
|
py
|
#!/usr/bin/env python
################################################################################
#
# compara_orthology.py
#
#
# Copyright (c) 11/3/2010 Leo Goodstadt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#################################################################################
"""
for downloading homology data from an ensembl compara data base and caching for local fast
access
"""
import re, os,sys, cPickle
from dump_object import dump_object
from random_access_file_by_sections import fill_directory_of_sections, write_directory_of_sections, read_directory_of_sections
from collections import defaultdict, namedtuple
import marshal, time, struct
from general import (check_cache_file_version,
_prepare_cache_file,
cache_specified_compara_data,
lookup_compara_cache_file_name,
cache_specified_core_data,
lookup_core_cache_file_name
)
from marshalable_object import marshalable_object, load_dict_of_object_lists, dump_dict_of_object_lists, load_dict_of_objects, dump_dict_of_objects
from collections import namedtuple
import sqlite3
import tempfile
import compara_orthology
from compara_orthology import *
if __name__ == '__main__':
    import logging, sys
    import logging.handlers

    # Custom log level between DEBUG (10) and INFO (20).
    MESSAGE = 15
    logging.addLevelName(MESSAGE, "MESSAGE")

    def setup_std_logging (logger, log_file, verbose):
        """
        set up logging using programme options
        """
        class debug_filter(logging.Filter):
            """
            Ignore INFO messages
            """
            def filter(self, record):
                return logging.INFO != record.levelno

        class NullHandler(logging.Handler):
            """
            for when there is no logging
            """
            def emit(self, record):
                pass

        # We are interested in all messages
        logger.setLevel(logging.DEBUG)
        has_handler = False
        # log to file if that is specified
        if log_file:
            handler = logging.FileHandler(log_file, delay=False)
            handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(levelname)6s - %(message)s"))
            handler.setLevel(MESSAGE)
            logger.addHandler(handler)
            has_handler = True
        # log to stderr if verbose
        if verbose:
            stderrhandler = logging.StreamHandler(sys.stderr)
            stderrhandler.setFormatter(logging.Formatter(" %(message)s"))
            stderrhandler.setLevel(logging.DEBUG)
            if log_file:
                # stderr shows only non-INFO when a file is also in use
                stderrhandler.addFilter(debug_filter())
            logger.addHandler(stderrhandler)
            has_handler = True
        # no logging
        if not has_handler:
            logger.addHandler(NullHandler())

    #
    # set up log
    #
    logger = logging.getLogger("me")
    setup_std_logging(logger, "me.log", True)
    from general import *
    # Connect to the Ensembl compara MySQL database (helper from `general`).
    mysql_dbh = connect_ensembl_mysql()
    cursor = mysql_dbh.cursor()
    cursor.execute("use ensembl_compara_58")
    taxon_id_to_name_and_assembly = compara_orthology._retrieve_species_name_and_assembly (cursor, logger)
    converting_species = "Anolis carolinensis", "Ailuropoda melanoleuca", "Branchiostoma floridae", "Canis familiaris", "Danio rerio", "Equus caballus", "Gasterosteus aculeatus", "Gallus gallus", "Homo sapiens", "Monodelphis domestica", "Mus musculus", "Macaca mulatta", "Nematostella vectensis", "Ornithorhynchus anatinus", "Oryzias latipes", "Petromyzon marinus", "Rattus norvegicus", "Strongylocentrotus purpuratus", "Taeniopygia guttata", "Tetraodon nigroviridis", "Xenopus tropicalis"
    # Resolve each species name to its taxon id; warn if not found.
    taxon_ids = []
    taxon_id_to_species_name = dict()
    for s in converting_species:
        for t in taxon_id_to_name_and_assembly.values():
            if s == t.scientific_name:
                taxon_ids.append(t.taxon_id)
                taxon_id_to_species_name[t.taxon_id] = s.replace(' ', '_')
                break
        else:
            logger.warning("No orthology_data for species %s" % s)
    # All ordered pairs (t < tt) of resolved taxon ids.
    taxon_id_pairs = []
    species_name_pairs = []
    for t in taxon_ids:
        for tt in taxon_ids:
            if t < tt:
                taxon_id_pairs.append((t, tt))
                species_name_pairs.append((taxon_id_to_species_name[t], taxon_id_to_species_name[tt]))
    for taxon_id_pair, species_name_pair in zip(taxon_id_pairs, species_name_pairs):
        (ortholog_sets,
         ortholog_set_pairwise_scores,
         ortholog_set_pairwise_alignment,
         ensembl_version, cache_file_name) = compara_orthology.get_compara_orthology_for_taxon_ids("me.index", logger, taxon_id_pair, None, data_to_load = "ortholog_sets")
        compara_orthology.log_compara_orthology (logger, taxon_id_pair, species_name_pair, ortholog_sets, ortholog_set_pairwise_scores, ortholog_set_pairwise_alignment)
        #
        # empty set
        #
        if not len(ortholog_sets):
            continue
        orthologs_file = open("try/%s_%s.orthologs" % species_name_pair, "w")
        #
        # Count genes for each ortholog set and place in category
        #
        for ortholog_set in ortholog_sets.itervalues():
            gene_cnts = [len(ortholog_set.gene_ids[t]) for t in taxon_id_pair]
            # keep only strict one-to-one orthologs
            if gene_cnts != [1, 1]:
                continue
            orthologs_file.write("%s\t%s\n" %
                (ortholog_set.gene_ids[taxon_id_pair[0]][0],
                 ortholog_set.gene_ids[taxon_id_pair[1]][0]))
|
[
"bunbun68@localhost"
] |
bunbun68@localhost
|
81c8c3d11e4e50bdbf954eb74fa3031bec16c4e5
|
6a3ef6066d8164af34d1812e28d21096f6db94ec
|
/iteration3_code/Include/test.py
|
e140de7b1b37d28dafb16ed58227e487d567a321
|
[] |
no_license
|
doc-vu/VAML_ApplicationPerformance
|
16e96d696a490a94b7664bdb6dda1c7a986f7939
|
60c90fbd44140c08d480467efa526fa6702d4900
|
refs/heads/master
| 2020-05-18T19:28:01.793035
| 2019-04-27T05:04:46
| 2019-04-27T05:04:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,659
|
py
|
from flask import Flask, render_template, request, url_for
import pandas as pd
import numpy as np
import csv
from pca_data_prepare import *
np.set_printoptions(suppress=True)  # print floats without scientific notation
import json
app = Flask(__name__)
@app.route("/linechart", methods=['GET', 'POST'])
def linechart():
    """Serve the dashboard page (GET) or, on POST with a 'time' range
    like 'HH:MM:SS,HH:MM:SS', return JSON with the correlation matrix and
    parallel-coordinates data recomputed over that time window."""
    application_name = "pmd"
    data = pd.read_csv(application_name + ".csv")
    latency_90 = list(data["latency90"])
    latency_90 = [round(i, 2) for i in latency_90]
    # keep only the HH:MM:SS part of each timestamp for the x axis
    timeseries = list(data["date"])
    timeseries = [i.split(" ")[1].split(".")[0] for i in timeseries]
    header = ["BW","MEM_BW","switch","cpu","io","memory","network"]
    header2 = ["id"] + header
    # NOTE(review): the date of the run is hard-coded; times submitted by
    # the client are assumed to fall on this day.
    date = "3/28/2019 "
    if request.method == "POST":
        rec_time = request.form["time"]
        start_time = date + rec_time.split(",")[0]
        end_time = date + rec_time.split(",")[1]
        # restrict the data to the requested time window
        data['date'] = pd.to_datetime(data['date'])
        data = data.set_index('date')
        data = data[start_time:end_time]
        data = data.reset_index()
        new_m = get_correlation_table(application_name, header, data)
        new_parallel_data, ratio_header = get_parallel_json_data(data, header, application_name)
        print (new_parallel_data)
        respon_mess = {"new_correlation_matrix": new_m, "new_parallel_data": new_parallel_data, "new_header": header2, "time_start_end": rec_time,
                       "new_parallel_header": header, "new_eigen_header": ratio_header}
        return json.dumps(respon_mess)
    # GET: render the full page over the whole data set
    raw_data = data
    matrix_json = get_correlation_table(application_name, header, raw_data)
    parallel_data, ratio_header = get_parallel_json_data(raw_data, header, application_name)
    return render_template("linechart.html", latency_90={"latency_90": latency_90}, timeseries={"timeseries": timeseries},
                           application_name=application_name,  # line chart
                           correlation_matrix={"correlation_matrix": matrix_json},
                           correlation_header={"correlation_header": header2},  # correlation table
                           parallel_data=parallel_data,
                           parallel_header={"parallel_header": header},
                           eigen_header={"eigen_header": ratio_header}
                           )
def get_correlation_table(application_name, header, raw_data):
    """Build the correlation-table payload for the front end.

    Writes correlation_<application_name>.csv via get_correlation_csv(),
    reads it back, and returns a list of row dicts keyed by header2.
    In each row, column 0 is the metric name; cells strictly above the
    diagonal are replaced by the two raw data series [row_metric, col_metric]
    (for scatter plots), while the rest stay as correlation strings.
    """
    header2 = ["id"] + header
    get_correlation_csv(application_name, raw_data)
    data = pd.read_csv('correlation_'+ application_name + '.csv')
    matrix = []
    # first_1 tracks the diagonal position: cells after it get raw series.
    first_1 = 1
    loc_index = 0
    for loc_index in range(0, len(data)):
        tmp_d = data.loc[loc_index].tolist()
        tmp_d = [str(i) for i in tmp_d]
        tmp_d[0] = header[loc_index]
        for i in range(first_1 + 1, len(tmp_d)):
            # above-diagonal cell: pair of raw data series for plotting
            tmp_d[i] = [list(raw_data[tmp_d[0]]), list(raw_data[header[i-1]])]
        matrix.append(tmp_d)
        first_1 += 1
    # convert each row list into a dict keyed by header2
    matrix_json = []
    for loc_index in range(0, len(matrix)):
        m_i = 0
        json_data = {}
        for d in matrix[loc_index]:
            json_data[header2[m_i]] = d
            m_i += 1
        matrix_json.append(json_data)
    return matrix_json
def get_correlation_csv(application_name, raw_data):
    """Write the pairwise correlation matrix of raw_data's metric columns
    (rounded to 2 decimals) to correlation_<application_name>.csv.

    Returns 0 on completion (kept for caller compatibility).
    """
    # Label / non-metric columns excluded from the correlation.
    drop_attr = ['date', 'DISTSIM_BENCH.benchmark_id', 'L2_BW', 'L3_BW',
                 'latency', 'latency90']
    metrics = raw_data.drop(drop_attr, axis=1)
    corr = np.around(np.corrcoef(metrics.values, rowvar=0), decimals=2)
    target_name = "correlation_" + application_name + ".csv"
    pd.DataFrame(corr).to_csv(target_name)
    return 0
def get_parallel_data(raw_data, header, application_name):  # raw_data: dataframe
    """Combine PCA eigenvectors with the raw metric columns, persist the
    result to parallel_<application_name>.csv, and read it back.

    Returns (data_rows, header_row, ratio) where ratio is the PCA
    explained-variance header produced by get_pca_data().
    """
    target_name = "parallel_" + application_name + ".csv"
    eigenvector, ratio = get_pca_data(raw_data, header)
    metrics = raw_data.drop(["date", "DISTSIM_BENCH.benchmark_id", "L2_BW", "L3_BW", "latency"], axis=1)
    eig_df = pd.DataFrame(eigenvector)
    eig_df.columns = ratio
    combined = pd.concat([eig_df, metrics], axis=1, ignore_index=False)
    combined.to_csv(target_name, index=None)
    rows = list(csv.reader(open(target_name)))
    return rows[1:], rows[0], ratio
def get_parallel_json_data(raw_data, header, application_name):
    """Convert get_parallel_data()'s CSV rows into a list of dicts keyed by
    the CSV header, each tagged with a sequential 'id'.

    Returns ({"parallel_data": [...]}, ratio).
    """
    rows, head, ratio = get_parallel_data(raw_data, header, application_name)
    parallel_data = []
    for row_id, item in enumerate(rows):
        record = {}
        for pos in range(0, len(item)):
            record[head[pos]] = item[pos]
        record["id"] = row_id
        parallel_data.append(record)
    return {"parallel_data": parallel_data}, ratio
if __name__ == '__main__':
    # Start the Flask development server with default settings.
    app.run()
|
[
"214731923@qq.com"
] |
214731923@qq.com
|
2e97d884af1803b0fbf8605ab42c9398621efc5f
|
360e6fa1eb84f9bdd2bc9e2d781b38abe4f269b8
|
/api/account/register/forms.py
|
d0dabb220740e6d6494466123526c919a4acf58f
|
[] |
no_license
|
RandomGuy090/taskmanager
|
03b0386a8fa492766cebd3537401efac527922d4
|
25e1bf7a3ae4a75c02f576582778bb259d7d8d4a
|
refs/heads/master
| 2023-08-27T14:26:09.897088
| 2021-10-25T09:39:35
| 2021-10-25T09:39:35
| 387,257,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,149
|
py
|
from django import forms
from django.contrib.auth import get_user_model, authenticate, login
User = get_user_model()
from taskmanager.exceptions import Unauthorized
class RegisterForm(forms.Form):
    """Registration form: validates that the two passwords match and the
    username is free, then creates the user.

    NOTE(review): clean() both validates AND creates the user; Django
    convention would perform creation in the view or a save() method.
    """
    username = forms.CharField(widget=forms.TextInput())
    password = forms.CharField(widget=forms.PasswordInput())
    password2 = forms.CharField(widget=forms.PasswordInput())

    def clean(self):
        # self.username / self.password / self.password2 are set by the
        # clean_<field> hooks below, which Django runs before clean().
        if self.password != self.password2:
            raise forms.ValidationError("passwords are not equal!")
            # raise Unauthorized
        qs = User.objects.filter(username=self.username)
        # NOTE(review): this authenticate() result is overwritten below
        # without being used — it looks like dead code; confirm intent.
        user = authenticate(username=self.username, password=self.password)
        if qs.exists():
            raise forms.ValidationError("Such user already exists")
        user = User.objects.create_user(username=self.username, password=self.password)
        user.save()
        self.user = user

    def clean_username(self):
        # Stash the cleaned value on self for use in clean().
        self.username = self.cleaned_data.get("username")
        return self.username

    def clean_password(self):
        self.password = self.cleaned_data.get("password")
        return self.password

    def clean_password2(self):
        self.password2 = self.cleaned_data.get("password2")
        return self.password2
|
[
"randomguy0090@gmail.com"
] |
randomguy0090@gmail.com
|
31603fd8567b2891d889818416d417116616a854
|
dd592d5eafbb27c95ea0a43e5bd4abb2564597f3
|
/Test1/test21.py
|
3fdeb9d7f8db88b97b303816e5f580b9f6114eae
|
[] |
no_license
|
FlyingZYH/PythonGame
|
44f442a80f2bdb3ae7fded28bd736f206ada3096
|
5f685445feb2618709171c1c0e7d6ae211732c2c
|
refs/heads/master
| 2020-03-18T22:57:00.796878
| 2018-05-30T06:05:05
| 2018-05-30T06:05:05
| 135,371,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,400
|
py
|
import math
import random
# Seed the module-level RNG so the weight initialisation is reproducible.
random.seed(0)

def rand(a,b):
    """Return a uniform random float drawn from [a, b)."""
    span = b - a
    return span*random.random()+a
def make_matrix(m, n, fill=0.0):
    """Create an m x n matrix (list of lists) filled with `fill`.

    Uses a comprehension instead of repeated append; each row is an
    independent list, so mutating one row does not affect the others.
    """
    return [[fill] * n for _ in range(m)]
# Define the sigmoid function and its derivative
def sigmoid(x):
    """Logistic sigmoid, mapping any real x into (0, 1)."""
    denom = 1.0 + math.exp(-x)
    return 1.0 / denom
def sigmoid_derivate(x):
    """Derivative of the sigmoid expressed in terms of its OUTPUT value x:
    if x = sigmoid(t), then d sigmoid/dt = x * (1 - x)."""
    complement = 1 - x
    return x * complement
class BPNeuralNetwork:
    """Fully-connected feed-forward network with one hidden layer, trained
    by backpropagation with a momentum-style correction term."""

    def __init__(self):
        # Initialise all state; real sizes are assigned in setup().
        self.input_n = 0
        self.hidden_n = 0
        self.output_n = 0
        self.input_cells = []
        self.hidden_cells = []
        self.output_cells = []
        self.input_weights = []
        self.output_weights = []
        self.input_correction = []
        self.output_correction = []

    # Three lists hold the neurons of the input, hidden and output layers.
    def setup(self,ni,nh,no):
        """Size the network: ni inputs (+1 bias), nh hidden, no outputs."""
        self.input_n = ni+1  # input layer plus bias term
        self.hidden_n = nh   # hidden layer
        self.output_n = no   # output layer
        # initialise the neuron activation lists
        self.input_cells = [1.0]*self.input_n
        self.hidden_cells= [1.0]*self.hidden_n
        self.output_cells= [1.0]*self.output_n
        # initialise the connection weight matrices
        self.input_weights = make_matrix(self.input_n,self.hidden_n)    # weights: input -> hidden
        self.output_weights = make_matrix(self.hidden_n,self.output_n)  # weights: hidden -> output
        # random initialisation breaks symmetry so backprop can learn
        for i in range(self.input_n):
            for h in range(self.hidden_n):
                self.input_weights[i][h] = rand(-0.2 , 0.2)  # random weight input i -> hidden h
        for h in range(self.hidden_n):
            for o in range(self.output_n):
                self.output_weights[h][o] = rand(-2.0, 2.0)  # random weight hidden h -> output o
        # correction (momentum) matrices used during weight updates
        self.input_correction = make_matrix(self.input_n , self.hidden_n)
        self.output_correction = make_matrix(self.hidden_n,self.output_n)

    def predict(self,inputs):
        """Forward pass: returns a copy of the output-layer activations."""
        # copy the sample into the input layer (bias cell stays 1.0)
        for i in range(self.input_n-1):
            self.input_cells[i] = inputs[i]
        # hidden layer: each node is the sigmoid of its weighted input sum
        for j in range(self.hidden_n):
            total = 0.0
            for i in range(self.input_n):
                total+=self.input_cells[i]*self.input_weights[i][j]
            # one output value per hidden node, fed to the next layer
            self.hidden_cells[j] = sigmoid(total)
        # output layer: same weighted-sum-plus-sigmoid computation
        for k in range(self.output_n):
            total = 0.0
            for j in range(self.hidden_n):
                total+=self.hidden_cells[j]*self.output_weights[j][k]
            self.output_cells[k] = sigmoid(total)
        return self.output_cells[:]

    def back_propagate(self,case,label,learn,correct):
        """One backprop step on (case, label) with learning rate `learn`
        and momentum factor `correct`; returns the (last-output) error."""
        # forward pass on the sample
        self.predict(case)
        # output-layer deltas: derivative of sigmoid times prediction error
        output_deltas = [0.0]*self.output_n
        for o in range(self.output_n):
            error = label[o] - self.output_cells[o]
            output_deltas[o]= sigmoid_derivate(self.output_cells[o])*error
        # hidden-layer deltas, propagated back through output weights
        hidden_deltas = [0.0]*self.hidden_n
        for h in range(self.hidden_n):
            error = 0.0
            for o in range(self.output_n):
                error+=output_deltas[o]*self.output_weights[h][o]
            hidden_deltas[h] = sigmoid_derivate(self.hidden_cells[h])*error
        # update hidden -> output weights
        for h in range(self.hidden_n):
            for o in range(self.output_n):
                change = output_deltas[o]*self.hidden_cells[h]
                # NOTE(review): output_correction is never written, so this
                # momentum term is always zero — confirm whether an
                # assignment like the input_correction one below is missing.
                self.output_weights[h][o] += learn*change + correct*self.output_correction[h][o]
        # update input -> hidden weights
        for i in range(self.input_n):
            for h in range(self.hidden_n):
                change = hidden_deltas[h]*self.input_cells[i]
                self.input_weights[i][h] += learn*change + correct*self.input_correction[i][h]
                self.input_correction[i][h] = change
        # global error (squared-error function)
        error = 0.0
        for o in range(len(label)):
            # NOTE(review): '=' overwrites each term; '+=' would sum over
            # all outputs — as written only the last output contributes.
            error = 0.5*(label[o]-self.output_cells[o])**2
        return error

    def train(self,cases,labels,limit=10000,learn=0.05,correct=0.1):
        """Run `limit` epochs of backprop over the full training set."""
        for i in range(limit):
            error = 0.0
            for j in range(len(cases)):
                label = labels[j]
                case = cases[j]
                error+=self.back_propagate(case,label,learn,correct)

    def test(self):
        """Train on XOR and print the prediction for each input pair."""
        cases = [
            [0, 0],
            [0, 1],
            [1, 0],
            [1, 1],
        ]  # training/test samples
        labels = [[0], [1], [1], [0]]  # XOR targets
        self.setup(2,5,1)  # 2 inputs, 5 hidden, 1 output
        self.train(cases,labels,10000,0.05,0.1)
        for case in cases:
            print(self.predict(case))
if __name__ == '__main__':
    # Train the network on XOR and print the four predictions.
    nn = BPNeuralNetwork()
    nn.test()
|
[
"朱远宏@MSI"
] |
朱远宏@MSI
|
1c941a7cc78d3c56ee8329aa53d4b3d4c645405a
|
a386d0ac7524cc7f0222b4c9861db0cc6cfa1e76
|
/multichat/multichat/urls.py
|
01bc02eaff0925dc1c74d5ff4a1c1431ae4a1742
|
[] |
no_license
|
jorgeOmurillo/sockets
|
aba8e8c42977a6adf285cbefe9a0866f00d5ba0d
|
230cb3d0c8fa83e72b1f0b940a1d7fadd5c14033
|
refs/heads/master
| 2021-01-01T11:56:36.989225
| 2017-07-26T12:31:33
| 2017-07-26T12:31:33
| 97,573,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 994
|
py
|
"""multichat URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.views import login, logout
from chat.views import index
urlpatterns = [
    # Chat homepage (chat.views.index).
    url(r'^$', index, name='homepage'),
    # Django's built-in session auth views.
    url(r'^accounts/login/$', login, name='login'),
    url(r'^accounts/logout/$', logout, name='logout'),
    url(r'^admin/', admin.site.urls),
]
|
[
"jorge@debian"
] |
jorge@debian
|
34019fe74d66ee473c9f78d9730d9b933cee8973
|
4007a7626ccb18480e73ac304b0010f6aeba33fb
|
/proj_preproc/db.py
|
cb9defbbd56297c1bca17df0cef2ee205afdb103
|
[] |
no_license
|
olmozavala/air_pollution_forecast
|
68030748b975d463158f1ce7c7f16eb038493ced
|
5b543b3f4a190d7ae33a55c4f5b30f56b17347c3
|
refs/heads/master
| 2023-07-22T15:16:31.166036
| 2023-06-08T18:55:14
| 2023-06-08T18:55:14
| 226,166,662
| 0
| 0
| null | 2023-02-16T18:40:24
| 2019-12-05T18:41:50
|
Python
|
UTF-8
|
Python
| false
| false
| 129
|
py
|
def eliminateNonContinuousTimes(data, numhours):
"""It eliminates those 'rows' that do not contain 'numhours' continuously"""
|
[
"olmozavala@gmail.com"
] |
olmozavala@gmail.com
|
20fc2a3aa614e91ca3694826f465f2758e097e13
|
c2a172307057164a835c66b1f308c45f7f8b9ebb
|
/partII/deliverable/scripts/col18-2count.py
|
b0ec37e60b1e06586876ffbca16f05fe8cddd68a
|
[] |
no_license
|
jacketsu/NYPD-Crime
|
f219738e55d72fe87fc274343eb9c9e155b91dbe
|
1cf8429f7d4ac0798324c892679ad333ce261ed4
|
refs/heads/master
| 2021-08-31T15:56:39.544216
| 2017-12-22T00:15:23
| 2017-12-22T00:15:23
| 116,798,746
| 12
| 0
| null | 2018-01-09T10:01:12
| 2018-01-09T10:01:10
| null |
UTF-8
|
Python
| false
| false
| 2,396
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys, os
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import *
from pyspark.sql.functions import col, when, udf
from pyspark.sql.types import BooleanType
from pyspark.sql import Row
from csv import reader
from datetime import datetime
# Collect the statistics
def statistic_count_year_boro(rdd):
rdd.map(lambda row: (row, 1)) \
.reduceByKey(lambda x, y: x + y) \
.sortBy(lambda x: x[1], False) \
.map(lambda row: (row[0],row[1])) \
.saveAsTextFile("YEAR_PARKS_NM_count.out")
def statistic_count_month_boro(rdd):
rdd.map(lambda row: (row, 1)) \
.reduceByKey(lambda x, y: x + y) \
.sortBy(lambda x: x[1], False) \
.map(lambda row: (row[0], row[1])) \
.saveAsTextFile("MONTH_PARKS_NM_count.out")
def statistic_count_year_month_boro(rdd):
rdd.map(lambda row: (row, 1)) \
.reduceByKey(lambda x, y: x + y) \
.sortBy(lambda x: x[0], False) \
.map(lambda row: (row[0], row[1])) \
.saveAsTextFile("YEAR_MONTH_PARKS_NM_count.out")
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: spark-submit --packages com.databricks:spark-csv_2.10:1.2.0 col18-2count.py <input>")
exit(-1)
sc = SparkContext()
lines = sc.textFile(sys.argv[1], 1)
header = lines.first()
# Remove the header
lines = lines.filter(lambda x: x != header).mapPartitions(lambda x: reader(x))
lines = lines.map(lambda x: (x[1], x[17])).map(lambda s: (datetime.strptime(s[0], '%m/%d/%Y'), s[1])).filter(lambda x: x[0].year >= 2005)
year = lines.map(lambda x: (x[0].year, x[1]))
month = lines.map(lambda x: (x[0].month, x[1]))
year_month = lines.map(lambda x: (x[0].year, x[0].month, x[1]))
# Collect the statistics
statistic_count_year_boro(year)
statistic_count_month_boro(month)
statistic_count_year_month_boro(year_month)
command = 'hadoop fs -getmerge /user/netID/YEAR_PARKS_NM_count.out YEAR_PARKS_NM_count'
os.system(command)
command = 'hadoop fs -getmerge /user/netID/MONTH_PARKS_NM_count.out MONTH_PARKS_NM_count'
os.system(command)
command = 'hadoop fs -getmerge /user/netID/YEAR_MONTH_PARKS_NM_count.out YEAR_MONTH_PARKS_NM_count'
os.system(command)
sc.stop()
|
[
"cjesse1992@mail.com"
] |
cjesse1992@mail.com
|
d333612c150be3302596df477a7fc3ed0c5f4b67
|
a257bdea8b88202b31cd47ffb2844e9dc2b1c85c
|
/alert_updater.py
|
a12c1eb51bfdddf299d3799e371ba1216900e88a
|
[] |
no_license
|
Arjun-B-J/Online_Price_Alert_app
|
68fcb07e2e14bc7cb1bbb065c237e093a1d07de6
|
a068aaac5d3a481c65cc9b5dd23520ec7c574670
|
refs/heads/master
| 2022-08-21T23:41:29.037668
| 2020-05-23T15:29:38
| 2020-05-23T15:29:38
| 258,994,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
from models.alert import Alert
alerts = Alert.all()
for alert in alerts:
alert.load_item_price()
alert.notify_if_price_reached()
if not alerts:
print("No alerts have been created. Add an item and an alert to begin!")
|
[
"arjunbj@pop-os.localdomain"
] |
arjunbj@pop-os.localdomain
|
4d30a6fa3c0528577067282f97ba50d1b195c27c
|
b593e559732821e85ac28fdbf218469b1793d697
|
/app/models/__init__.py
|
ca2d74b1a2d738ea77b4f831b81daa3ee352be79
|
[] |
no_license
|
CarlosMartorini/api-rest-fast-food
|
3d27945acad80fa4c5e8fa1f7419c8cdc31654e8
|
d267a6df8aa391356da870925b5f62e41b58b331
|
refs/heads/master
| 2023-08-11T00:48:26.076971
| 2021-09-20T21:48:26
| 2021-09-20T21:48:26
| 407,607,702
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 109
|
py
|
from . import populate_table_model
populate_table_model.create_table()
populate_table_model.populate_table()
|
[
"c7.silveira@gmail.com"
] |
c7.silveira@gmail.com
|
d3e12ca839e75d001c22c25c2e6863ed1e501be6
|
c22820b9624bd19d2bfbabf731a9dbfb17d4e5e0
|
/guessinggame.py
|
2a2043e5c2cfb71b203fc7ec5eb3ea764c32520e
|
[] |
no_license
|
faithbuford/all-my-files
|
1873f29afb09a51a716639582e7781429d52ac36
|
e0119537967c9da5e207d9dd72598be223b5b934
|
refs/heads/master
| 2020-08-27T10:13:14.949361
| 2019-10-24T15:56:13
| 2019-10-24T15:56:13
| 217,329,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
import random
mysteryNum = random.randint(1, 100)
score = 0
while True:
guess = int(input("Pick a number between 1 and 100: "))
score = score + 1
if guess == mysteryNum:
print("good guess, you win")
break
elif guess > mysteryNum:
print("Too high, try again")
else:
print("too low, try again")
print("it took you " + str(score) + " guesses")
|
[
"noreply@github.com"
] |
noreply@github.com
|
61964e035530cecf47e0e989c18b99267fbdf452
|
a4842120ab3494f6e0ccf7e412f810df40938a93
|
/blog/migrations/0001_initial.py
|
9b61d34810d8056881fb2e394436816ff1c9bb94
|
[] |
no_license
|
noob261/Blog
|
c8025819d9eaf94802d51da1640607176865b5aa
|
174b11b6bd1a942612190498d74c3b63530003a7
|
refs/heads/master
| 2023-03-07T08:45:45.165464
| 2021-02-17T08:15:46
| 2021-02-17T08:15:46
| 339,649,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,286
|
py
|
# Generated by Django 3.1.6 on 2021-02-16 18:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=250)),
('slug', models.SlugField(max_length=250, unique_for_date='publish')),
('body', models.TextField()),
('publish', models.DateTimeField(default=django.utils.timezone.now)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('status', models.CharField(choices=[('draft', 'Draft'), ('published', 'Published')], default='draft', max_length=10)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"2740772761@qq.com"
] |
2740772761@qq.com
|
5bc363daf36b7861426faf8ca0f96fb85bcaa62c
|
136b7e39a0f6269691710b05065ff84f2c63d87e
|
/dnd_machine/shell_scripts/tiefling_db.py
|
397493a23e9a066a86b1c82b7776b5f46fe0bc1a
|
[] |
no_license
|
JustinHeidema/dnd_machine
|
988a31757fa3b5262cc215be68154f1ca65a7360
|
d5da41d70a39ba8708735dffdb2009d22d5b811b
|
refs/heads/master
| 2022-12-13T21:36:48.505669
| 2018-01-05T23:21:14
| 2018-01-05T23:21:14
| 115,017,831
| 0
| 1
| null | 2022-11-28T20:42:23
| 2017-12-21T15:03:43
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,049
|
py
|
from character_builder.models import Race, RaceTrait, Language
from character_builder.serializers import RaceSerializer, RaceTraitSerializer
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
tiefling = Race.objects.create(race_name='Tiefling',
intelligence_modifier=2,
charisma_modifier=1,
speed=25)
RaceTrait.objects.create(race_name=tiefling,
trait_name='Darkvision',
trait_effect='Thanks to your infernal heritage, you'
'have superior vision in dark and dim conditions. You'
'can see in dim light within 60 feet of you as if it were'
'bright light, and in darkness as if it w ere dim light. You'
'can\'t discern color in darkness, only shades o f gray.')
RaceTrait.objects.create(race_name=tiefling,
trait_name='Hellish Resilience',
trait_effect='You have resistance to fire damage')
RaceTrait.objects.create(race_name=tiefling,
trait_name='Infernal Legacy',
trait_effect='You know the thaumaturgy cantrip.'
'Once you reach 3rd level, you can cast the hellish'
'rebuke spell once per day as a 2nd-level spell. O nce you'
'reach 5th level, you can also cast the darkness spell'
'once per day. Charisma is your spellcasting ability for'
'these spells')
Language.objects.create(race_name=tiefling,
language='Common')
Language.objects.create(race_name=tiefling,
language='Infernal')
tiefling_serializer = RaceSerializer(instance=tiefling)
tiefling_serializer.data
|
[
"jrheidema@gmail.com"
] |
jrheidema@gmail.com
|
12f19a36a5f494443c16decf84b5d836dbc6dd08
|
71bccd94849abe90d003b36cd32807f405ebfc54
|
/day03/data.py
|
28829045d7ad061daf69667d45fea83d564d32ee
|
[] |
no_license
|
kezhangziyou/Python-study
|
2e25b832abb353aa54def6f57fffaad6554fc4f4
|
341915a7208036c83f9019787a25c8c253243e68
|
refs/heads/master
| 2020-08-27T22:15:16.084826
| 2019-10-28T05:43:08
| 2019-10-28T05:43:08
| 217,502,461
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,994
|
py
|
# 一 ,变量
"""
变量在使用谦都必须赋值,赋值才会被创建,不需要声明
不同类型数值可以随便赋值
java 是动态语言,赋值确定类型,python是动态语言
用全部是大写的变量名表示常量.
"""
name = "zhangke"
# 二 ,数据类型
"""
Python3 中有六个标准的数据类型:Number(数字)、String(字符串)、
List(列表)、Tuple(元组)、Sets(集合)、Dictionary(字典)。
Python3 的六个标准数据类型中:
不可变数据(3 个):Number(数字)、String(字符串)、Tuple(元组);
可变数据(3 个): List(列表)、Dictionary(字典)、Set(集合)。
"""
# 1-Number(数字)
"""
Python 支持三种不同的数值类型:
整型(Int) - 通常被称为是整型或整数,是正或负整数,不带小数点。Python3 整型是没有限制大小的,
可以当作 Long 类型使用,所以 Python3 没有 Python2 的 Long 类型。
浮点型(float) - 浮点型由整数部分与小数部分组成,浮点型也可以使用科学计数法表示(2.5e2 = 2.5 x 102 = 250)
复数( (complex)) - 复数由实数部分和虚数部分构成,可以用a + bj,或者complex(a,b)表示, 复数的实部a和虚部b都是浮点型。
"""
counter = 100 # 整型变量
miles = 1000.0 # 浮点型变量
name = "test" # 字符串
print(counter)
print(miles)
print(name)
# 2-数字类型转换
"""
int(x) 将x转换为一个整数。
float(x) 将x转换到一个浮点数。
complex(x) 将x转换到一个复数,实数部分为 x,虚数部分为 0。
complex(x, y) 将 x 和 y 转换到一个复数,实数部分为 x,虚数部分为 y。x 和 y 是数字表达式。 额外说明
和别的语言一样,数字类型支持各种常见的运算,不过 Python 的运算比别的大多数常见语言都更加丰富,此外,还有大量丰富的方法,提供更高效的开发。
"""
print(5 + 4) # 加法 输出 9
print(4.3 - 2) # 减法 输出 2.3
print(3 * 7) # 乘法 输出 21
print(2 / 4) # 除法,得到一个浮点数 输出 0.5
print(7 // 4) # 除法取整数,得到一个整数 输出 0
print(17 % 3) # 取余 输出 2
print(2 ** 5) # 乘方 输出 32
# 3-String(字符串)
"""
创建字符串可以使用单引号、双引号、三单引号和三双引号,其中三引号可以多行定义字符串,
Python 不支持单字符类型,单字符也在Python也是作为一个字符串使用。
我们定义一个 s=’python’语句,它在计算机中的执行顺序是先在内存中创建一个字符串Python,
在程序栈寄存器中创建一个变量s,最后把Python的地址赋给s 。
"""
s = '学习Python'
print(s)
# 3.1 切片
print(s[0]) #学
print(s[-1]) #n
print(s[3:]) # ython
print(s[::-1]) #nohtyP习学
# 3.2 替换,还可以使用正则表达式替换
s.replace('Python', 'Java') # '学习Java'
# 3.3 查找,find()、index()、rfind()、rindex()
s.find('P') # 3, 返回第一次出现的子串的下标
s.find('h', 2) # 6, 设定下标2开始查找
s.find('23333') # -1, 查找不到返回-1
s.index('y') # 4, 返回第一次出现的子串的下标
s.index('P') # 不同与find(), 查找不到会抛出异常
# 3.4 转大小写, upper()、lower()、swapcase()、capitalize()、
# istitle()、isupper()、islower()
s.upper() # '学习PYTHON'
s.swapcase() # '学习pYTHON', 大小写互换
s.istitle() # True
s.islower() # False
# 3.5 去空格,strip()、lstrip()、rstrip()
# 3.6 格式化
s1 = '%s %s' % ('Windrivder', 21) # 'Windrivder 21'
print("s1: "+s1)
s2 = '{}, {}'.format(21, 'Windridver') # 推荐使用format格式化字符串
print("s2: "+s2)
s3 = '{0}, {1}, {0}'.format('Windrivder', 21)
print("s3: "+s3)
s4 = '{name}: {age}'.format(age=21, name='Windrivder')
print("s4: "+s4)
# 3.7 连接与分割,使用 + 连接字符串,每次操作会重新计算、开辟、释放内存,
# 效率很低,所以推荐使用join
l = ['2017', '03', '29', '22:00']
s5 = '-'.join(l) # '2017-03-29-22:00'
print("s5: "+s5)
s6 = s5.split('-') # ['2017', '03', '29', '22:00']
print("s6: ")
print(s6)
# 3.8 字符串编码,所有的Python字符串都是Unicode字符串,当需要将文件保存到外设或
# 进行网络传输时,就要进行编码转换,将字符转换为字节,以提高效率encode将字符转换为字节
str = '学习Python'
print(str)
print(str.encode()) # 默认编码是 UTF-8 输出:b'\xe5\xad\xa6\xe4\xb9\xa0Python'
print(str.encode('gbk')) # 输出 b'\xd1\xa7\xcf\xb0Python'
# decode 将字节转换为字符
print(str.encode().decode('utf8')) # 输出 '学习Python'
print(str.encode('gbk').decode('gbk')) # 输出 '学习Python'
# 4-List(列表),类似 Java List 集合接口
"""
列表是写在方括号 [] 之间、用逗号分隔开的元素列表,列表可以完成大多数集合类的数据结构实现。列表中元素的类型可以不相同,
它支持数字,字符串甚至可以包含列表(所谓嵌套),列表中的元素是可以改变。
"""
Weekday = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
print("Weekday[0]: ")
print(Weekday[0]) # 输出 Monday
# list 搜索
print("Weekday.index(\"Wednesday\"): ")
print(Weekday.index("Wednesday"))
# list 增加元素
Weekday.append("new")
print("Weekday,append: ")
print(Weekday)
# list 删除
Weekday.remove("Thursday")
print("Weekday,remove: ")
print(Weekday)
# 5-Tuple(元组),元素不能修改
"""
元组(tuple)与列表类似,不同之处在于元组的元素不能修改。元组写在小括号 () 里,
元素之间用逗号隔开,组中的元素类型也可以不相同。
"""
letters = ('a', 'b', 'c', 'd', 'e', 'f', 'g')
print("letters[0]: ");print(letters[0]) # 输出 'a'
print("letters[0:3]: ");print(letters[0:3]) # 输出一组 ('a', 'b', 'c')
# 6-Sets(集合),类似 Java Set 集合接口
"""
集合(set)是一个无序不重复元素的序列,使用大括号 {} 或者 set() 函数创建集合,
注意:创建一个空集合必须用 set() 而不是 {} ,因为 {} 是用来创建一个空字典。
集合不能被切片也不能被索引,除了做集合运算之外,集合元素可以被添加还有删除:
"""
a_set = {1, 2, 3, 4}
# 添加
a_set.add(5)
print("a_set,add:")
print(a_set) # 输出{1, 2, 3, 4, 5}
# 删除
a_set.discard(5)
print("a_set,discard:")
print(a_set) # 输出{1, 2, 3, 4}
# Dictionary(字典)类似 Java Map 集合接口
"""
字典是一种映射类型,它的元素是键值对,字典的关键字必须为不可变类型,且不能重复。创建空字典使用 {} 。
"""
Logo_code = {
'BIDU': 'Baidu',
'SINA': 'Sina',
'YOKU': 'Youku'
}
print(Logo_code)
# 输出{'BIDU': 'Baidu', 'YOKU': 'Youku', 'SINA': 'Sina'}
print(Logo_code['SINA']) # 输出键为 'one' 的值
print(Logo_code.keys()) # 输出所有键
print(Logo_code.values()) # 输出所有值
print(len(Logo_code)) # 输出字段长度
|
[
"zhangke306shdx@163.com"
] |
zhangke306shdx@163.com
|
f0d08498b71dff3d22c4c9ba80d85b77e29e3bff
|
a812cc02aae5fcc1ced41a90f5f5224cd5e6f193
|
/SMDB/smdb/models.py
|
d88212a1b57352def4728636406de17f5fb5b387
|
[] |
no_license
|
pgaspar/Social-Movie-Database
|
4ae48af923b973c6cc94aabb609902951e465917
|
39419092cd921d69a06181ac52dc75f99c0c36fe
|
refs/heads/master
| 2021-01-20T02:01:36.750067
| 2011-01-18T18:51:16
| 2011-01-18T18:51:16
| 1,114,952
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
from django.db import models
from django.contrib.auth.models import User
from smdb.semantic_models import SMDBUser
from rdflib import Literal, URIRef
class UserProfile(models.Model):
user = models.ForeignKey(User, unique=True)
_uri = models.CharField("URI", max_length=200)
def __unicode__(self):
return u"%s's Profile" % self.user.get_full_name()
@property
def uri(self):
return URIRef(self._uri)
@property
def semantic_user(self):
return SMDBUser(self._uri)
def get_absolute_url(self):
return self._uri
|
[
"pedro.gaxpar@gmail.com"
] |
pedro.gaxpar@gmail.com
|
09572d51a9baebb119cc93587532859f3a45d8fb
|
30a34b3503decf1b4516039df3106cd152631819
|
/4AL17IS006_VISHAK_AMIN/25May20/1.py
|
b6594db024945f9e47661e5fab76b0d0d9017085
|
[] |
no_license
|
alvas-education-foundation/ISE_3rd_Year_Coding_challenge
|
8ddb6c325bf6ab63e2f73d16573fa0b6e2484136
|
b4074cab4a47aad07ed0fa426eacccbfafdef7f8
|
refs/heads/master
| 2022-11-23T20:52:19.204693
| 2020-07-23T11:28:15
| 2020-07-23T11:28:15
| 265,195,514
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
#Codevita Coin Distribution Problem
#
value = int(input())
five = int((value - 4)/5)
if(value-five*5)%2:
one = 2
else:
one = 1
two = int((value - 5*five-1*one)/2)
sum = one + two +five
print(sum,five,two,one)
|
[
"aminvishak@gmail.com"
] |
aminvishak@gmail.com
|
983525aeb3a369cf1bd12f914b3440516b86d99a
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/leetcode-cn/0670.0_Maximum_Swap.py
|
33229f1e40f8b51a167f593d9fa16ce58ddebd89
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792
| 2023-08-22T16:43:36
| 2023-08-22T16:43:36
| 153,090,613
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 832
|
py
|
'''
执行用时:36 ms, 在所有 Python3 提交中击败了68.71% 的用户
内存消耗:14.9 MB, 在所有 Python3 提交中击败了49.11% 的用户
通过测试用例:111 / 111
'''
class Solution:
def maximumSwap(self, num: int) -> int:
digits = list(str(num))
s_digits = sorted(digits, reverse=True)
if digits == s_digits:
return num
def max_index_after(i):
m = i
for j in range(i + 1, len(digits)):
if digits[j] >= digits[m]:
m = j
return m
n = len(digits)
for i in range(n - 1):
j = max_index_after(i)
if digits[i] < digits[j]:
digits[i], digits[j] = digits[j], digits[i]
break
return int(''.join(digits))
|
[
"838255715@qq.com"
] |
838255715@qq.com
|
f3ead3c08207f5c723c776c4b530e82595d97891
|
0a84236c084e96a95ed99a67b8f20505931232b3
|
/youdao_spider/settings.py
|
1150c06782e62e57239dedf3f31b41c9feed1cd0
|
[] |
no_license
|
hl1227/spider_youdao
|
8d29b2beab2cdde4086e79f4800b1f6016b52b8b
|
c12df7699df69f6d706d6c530eca896fdc1c563d
|
refs/heads/master
| 2023-04-02T22:47:40.858990
| 2021-04-02T02:35:01
| 2021-04-02T02:35:01
| 353,875,479
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,722
|
py
|
# Scrapy settings for youdao_spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'youdao_spider'
SPIDER_MODULES = ['youdao_spider.spiders']
NEWSPIDER_MODULE = 'youdao_spider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'youdao_spider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 64
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 0.33
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 64
CONCURRENT_REQUESTS_PER_IP = 0
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'youdao_spider.middlewares.YoudaoSpiderSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'youdao_spider.middlewares.YoudaoSpiderDownloaderMiddleware': 543,
#'gerapy_proxy.middlewares.ProxyPoolMiddleware':544,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'youdao_spider.pipelines.YoudaoSpiderPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
LOG_ENABLED=True
LOG_LEVEL='WARNING'
LOG_ENCODING='utf-8'
HTTPERROR_ALLOWED_CODES = [403,404,407,429,500,502,503,504,505]
#增大这个线程池,默认10
REACTOR_THREADPOOL_MAXSIZE = 20
#开启重试
RETRY_ENABLED = True
#重试次数
RETRY_TIMES:5
#超时
DOWNLOAD_TIMEOUT = 15
#算法方式:
# DEPTH_PRIORITY = 1
# SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleFifoDiskQueue'
# SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.FifoMemoryQueue'
#禁止重定向
REDIRECT_ENABLED = False
#设置错误数关闭爬虫
#CLOSESPIDER_ERRORCOUNT = 10
|
[
"spider@example.com"
] |
spider@example.com
|
ed82d43819a50cc8adfb850789934e1c87866bb5
|
2daa3894e6d6929fd04145100d8a3be5eedbe21c
|
/tests/artificial/transf_sqrt/trend_poly/cycle_0/ar_12/test_artificial_128_sqrt_poly_0_12_100.py
|
d9d41e9db30a39012110217754bd29484e6124f5
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Henri-Lo/pyaf
|
a1f73a0cc807873bd7b79648fe51de9cfd6c126a
|
08c968425d85dcace974d90db7f07c845a0fe914
|
refs/heads/master
| 2021-07-01T12:27:31.600232
| 2017-09-21T11:19:04
| 2017-09-21T11:19:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 306
|
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
dataset = tsds.generate_random_TS(N = 128 , FREQ = 'D', seed = 0, trendtype = "poly", cycle_length = 0, transform = "sqrt", sigma = 0.0, exog_count = 100, ar_order = 12);
art.process_dataset(dataset);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
a09eae147b7a64aca6fda1554bf806f38342edc4
|
ead4f93f1188adbef0bd5dd015a524256b21e219
|
/API/inventory/forms.py
|
792c7836c322b41e1b8ce80794ae1f1274e98f07
|
[] |
no_license
|
65LimesAH/HalalCuts-API
|
9333cd22c3f38199686f2368527da744dcaa6551
|
8f1dd573b031bfae847d036f2ea3ef7c12472b2a
|
refs/heads/master
| 2023-01-05T16:37:14.826561
| 2020-10-17T23:37:18
| 2020-10-17T23:37:18
| 304,987,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
from django import forms
from .models import Menu
class MenuItemForm(forms.ModelForm):
class Meta:
model = Menu
fields = [
'hidden',
'name',
'alternateName',
'code',
'sku',
'price',
'unitName',
'cost',
'isRevenue',
'itemGroup',
'priceType',
'taxRate',
'canonical',
'itemStock',
'options',
]
|
[
"Andrew.Heldt@65limes.com"
] |
Andrew.Heldt@65limes.com
|
e4b9ae8070fb64421cd1a17b81be4ca33bd507bd
|
b3b066a566618f49ae83c81e963543a9b956a00a
|
/Intermediate Data Visualization with Seaborn/04_Creating Plots on Data Aware Grids/04_Building a PairGrid.py
|
9a654d32b4feb9d7dc4923d50e47cef330e416b7
|
[] |
no_license
|
ahmed-gharib89/DataCamp_Data_Scientist_with_Python_2020
|
666c4129c3f0b5d759b511529a365dfd36c12f1a
|
f3d20b788c8ef766e7c86c817e6c2ef7b69520b8
|
refs/heads/master
| 2022-12-22T21:09:13.955273
| 2020-09-30T01:16:05
| 2020-09-30T01:16:05
| 289,991,534
| 2
| 0
| null | 2020-08-24T17:15:43
| 2020-08-24T17:15:42
| null |
UTF-8
|
Python
| false
| false
| 1,354
|
py
|
"""
Building a PairGrid
When exploring a dataset, one of the earliest tasks is exploring the relationship between pairs of variables. This step is normally a precursor to additional investigation.
Seaborn supports this pair-wise analysis using the PairGrid. In this exercise, we will look at the Car Insurance Premium data we analyzed in Chapter 1. All data is available in the df variable.
Instructions 1/2
50 XP
1
2
Compare "fatal_collisions" to "premiums" by using a scatter plot mapped to a PairGrid()."""
# Create a PairGrid with a scatter plot for fatal_collisions and premiums
g = sns.PairGrid(df, vars=["fatal_collisions", "premiums"])
g2 = g.map(plt.scatter)
plt.show()
plt.clf()
"""
Create another PairGrid but plot a histogram on the diagonal and scatter plot on the off diagonal.
"""
# Create the same PairGrid but map a histogram on the diag
g = sns.PairGrid(df, vars=["fatal_collisions", "premiums"])
g2 = g.map_diag(plt.hist)
g3 = g2.map_offdiag(plt.scatter)
plt.show()
plt.clf()
#========================================================#
# DEVELOPER #
# BasitAminBhatti #
# Github #
# https://github.com/basitaminbhatti #
#========================================================#
|
[
"Your-Email"
] |
Your-Email
|
bdce9ca6acb87cf1e40299efade42b89dec4c38a
|
9de27e623c85b0d55da4afe4d843fe321b77954d
|
/Configuration/Geometry/python/GeometryDD4hepExtended2026D76_cff.py
|
1905e4de5a22bdb37bd5f0728b7fe57a842f0dc7
|
[
"Apache-2.0"
] |
permissive
|
PFCal-dev/cmssw
|
a97d566d691bc5ac900e48c632f4e87a005f94a2
|
232187f0f8a201210426312b27a1b62e55b6084c
|
refs/heads/hgc-tpg-devel-CMSSW_12_0_0_pre3
| 2022-06-01T08:27:39.166655
| 2021-11-23T15:28:18
| 2021-11-23T15:28:18
| 14,498,276
| 4
| 7
|
Apache-2.0
| 2022-02-08T11:01:38
| 2013-11-18T16:34:32
|
C++
|
UTF-8
|
Python
| false
| false
| 924
|
py
|
import FWCore.ParameterSet.Config as cms
# This config was generated automatically using generate2026Geometry.py
# If you notice a mistake, please update the generating script, not just this config
from Configuration.Geometry.GeometryDD4hep_cff import *
DDDetectorESProducer.confGeomXMLFiles = cms.FileInPath("Geometry/CMSCommonData/data/dd4hep/cmsExtendedGeometry2026D76.xml")
from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi import *
from Geometry.EcalCommonData.ecalSimulationParameters_cff import *
from Geometry.HcalCommonData.hcalDDDSimConstants_cff import *
from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import *
from Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import *
from Geometry.MuonNumbering.muonGeometryConstants_cff import *
from Geometry.MuonNumbering.muonOffsetESProducer_cff import *
from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import *
|
[
"sunanda.banerjee@cern.ch"
] |
sunanda.banerjee@cern.ch
|
d1059b3e7acad5c413ee529d9f6dcd5d530089a0
|
b2135e3fc77666f043f0fbafd0d88ed9865d5b4f
|
/7183 Python Basics/05 Chapter 1.5 - About Lists/01 List basics/78629_06_code.py
|
6793446fdad2714f94b65334724482bacae4071d
|
[] |
no_license
|
Felienne/spea
|
164d05e9fbba82c7b7df8d00295f7157054f9248
|
ecb06c66aaf6a2dced3f141ca415be9efb7dbff5
|
refs/heads/master
| 2020-03-17T17:35:27.302219
| 2018-05-17T10:14:49
| 2018-05-17T10:14:49
| 133,794,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 97
|
py
|
# What is the length of an empty list?
nothing = []
length = len(nothing)
assertEqual(length, __)
|
[
"felienne@gmail.com"
] |
felienne@gmail.com
|
41fcaac041d3a6ffed68e1249826d29ae5121a5d
|
54f072c42ab7d93b29c2b40d2d1a922b4df54390
|
/brew_journal/authentication/models.py
|
e484b4449d680e2d66d67f0d0df45ecabce50bfc
|
[
"Apache-2.0"
] |
permissive
|
moonboy13/brew-journal
|
111a2cccbc53f5c3ce41c60ca7f454c1ef97d3e3
|
6d0a31f021755425d420394d84aa7250f86f5ebe
|
refs/heads/master
| 2023-02-19T15:29:32.014798
| 2022-03-25T00:06:44
| 2022-03-25T00:06:44
| 51,627,663
| 0
| 0
|
Apache-2.0
| 2023-02-08T00:43:05
| 2016-02-13T01:53:24
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,636
|
py
|
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
# Create your models here.
class AccountManager(BaseUserManager):
"""Manager class for handling interactions with the Account model"""
def create_user(self, username, password=None, **kwargs):
if not username:
raise ValueError('Users must have a valid username.')
# TODO: Add graceful validation for the username, since its unique should offer unique names
# upon failure
account = self.model(
username=username, email=self.normalize_email(kwargs.get('email'))
)
account.set_password(password)
account.save()
return account
def create_superuser(self, email, password, **kwargs):
account = self.create_user(email, password, **kwargs)
account.is_admin = True
account.save()
return account
class Account(AbstractBaseUser):
"""Customized User model"""
username = models.CharField(max_length=40, unique=True)
email = models.EmailField(blank=True)
first_name = models.CharField(max_length=40, blank=True)
last_name = models.CharField(max_length=40, blank=True)
is_admin = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = AccountManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
def __unicode__(self):
return self.username
def get_full_name(self):
return ' '.join([self.first_name, self.last_name])
def get_short_name(self):
return self.first_name
|
[
"conlonkyle@gmail.com"
] |
conlonkyle@gmail.com
|
b0f9b6b001cd1ae84ad5d4eaeb9896f41feb9f81
|
d8a714be08dab3ed0448341985b52cebaa9a9625
|
/train/train.py
|
5289b671842b695ff6af4e7abf866aeb8c10c447
|
[] |
no_license
|
otichibueze/SageMaker_PyTorch
|
e3fdc521c85dd927fe6b93298073283894e358ef
|
9ee1c8f5bc89b3155da632e66bfc74d4d9bc4dbe
|
refs/heads/master
| 2022-11-29T23:34:07.367945
| 2020-08-14T00:01:24
| 2020-08-14T00:01:24
| 287,403,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,407
|
py
|
import argparse
import json
import os
import pickle
import sys
import sagemaker_containers
import pandas as pd
import torch
import torch.optim as optim
import torch.utils.data
from model import LSTMClassifier
def model_fn(model_dir):
"""Load the PyTorch model from the `model_dir` directory."""
print("Loading model.")
# First, load the parameters used to create the model.
model_info = {}
model_info_path = os.path.join(model_dir, 'model_info.pth')
with open(model_info_path, 'rb') as f:
model_info = torch.load(f)
print("model_info: {}".format(model_info))
# Determine the device and construct the model.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])
# Load the stored model parameters.
model_path = os.path.join(model_dir, 'model.pth')
with open(model_path, 'rb') as f:
model.load_state_dict(torch.load(f))
# Load the saved word_dict.
word_dict_path = os.path.join(model_dir, 'word_dict.pkl')
with open(word_dict_path, 'rb') as f:
model.word_dict = pickle.load(f)
model.to(device).eval()
print("Done loading model.")
return model
def _get_train_data_loader(batch_size, training_dir):
print("Get train data loader.")
train_data = pd.read_csv(os.path.join(training_dir, "train.csv"), header=None, names=None)
train_y = torch.from_numpy(train_data[[0]].values).float().squeeze()
train_X = torch.from_numpy(train_data.drop([0], axis=1).values).long()
train_ds = torch.utils.data.TensorDataset(train_X, train_y)
return torch.utils.data.DataLoader(train_ds, batch_size=batch_size)
def train(model, train_loader, epochs, optimizer, loss_fn, device):
    """
    This is the training method that is called by the PyTorch training script. The parameters
    passed are as follows:
    model - The PyTorch model that we wish to train.
    train_loader - The PyTorch DataLoader that should be used during training.
    epochs - The total number of epochs to train for.
    optimizer - The optimizer to use during training.
    loss_fn - The loss function used for training.
    device - Where the model and data should be loaded (gpu or cpu).
    """
    for epoch in range(1, epochs + 1):
        model.train()
        total_loss = 0
        for batch in train_loader:
            batch_X, batch_y = batch

            batch_X = batch_X.to(device)
            batch_y = batch_y.to(device)

            # Reset gradients accumulated from the previous step; clearing
            # via the optimizer is the idiomatic equivalent of
            # model.zero_grad() when the optimizer holds all model params.
            optimizer.zero_grad()

            # Forward pass, then backprop on the batch loss.
            output = model(batch_X)
            # BCELoss expects float targets and a squeezed prediction vector.
            loss = loss_fn(output.squeeze(), batch_y.float())
            loss.backward()
            optimizer.step()

            # .item() extracts the Python scalar without the deprecated
            # .data attribute access.
            total_loss += loss.item()
        print("Epoch: {}, BCELoss: {}".format(epoch, total_loss / len(train_loader)))
if __name__ == '__main__':
    # All of the model parameters and training parameters are sent as arguments when the script
    # is executed. Here we set up an argument parser to easily access the parameters.
    parser = argparse.ArgumentParser()

    # Training Parameters
    parser.add_argument('--batch-size', type=int, default=512, metavar='N',
                        help='input batch size for training (default: 512)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')

    # Model Parameters
    parser.add_argument('--embedding_dim', type=int, default=32, metavar='N',
                        help='size of the word embeddings (default: 32)')
    parser.add_argument('--hidden_dim', type=int, default=100, metavar='N',
                        help='size of the hidden dimension (default: 100)')
    parser.add_argument('--vocab_size', type=int, default=5000, metavar='N',
                        help='size of the vocabulary (default: 5000)')

    # SageMaker Parameters: the training container injects these SM_* environment
    # variables, so the defaults are taken from the environment rather than CLI flags.
    # NOTE(review): type=list would split a CLI-supplied string into characters;
    # in practice only the JSON-parsed default is used — confirm before relying on it.
    parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
    parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
    parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
    parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAINING'])
    # NOTE(review): argparse does not apply `type` to defaults, so args.num_gpus
    # may remain a string when taken from the environment.
    parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])

    args = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Using device {}.".format(device))

    # Seed for reproducible runs.
    torch.manual_seed(args.seed)

    # Load the training data.
    train_loader = _get_train_data_loader(args.batch_size, args.data_dir)

    # Build the model.
    model = LSTMClassifier(args.embedding_dim, args.hidden_dim, args.vocab_size).to(device)

    # The word dictionary produced during preprocessing travels with the data channel.
    with open(os.path.join(args.data_dir, "word_dict.pkl"), "rb") as f:
        model.word_dict = pickle.load(f)

    print("Model loaded with embedding_dim {}, hidden_dim {}, vocab_size {}.".format(
        args.embedding_dim, args.hidden_dim, args.vocab_size
    ))

    # Train the model.
    optimizer = optim.Adam(model.parameters())
    loss_fn = torch.nn.BCELoss()

    train(model, train_loader, args.epochs, optimizer, loss_fn, device)

    # Save the parameters used to construct the model
    model_info_path = os.path.join(args.model_dir, 'model_info.pth')
    with open(model_info_path, 'wb') as f:
        model_info = {
            'embedding_dim': args.embedding_dim,
            'hidden_dim': args.hidden_dim,
            'vocab_size': args.vocab_size,
        }
        torch.save(model_info, f)

    # Save the word_dict
    word_dict_path = os.path.join(args.model_dir, 'word_dict.pkl')
    with open(word_dict_path, 'wb') as f:
        pickle.dump(model.word_dict, f)

    # Save the model parameters; .cpu() ensures the artifact loads on CPU-only hosts.
    model_path = os.path.join(args.model_dir, 'model.pth')
    with open(model_path, 'wb') as f:
        torch.save(model.cpu().state_dict(), f)
|
[
"otichibueze@gmail.com"
] |
otichibueze@gmail.com
|
649d76925e81b3a260732ead9f7c2f79e696308d
|
9f387c703dbf4d970d0259424c7b299108c369f5
|
/dd_sdk_1_0/dd_sdk_1_0/api/ddboost_storage_units_api.py
|
9c5ab1a86a2c4d83f0fe5eb6e1108b8fe66de0c0
|
[] |
no_license
|
gcezaralmeida/datadomain_sdk_python
|
c989e6846bae9435c523ab09e230fc12d020f7f1
|
e102ec85cea5d888c8329626892347571832e079
|
refs/heads/main
| 2023-08-23T22:42:47.083754
| 2021-10-25T21:52:49
| 2021-10-25T21:52:49
| 370,805,524
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 60,436
|
py
|
# coding: utf-8
"""
DataDomain Rest API Documentation
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from dd_sdk_1_0.api_client import ApiClient
class DdboostStorageUnitsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get(self, system_id, **kwargs): # noqa: E501
"""Get DDBoost storage unit information. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get(system_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param int page: page number, starting from 0 @#$type=xs:unsignedInt
:param int size: Paging size @#$type=xs:unsignedInt
:param str sort: sort=\"name,role\". For descending order, prefix the key with a dash (-). Ex: -name @#$type=ddboostStorageUnitSortQuery
:param str filter: filter=\"name=value\". value should be a valid regular expression. @#$type=ddboostStorageUnitFilterQuery
:param str exclude_fields: Comma separated list of fields to be excluded from response object. Required and general fields such as paging will not be excluded. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
:param str include_fields: Comma separated list of fields to be included in response object. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
:return: DdboostStorageUnitInfos
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get_with_http_info(system_id, **kwargs) # noqa: E501
else:
(data) = self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get_with_http_info(system_id, **kwargs) # noqa: E501
return data
    def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get_with_http_info(self, system_id, **kwargs):  # noqa: E501
        """Get DDBoost storage unit information.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get_with_http_info(system_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str system_id: DD system identifier. @#$type=xs:string (required)
        :param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
        :param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
        :param int page: page number, starting from 0 @#$type=xs:unsignedInt
        :param int size: Paging size @#$type=xs:unsignedInt
        :param str sort: sort=\"name,role\". For descending order, prefix the key with a dash (-). Ex: -name @#$type=ddboostStorageUnitSortQuery
        :param str filter: filter=\"name=value\". value should be a valid regular expression. @#$type=ddboostStorageUnitFilterQuery
        :param str exclude_fields: Comma separated list of fields to be excluded from response object. Required and general fields such as paging will not be excluded. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
        :param str include_fields: Comma separated list of fields to be included in response object. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
        :return: DdboostStorageUnitInfos
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: swagger-codegen generated; keep hand edits to comments only.
        # Caller-visible kwargs plus the internal transport options.
        all_params = ['system_id', 'authorization', 'x_dd_auth_token', 'page', 'size', 'sort', 'filter', 'exclude_fields', 'include_fields']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge kwargs into the params dict, rejecting unknown keys.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'system_id' is set
        if self.api_client.client_side_validation and ('system_id' not in params or
                                                       params['system_id'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `system_id` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get`")  # noqa: E501

        # Client-side validation mirrors the constraints declared in the spec.
        if self.api_client.client_side_validation and ('x_dd_auth_token' in params and
                                                       len(params['x_dd_auth_token']) < 1):
            raise ValueError("Invalid value for parameter `x_dd_auth_token` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get`, length must be greater than or equal to `1`")  # noqa: E501
        if self.api_client.client_side_validation and ('page' in params and params['page'] < 0):  # noqa: E501
            raise ValueError("Invalid value for parameter `page` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get`, must be a value greater than or equal to `0`")  # noqa: E501
        if self.api_client.client_side_validation and ('size' in params and params['size'] < 0):  # noqa: E501
            raise ValueError("Invalid value for parameter `size` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get`, must be a value greater than or equal to `0`")  # noqa: E501
        if self.api_client.client_side_validation and ('sort' in params and not re.search(r'^(\\s*-?(name)\\s*,)*\\s*-?(name)\\s*$', params['sort'])):  # noqa: E501
            raise ValueError("Invalid value for parameter `sort` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get`, must conform to the pattern `/^(\\s*-?(name)\\s*,)*\\s*-?(name)\\s*$/`")  # noqa: E501
        if self.api_client.client_side_validation and ('filter' in params and not re.search(r'^(\\s*(name)\\s*=\\s*(\\S*|(\\([^,]*,[^\\)]*\\))|(\"([^\"]*(\\\")*)*\"))\\s+[aA][nN][dD]\\s+)*\\s*(name)\\s*=\\s*(\\S*|(\\([^,]*,[^\\)]*\\))|(\"([^\"]*(\\\")*)*\"))\\s*$', params['filter'])):  # noqa: E501
            raise ValueError("Invalid value for parameter `filter` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get`, must conform to the pattern `/^(\\s*(name)\\s*=\\s*(\\S*|(\\([^,]*,[^\\)]*\\))|(\"([^\"]*(\\\")*)*\"))\\s+[aA][nN][dD]\\s+)*\\s*(name)\\s*=\\s*(\\S*|(\\([^,]*,[^\\)]*\\))|(\"([^\"]*(\\\")*)*\"))\\s*$/`")  # noqa: E501
        if self.api_client.client_side_validation and ('exclude_fields' in params and not re.search(r'^([^,]+,*)+$', params['exclude_fields'])):  # noqa: E501
            raise ValueError("Invalid value for parameter `exclude_fields` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get`, must conform to the pattern `/^([^,]+,*)+$/`")  # noqa: E501
        if self.api_client.client_side_validation and ('include_fields' in params and not re.search(r'^([^,]+,*)+$', params['include_fields'])):  # noqa: E501
            raise ValueError("Invalid value for parameter `include_fields` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get`, must conform to the pattern `/^([^,]+,*)+$/`")  # noqa: E501

        collection_formats = {}

        # SYSTEM-ID is substituted into the URL path template below.
        path_params = {}
        if 'system_id' in params:
            path_params['SYSTEM-ID'] = params['system_id']  # noqa: E501

        query_params = []
        if 'page' in params:
            query_params.append(('page', params['page']))  # noqa: E501
        if 'size' in params:
            query_params.append(('size', params['size']))  # noqa: E501
        if 'sort' in params:
            query_params.append(('sort', params['sort']))  # noqa: E501
        if 'filter' in params:
            query_params.append(('filter', params['filter']))  # noqa: E501
        if 'exclude_fields' in params:
            query_params.append(('exclude_fields', params['exclude_fields']))  # noqa: E501
        if 'include_fields' in params:
            query_params.append(('include_fields', params['include_fields']))  # noqa: E501

        header_params = {}
        if 'authorization' in params:
            header_params['Authorization'] = params['authorization']  # noqa: E501
        if 'x_dd_auth_token' in params:
            header_params['X-DD-AUTH-TOKEN'] = params['x_dd_auth_token']  # noqa: E501

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/xml'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        # Delegate the actual HTTP round trip to the shared ApiClient.
        return self.api_client.call_api(
            '/rest/v1.0/dd-systems/{SYSTEM-ID}/protocols/ddboost/storage-units', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DdboostStorageUnitInfos',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete(self, system_id, id, **kwargs): # noqa: E501
"""Delete a Storage Unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete(system_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param str id: ddboost storage units identifier. @#$type=xs:string (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:return: ServiceStatus
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete_with_http_info(system_id, id, **kwargs) # noqa: E501
else:
(data) = self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete_with_http_info(system_id, id, **kwargs) # noqa: E501
return data
    def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete_with_http_info(self, system_id, id, **kwargs):  # noqa: E501
        """Delete a Storage Unit.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete_with_http_info(system_id, id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str system_id: DD system identifier. @#$type=xs:string (required)
        :param str id: ddboost storage units identifier. @#$type=xs:string (required)
        :param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
        :param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
        :return: ServiceStatus
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: swagger-codegen generated; keep hand edits to comments only.
        all_params = ['system_id', 'id', 'authorization', 'x_dd_auth_token']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge kwargs into the params dict, rejecting unknown keys.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'system_id' is set
        if self.api_client.client_side_validation and ('system_id' not in params or
                                                       params['system_id'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `system_id` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete`")  # noqa: E501
        # verify the required parameter 'id' is set
        if self.api_client.client_side_validation and ('id' not in params or
                                                       params['id'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `id` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete`")  # noqa: E501

        if self.api_client.client_side_validation and ('x_dd_auth_token' in params and
                                                       len(params['x_dd_auth_token']) < 1):
            raise ValueError("Invalid value for parameter `x_dd_auth_token` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete`, length must be greater than or equal to `1`")  # noqa: E501
        collection_formats = {}

        # SYSTEM-ID and ID are substituted into the URL path template below.
        path_params = {}
        if 'system_id' in params:
            path_params['SYSTEM-ID'] = params['system_id']  # noqa: E501
        if 'id' in params:
            path_params['ID'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}
        if 'authorization' in params:
            header_params['Authorization'] = params['authorization']  # noqa: E501
        if 'x_dd_auth_token' in params:
            header_params['X-DD-AUTH-TOKEN'] = params['x_dd_auth_token']  # noqa: E501

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/xml'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        # Delegate the actual HTTP round trip to the shared ApiClient.
        return self.api_client.call_api(
            '/rest/v1.0/dd-systems/{SYSTEM-ID}/protocols/ddboost/storage-units/{ID}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ServiceStatus',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get(self, system_id, id, **kwargs): # noqa: E501
"""Get a DDBoost storage unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get(system_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param str id: ddboost storage units identifier. @#$type=xs:string (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str exclude_fields: Comma separated list of fields to be excluded from response object. Required and general fields such as paging will not be excluded. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
:param str include_fields: Comma separated list of fields to be included in response object. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
:return: DdboostStorageUnitInfoDetail
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get_with_http_info(system_id, id, **kwargs) # noqa: E501
else:
(data) = self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get_with_http_info(system_id, id, **kwargs) # noqa: E501
return data
    def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get_with_http_info(self, system_id, id, **kwargs):  # noqa: E501
        """Get a DDBoost storage unit.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get_with_http_info(system_id, id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str system_id: DD system identifier. @#$type=xs:string (required)
        :param str id: ddboost storage units identifier. @#$type=xs:string (required)
        :param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
        :param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
        :param str exclude_fields: Comma separated list of fields to be excluded from response object. Required and general fields such as paging will not be excluded. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
        :param str include_fields: Comma separated list of fields to be included in response object. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
        :return: DdboostStorageUnitInfoDetail
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: swagger-codegen generated; keep hand edits to comments only.
        all_params = ['system_id', 'id', 'authorization', 'x_dd_auth_token', 'exclude_fields', 'include_fields']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge kwargs into the params dict, rejecting unknown keys.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'system_id' is set
        if self.api_client.client_side_validation and ('system_id' not in params or
                                                       params['system_id'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `system_id` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get`")  # noqa: E501
        # verify the required parameter 'id' is set
        if self.api_client.client_side_validation and ('id' not in params or
                                                       params['id'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `id` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get`")  # noqa: E501

        # Client-side validation mirrors the constraints declared in the spec.
        if self.api_client.client_side_validation and ('x_dd_auth_token' in params and
                                                       len(params['x_dd_auth_token']) < 1):
            raise ValueError("Invalid value for parameter `x_dd_auth_token` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get`, length must be greater than or equal to `1`")  # noqa: E501
        if self.api_client.client_side_validation and ('exclude_fields' in params and not re.search(r'^([^,]+,*)+$', params['exclude_fields'])):  # noqa: E501
            raise ValueError("Invalid value for parameter `exclude_fields` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get`, must conform to the pattern `/^([^,]+,*)+$/`")  # noqa: E501
        if self.api_client.client_side_validation and ('include_fields' in params and not re.search(r'^([^,]+,*)+$', params['include_fields'])):  # noqa: E501
            raise ValueError("Invalid value for parameter `include_fields` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get`, must conform to the pattern `/^([^,]+,*)+$/`")  # noqa: E501

        collection_formats = {}

        # SYSTEM-ID and ID are substituted into the URL path template below.
        path_params = {}
        if 'system_id' in params:
            path_params['SYSTEM-ID'] = params['system_id']  # noqa: E501
        if 'id' in params:
            path_params['ID'] = params['id']  # noqa: E501

        query_params = []
        if 'exclude_fields' in params:
            query_params.append(('exclude_fields', params['exclude_fields']))  # noqa: E501
        if 'include_fields' in params:
            query_params.append(('include_fields', params['include_fields']))  # noqa: E501

        header_params = {}
        if 'authorization' in params:
            header_params['Authorization'] = params['authorization']  # noqa: E501
        if 'x_dd_auth_token' in params:
            header_params['X-DD-AUTH-TOKEN'] = params['x_dd_auth_token']  # noqa: E501

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/xml'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        # Delegate the actual HTTP round trip to the shared ApiClient.
        return self.api_client.call_api(
            '/rest/v1.0/dd-systems/{SYSTEM-ID}/protocols/ddboost/storage-units/{ID}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DdboostStorageUnitInfoDetail',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put(self, system_id, id, ddboost_storage_unit_modify, **kwargs): # noqa: E501
"""Modify a Storage Unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put(system_id, id, ddboost_storage_unit_modify, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param str id: ddboost storage units identifier. @#$type=xs:string (required)
:param DdboostStorageUnitModify ddboost_storage_unit_modify: (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:return: DdboostStorageUnitInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put_with_http_info(system_id, id, ddboost_storage_unit_modify, **kwargs) # noqa: E501
else:
(data) = self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put_with_http_info(system_id, id, ddboost_storage_unit_modify, **kwargs) # noqa: E501
return data
    def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put_with_http_info(self, system_id, id, ddboost_storage_unit_modify, **kwargs):  # noqa: E501
        """Modify a Storage Unit.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put_with_http_info(system_id, id, ddboost_storage_unit_modify, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str system_id: DD system identifier. @#$type=xs:string (required)
        :param str id: ddboost storage units identifier. @#$type=xs:string (required)
        :param DdboostStorageUnitModify ddboost_storage_unit_modify: (required)
        :param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
        :param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
        :return: DdboostStorageUnitInfo
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: swagger-codegen generated; keep hand edits to comments only.
        all_params = ['system_id', 'id', 'ddboost_storage_unit_modify', 'authorization', 'x_dd_auth_token']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge kwargs into the params dict, rejecting unknown keys.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'system_id' is set
        if self.api_client.client_side_validation and ('system_id' not in params or
                                                       params['system_id'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `system_id` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put`")  # noqa: E501
        # verify the required parameter 'id' is set
        if self.api_client.client_side_validation and ('id' not in params or
                                                       params['id'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `id` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put`")  # noqa: E501
        # verify the required parameter 'ddboost_storage_unit_modify' is set
        if self.api_client.client_side_validation and ('ddboost_storage_unit_modify' not in params or
                                                       params['ddboost_storage_unit_modify'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `ddboost_storage_unit_modify` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put`")  # noqa: E501

        if self.api_client.client_side_validation and ('x_dd_auth_token' in params and
                                                       len(params['x_dd_auth_token']) < 1):
            raise ValueError("Invalid value for parameter `x_dd_auth_token` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put`, length must be greater than or equal to `1`")  # noqa: E501
        collection_formats = {}

        # SYSTEM-ID and ID are substituted into the URL path template below.
        path_params = {}
        if 'system_id' in params:
            path_params['SYSTEM-ID'] = params['system_id']  # noqa: E501
        if 'id' in params:
            path_params['ID'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}
        if 'authorization' in params:
            header_params['Authorization'] = params['authorization']  # noqa: E501
        if 'x_dd_auth_token' in params:
            header_params['X-DD-AUTH-TOKEN'] = params['x_dd_auth_token']  # noqa: E501

        form_params = []
        local_var_files = {}

        # The modify payload travels as the request body.
        body_params = None
        if 'ddboost_storage_unit_modify' in params:
            body_params = params['ddboost_storage_unit_modify']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/xml'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/xml', 'application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        # Delegate the actual HTTP round trip to the shared ApiClient.
        return self.api_client.call_api(
            '/rest/v1.0/dd-systems/{SYSTEM-ID}/protocols/ddboost/storage-units/{ID}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DdboostStorageUnitInfo',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_post(self, system_id, ddboost_storage_unit_create, **kwargs):  # noqa: E501
    """Create a Storage Unit.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead (call ``thread.get()`` for the result).

    :param async_req bool
    :param str system_id: DD system identifier. @#$type=xs:string (required)
    :param DdboostStorageUnitCreate ddboost_storage_unit_create: (required)
    :param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
    :param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
    :return: DdboostStorageUnitInfo
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always unwraps the raw HTTP response down
    # to the deserialized data object.
    kwargs['_return_http_data_only'] = True
    # The delegate itself decides (from the async_req kwarg) whether to
    # return a thread or the data, so one delegating return covers both
    # the async and the sync path.
    delegate = self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_post_with_http_info  # noqa: E501
    return delegate(system_id, ddboost_storage_unit_create, **kwargs)
def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_post_with_http_info(self, system_id, ddboost_storage_unit_create, **kwargs):  # noqa: E501
    """Create a Storage Unit.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_post_with_http_info(system_id, ddboost_storage_unit_create, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str system_id: DD system identifier. @#$type=xs:string (required)
    :param DdboostStorageUnitCreate ddboost_storage_unit_create: (required)
    :param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
    :param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
    :return: DdboostStorageUnitInfo
             If the method is called asynchronously,
             returns the request thread.
    """
    # Full set of keyword arguments this endpoint accepts; unexpected
    # keywords raise TypeError below so caller typos fail loudly.
    all_params = ['system_id', 'ddboost_storage_unit_create', 'authorization', 'x_dd_auth_token']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_post" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'system_id' is set
    if self.api_client.client_side_validation and ('system_id' not in params or
                                                   params['system_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `system_id` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_post`")  # noqa: E501
    # verify the required parameter 'ddboost_storage_unit_create' is set
    if self.api_client.client_side_validation and ('ddboost_storage_unit_create' not in params or
                                                   params['ddboost_storage_unit_create'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `ddboost_storage_unit_create` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_post`")  # noqa: E501

    # The auth token header, when supplied, must be a non-empty string.
    if self.api_client.client_side_validation and ('x_dd_auth_token' in params and
                                                   len(params['x_dd_auth_token']) < 1):
        raise ValueError("Invalid value for parameter `x_dd_auth_token` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_post`, length must be greater than or equal to `1`")  # noqa: E501
    collection_formats = {}

    # Substitute path template placeholders.
    path_params = {}
    if 'system_id' in params:
        path_params['SYSTEM-ID'] = params['system_id']  # noqa: E501

    query_params = []

    # Either Authorization or X-DD-AUTH-TOKEN authenticates the request.
    header_params = {}
    if 'authorization' in params:
        header_params['Authorization'] = params['authorization']  # noqa: E501
    if 'x_dd_auth_token' in params:
        header_params['X-DD-AUTH-TOKEN'] = params['x_dd_auth_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The storage-unit definition is sent as the request body.
    body_params = None
    if 'ddboost_storage_unit_create' in params:
        body_params = params['ddboost_storage_unit_create']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/xml'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/xml', 'application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/rest/v1.0/dd-systems/{SYSTEM-ID}/protocols/ddboost/storage-units', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='DdboostStorageUnitInfo',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get(self, system_id, id, **kwargs):  # noqa: E501
    """Get a DDBoost storage unit.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead (call ``thread.get()`` for the result).

    :param async_req bool
    :param str system_id: DD system identifier. @#$type=xs:string (required)
    :param str id: ddboost storage units identifier. @#$type=xs:string (required)
    :param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
    :param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
    :param str exclude_fields: Comma separated list of fields to be excluded from response object. Required and general fields such as paging will not be excluded. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
    :param str include_fields: Comma separated list of fields to be included in response object. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
    :return: DdboostStorageUnitInfoDetail20
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always unwraps the raw HTTP response down
    # to the deserialized data object.
    kwargs['_return_http_data_only'] = True
    # The delegate itself decides (from the async_req kwarg) whether to
    # return a thread or the data, so one delegating return covers both
    # the async and the sync path.
    delegate = self.rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get_with_http_info  # noqa: E501
    return delegate(system_id, id, **kwargs)
def rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get_with_http_info(self, system_id, id, **kwargs):  # noqa: E501
    """Get a DDBoost storage unit.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get_with_http_info(system_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str system_id: DD system identifier. @#$type=xs:string (required)
    :param str id: ddboost storage units identifier. @#$type=xs:string (required)
    :param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
    :param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
    :param str exclude_fields: Comma separated list of fields to be excluded from response object. Required and general fields such as paging will not be excluded. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
    :param str include_fields: Comma separated list of fields to be included in response object. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
    :return: DdboostStorageUnitInfoDetail20
             If the method is called asynchronously,
             returns the request thread.
    """
    # Full set of keyword arguments this endpoint accepts; unexpected
    # keywords raise TypeError below so caller typos fail loudly.
    all_params = ['system_id', 'id', 'authorization', 'x_dd_auth_token', 'exclude_fields', 'include_fields']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'system_id' is set
    if self.api_client.client_side_validation and ('system_id' not in params or
                                                   params['system_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `system_id` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get`")  # noqa: E501
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in params or
                                                   params['id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `id` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get`")  # noqa: E501

    # The auth token header, when supplied, must be a non-empty string.
    if self.api_client.client_side_validation and ('x_dd_auth_token' in params and
                                                   len(params['x_dd_auth_token']) < 1):
        raise ValueError("Invalid value for parameter `x_dd_auth_token` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get`, length must be greater than or equal to `1`")  # noqa: E501
    # Field filters must look like a comma separated list of field names.
    if self.api_client.client_side_validation and ('exclude_fields' in params and not re.search(r'^([^,]+,*)+$', params['exclude_fields'])):  # noqa: E501
        raise ValueError("Invalid value for parameter `exclude_fields` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get`, must conform to the pattern `/^([^,]+,*)+$/`")  # noqa: E501
    if self.api_client.client_side_validation and ('include_fields' in params and not re.search(r'^([^,]+,*)+$', params['include_fields'])):  # noqa: E501
        raise ValueError("Invalid value for parameter `include_fields` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get`, must conform to the pattern `/^([^,]+,*)+$/`")  # noqa: E501
    collection_formats = {}

    # Substitute path template placeholders.
    path_params = {}
    if 'system_id' in params:
        path_params['SYSTEM-ID'] = params['system_id']  # noqa: E501
    if 'id' in params:
        path_params['ID'] = params['id']  # noqa: E501

    # Field filters are forwarded as query string parameters.
    query_params = []
    if 'exclude_fields' in params:
        query_params.append(('exclude_fields', params['exclude_fields']))  # noqa: E501
    if 'include_fields' in params:
        query_params.append(('include_fields', params['include_fields']))  # noqa: E501

    # Either Authorization or X-DD-AUTH-TOKEN authenticates the request.
    header_params = {}
    if 'authorization' in params:
        header_params['Authorization'] = params['authorization']  # noqa: E501
    if 'x_dd_auth_token' in params:
        header_params['X-DD-AUTH-TOKEN'] = params['x_dd_auth_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # GET request: no body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/xml'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/rest/v2.0/dd-systems/{SYSTEM-ID}/protocols/ddboost/storage-units/{ID}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='DdboostStorageUnitInfoDetail20',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put(self, system_id, id, ddboost_storage_unit_modify, **kwargs):  # noqa: E501
    """Modify a Storage Unit.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead (call ``thread.get()`` for the result).

    :param async_req bool
    :param str system_id: DD system identifier. @#$type=xs:string (required)
    :param str id: ddboost storage units identifier. @#$type=xs:string (required)
    :param DdboostStorageUnitModify ddboost_storage_unit_modify: (required)
    :param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
    :param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
    :return: DdboostStorageUnitInfoDetail20
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always unwraps the raw HTTP response down
    # to the deserialized data object.
    kwargs['_return_http_data_only'] = True
    # The delegate itself decides (from the async_req kwarg) whether to
    # return a thread or the data, so one delegating return covers both
    # the async and the sync path.
    delegate = self.rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put_with_http_info  # noqa: E501
    return delegate(system_id, id, ddboost_storage_unit_modify, **kwargs)
def rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put_with_http_info(self, system_id, id, ddboost_storage_unit_modify, **kwargs):  # noqa: E501
    """Modify a Storage Unit.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put_with_http_info(system_id, id, ddboost_storage_unit_modify, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str system_id: DD system identifier. @#$type=xs:string (required)
    :param str id: ddboost storage units identifier. @#$type=xs:string (required)
    :param DdboostStorageUnitModify ddboost_storage_unit_modify: (required)
    :param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
    :param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
    :return: DdboostStorageUnitInfoDetail20
             If the method is called asynchronously,
             returns the request thread.
    """
    # Full set of keyword arguments this endpoint accepts; unexpected
    # keywords raise TypeError below so caller typos fail loudly.
    all_params = ['system_id', 'id', 'ddboost_storage_unit_modify', 'authorization', 'x_dd_auth_token']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'system_id' is set
    if self.api_client.client_side_validation and ('system_id' not in params or
                                                   params['system_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `system_id` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put`")  # noqa: E501
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in params or
                                                   params['id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `id` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put`")  # noqa: E501
    # verify the required parameter 'ddboost_storage_unit_modify' is set
    if self.api_client.client_side_validation and ('ddboost_storage_unit_modify' not in params or
                                                   params['ddboost_storage_unit_modify'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `ddboost_storage_unit_modify` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put`")  # noqa: E501

    # The auth token header, when supplied, must be a non-empty string.
    if self.api_client.client_side_validation and ('x_dd_auth_token' in params and
                                                   len(params['x_dd_auth_token']) < 1):
        raise ValueError("Invalid value for parameter `x_dd_auth_token` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put`, length must be greater than or equal to `1`")  # noqa: E501
    collection_formats = {}

    # Substitute path template placeholders.
    path_params = {}
    if 'system_id' in params:
        path_params['SYSTEM-ID'] = params['system_id']  # noqa: E501
    if 'id' in params:
        path_params['ID'] = params['id']  # noqa: E501

    query_params = []

    # Either Authorization or X-DD-AUTH-TOKEN authenticates the request.
    header_params = {}
    if 'authorization' in params:
        header_params['Authorization'] = params['authorization']  # noqa: E501
    if 'x_dd_auth_token' in params:
        header_params['X-DD-AUTH-TOKEN'] = params['x_dd_auth_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The modification payload is sent as the request body.
    body_params = None
    if 'ddboost_storage_unit_modify' in params:
        body_params = params['ddboost_storage_unit_modify']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/xml'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/xml', 'application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/rest/v2.0/dd-systems/{SYSTEM-ID}/protocols/ddboost/storage-units/{ID}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='DdboostStorageUnitInfoDetail20',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_post(self, system_id, ddboost_storage_unit_create, **kwargs):  # noqa: E501
    """Create a Storage Unit.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead (call ``thread.get()`` for the result).

    :param async_req bool
    :param str system_id: DD system identifier. @#$type=xs:string (required)
    :param DdboostStorageUnitCreate ddboost_storage_unit_create: (required)
    :param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
    :param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
    :return: DdboostStorageUnitInfoDetail20
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always unwraps the raw HTTP response down
    # to the deserialized data object.
    kwargs['_return_http_data_only'] = True
    # The delegate itself decides (from the async_req kwarg) whether to
    # return a thread or the data, so one delegating return covers both
    # the async and the sync path.
    delegate = self.rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_post_with_http_info  # noqa: E501
    return delegate(system_id, ddboost_storage_unit_create, **kwargs)
def rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_post_with_http_info(self, system_id, ddboost_storage_unit_create, **kwargs):  # noqa: E501
    """Create a Storage Unit.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_post_with_http_info(system_id, ddboost_storage_unit_create, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str system_id: DD system identifier. @#$type=xs:string (required)
    :param DdboostStorageUnitCreate ddboost_storage_unit_create: (required)
    :param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
    :param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
    :return: DdboostStorageUnitInfoDetail20
             If the method is called asynchronously,
             returns the request thread.
    """
    # Full set of keyword arguments this endpoint accepts; unexpected
    # keywords raise TypeError below so caller typos fail loudly.
    all_params = ['system_id', 'ddboost_storage_unit_create', 'authorization', 'x_dd_auth_token']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_post" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'system_id' is set
    if self.api_client.client_side_validation and ('system_id' not in params or
                                                   params['system_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `system_id` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_post`")  # noqa: E501
    # verify the required parameter 'ddboost_storage_unit_create' is set
    if self.api_client.client_side_validation and ('ddboost_storage_unit_create' not in params or
                                                   params['ddboost_storage_unit_create'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `ddboost_storage_unit_create` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_post`")  # noqa: E501

    # The auth token header, when supplied, must be a non-empty string.
    if self.api_client.client_side_validation and ('x_dd_auth_token' in params and
                                                   len(params['x_dd_auth_token']) < 1):
        raise ValueError("Invalid value for parameter `x_dd_auth_token` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_post`, length must be greater than or equal to `1`")  # noqa: E501
    collection_formats = {}

    # Substitute path template placeholders.
    path_params = {}
    if 'system_id' in params:
        path_params['SYSTEM-ID'] = params['system_id']  # noqa: E501

    query_params = []

    # Either Authorization or X-DD-AUTH-TOKEN authenticates the request.
    header_params = {}
    if 'authorization' in params:
        header_params['Authorization'] = params['authorization']  # noqa: E501
    if 'x_dd_auth_token' in params:
        header_params['X-DD-AUTH-TOKEN'] = params['x_dd_auth_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The storage-unit definition is sent as the request body.
    body_params = None
    if 'ddboost_storage_unit_create' in params:
        body_params = params['ddboost_storage_unit_create']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/xml'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/xml', 'application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/rest/v2.0/dd-systems/{SYSTEM-ID}/protocols/ddboost/storage-units', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='DdboostStorageUnitInfoDetail20',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
|
[
"root@s6006st157.petrobras.biz"
] |
root@s6006st157.petrobras.biz
|
bc47d286fda4479958fbd49dd8f596957c627662
|
d83118503614bb83ad8edb72dda7f449a1226f8b
|
/src/dprj/platinumegg/app/cabaret/views/application/scoutevent/resultanim.py
|
e2efa0cc4796d7c81b9432230cea05603a9db449
|
[] |
no_license
|
hitandaway100/caba
|
686fe4390e182e158cd9714c90024a082deb8c69
|
492bf477ac00c380f2b2758c86b46aa7e58bbad9
|
refs/heads/master
| 2021-08-23T05:59:28.910129
| 2017-12-03T19:03:15
| 2017-12-03T19:03:15
| 112,512,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,483
|
py
|
# -*- coding: utf-8 -*-
from platinumegg.app.cabaret.util.cabareterror import CabaretError
from platinumegg.app.cabaret.util.api import BackendApi
import settings
from platinumegg.app.cabaret.util.url_maker import UrlMaker
from platinumegg.app.cabaret.views.application.scoutevent.base import ScoutHandler
import urllib
from defines import Defines
import settings_sub
class Handler(ScoutHandler):
    """Scout result animation page.

    URL arguments:
        ID of the scout stage that was executed.
        Confirmation key.
        Index into the result event list.
    """

    @classmethod
    def getViewerPlayerClassList(cls):
        # No additional player-related model classes are required for this view.
        return []

    def process(self):
        # Parameters passed to the effect SWF templates.
        self.__swf_params = {}

        args = self.getUrlArgs('/sceventresultanim/')
        try:
            stageid = int(args.get(0))
            scoutkey = urllib.unquote(args.get(1))
            index = int(args.get(2) or 0)
        except:
            raise CabaretError(u'引数が不正です', CabaretError.Code.ILLEGAL_ARGS)

        v_player = self.getViewerPlayer()
        model_mgr = self.getModelMgr()
        using = settings.DB_READONLY

        # Whether the player has enabled skipping of scout effects.
        flag_skip = BackendApi.get_scoutskip_flag(v_player.id)

        eventmaster = BackendApi.get_current_scouteventmaster(model_mgr, using=using)
        if eventmaster is None:
            raise CabaretError(u'Event Closed.', CabaretError.Code.EVENT_CLOSED)
        mid = eventmaster.id

        # Progress data for the current event.
        playdata = BackendApi.get_event_playdata(model_mgr, mid, v_player.id, using)
        if playdata and playdata.confirmkey == scoutkey:
            # Should be re-fetched from the primary DB (the read-only
            # replica may be stale for a key this fresh).
            playdata = BackendApi.get_event_playdata(model_mgr, mid, v_player.id, using=settings.DB_DEFAULT, reflesh=True)

        if playdata is None or playdata.alreadykey != scoutkey:
            if settings_sub.IS_LOCAL:
                raise CabaretError(u'キーが正しくありません %s vs %s' % (playdata.alreadykey if playdata else 'None', scoutkey))
            # Stale or forged key in production: silently send the player
            # back to the scout event top page.
            url = self.makeAppLinkUrlRedirect(UrlMaker.scoutevent())
            self.appRedirect(url)
            return

        eventlist = playdata.result.get('event', [])[index:]
        if not eventlist:
            raise CabaretError(u'引数が不正です', CabaretError.Code.ILLEGAL_ARGS)

        # Maps event type -> (animation handler, may-be-skipped flag).
        table = {
            Defines.ScoutEventType.COMPLETE : (self.procComplete, False),
            Defines.ScoutEventType.LEVELUP : (self.procLevelup, True),
            Defines.ScoutEventType.HAPPENING : (self.procHappening, True),
        }
        proc = None
        next_event = None
        # Scan forward for the first event that should actually be animated.
        for idx, event in enumerate(eventlist):
            next_event = eventlist[idx+1] if (idx + 1) < len(eventlist) else None
            tmp = table.get(event.get_type(), None)
            if tmp is None:
                # Event type with no animation: stop scanning here.
                index += idx
                break
            tmp_proc, is_skipok = tmp
            if flag_skip and is_skipok:
                # The player opted to skip this kind of effect.
                continue
            index += idx
            proc = tmp_proc
            break

        if not proc:
            # Nothing left to animate: go straight to the result page.
            url = UrlMaker.scouteventresult(stageid, scoutkey)
            self.appRedirect(self.makeAppLinkUrlRedirect(url))
            return

        if next_event and table.has_key(next_event.get_type()):
            # Another animatable event follows: chain to the next animation.
            url = UrlMaker.scouteventresultanim(stageid, scoutkey, index+1)
        else:
            url = UrlMaker.scouteventresult(stageid, scoutkey)
        self.__swf_params['backUrl'] = self.makeAppLinkUrl(url)
        self.__playdata = playdata

        proc(event)

    def procComplete(self, event):
        """Scout completion effect."""
        self.__swf_params['text'] = Defines.EffectTextFormat.SCOUTRESULT_COMPLETE_TEXT
        self.appRedirectToEffect('scoutclear/effect.html', self.__swf_params)

    def procLevelup(self, event):
        """Level-up effect."""
        resulttexts = []
        # Level information.
        resulttexts.append(Defines.EffectTextFormat.LEVELUP_STATUSTEXT % event.level)
        self.__swf_params['statusText'] = u'\n'.join(resulttexts)
        self.appRedirectToEffect('levelup/effect.html', self.__swf_params)

    def procHappening(self, event):
        """Happening-occurred effect."""
        self.appRedirectToEffect('chohutokyaku/effect.html', self.__swf_params)
def main(request):
    # Entry point used by the URL dispatcher; delegates to the Handler class.
    return Handler.run(request)
|
[
"shangye@mail.com"
] |
shangye@mail.com
|
0566b1988b7ad3de1036a4e79ec4999bc4696341
|
149c303569ecc2a1c4a4107e24d2b4bc50e88952
|
/twenty.py
|
7b082bd3e792bd6041c17ba160e9958aa1dc45c6
|
[
"MIT"
] |
permissive
|
wogsland/think-python
|
8ed86ab96a36f5b766c17dff3fdff868f5f8a188
|
a192676c8c5e5d1d96950418facf6cf132b433b5
|
refs/heads/master
| 2020-05-23T09:28:07.661953
| 2017-03-17T16:49:16
| 2017-03-17T16:49:16
| 84,760,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
# Print every word in words.txt that is at least 20 characters long.
# The with-statement guarantees the file is closed even if iteration
# fails (the original left the handle open), and print(...) with a
# single argument behaves the same under Python 2 and Python 3,
# whereas the original `print word` statement is a Python 3 syntax error.
with open('words.txt') as moby:
    for line in moby:
        word = line.strip()
        if len(word) >= 20:
            print(word)
|
[
"bradley@wogsland.org"
] |
bradley@wogsland.org
|
6e6f3dccdbcc5e1215398c4c2605d64ab759adb7
|
4cdf4e243891c0aa0b99dd5ee84f09a7ed6dd8c8
|
/python/decorator/12.py
|
88fcefe3e80c692e7cf23963349684364a407982
|
[
"MIT"
] |
permissive
|
gozeon/code-collections
|
464986c7765df5dca980ac5146b847416b750998
|
13f07176a6c7b6ac13586228cec4c1e2ed32cae4
|
refs/heads/master
| 2023-08-17T18:53:24.189958
| 2023-08-10T04:52:47
| 2023-08-10T04:52:47
| 99,432,793
| 1
| 0
|
NOASSERTION
| 2020-07-17T09:25:44
| 2017-08-05T15:56:53
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 395
|
py
|
#coding=utf-8
# -*- coding=utf-8 -*-
from functools import wraps
def my_decorator(func):
    """Decorator that announces each invocation of *func*.

    ``functools.wraps`` copies the wrapped function's metadata
    (``__name__``, ``__doc__``, ...) onto the returned wrapper, so the
    decorated function still introspects like the original.
    """
    @wraps(func)
    def announce_and_call(*args, **kwargs):
        print('Calling decorated function...')
        return func(*args, **kwargs)
    return announce_and_call
@my_decorator
def example():
    """Docstring"""
    print('Called example function')

# Because my_decorator uses functools.wraps, the wrapper exposes the
# wrapped function's metadata, so this prints "example Docstring"
# rather than the inner wrapper's name and docstring.
print(example.__name__, example.__doc__)
|
[
"goze.qiu@gmail.com"
] |
goze.qiu@gmail.com
|
c257cf6d72eb6e72a9dda74373bd50cd90131f5e
|
15e4e8909d22ecfcf60d2dab03137c3f8672dd5c
|
/ex28.py
|
5a038f8dc0623981f6321063e62b1d7aa193a9eb
|
[] |
no_license
|
RimikaM/python-practice
|
08b578b4eeec786b4d71d0db6e152fec69e09b94
|
a5cbb6c32b31172d1076aec7a0b14c699d0e8da9
|
refs/heads/master
| 2021-09-01T08:12:34.153218
| 2017-12-25T23:59:11
| 2017-12-25T23:59:11
| 111,735,272
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,293
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 1 00:14:42 2017

@author: rimikamajumdar
"""

# Exercise 28: Boolean Practice
# Each line prints the exercise number followed by the evaluated boolean;
# the trailing comment on each line shows the expected evaluation.
# remember: any 'and' expression that has a False is immediately False!
# Likewise, any 'or' expression that contains a True is immediately True
# (both operators short-circuit left to right).

print("1", True and True) #True
print("2", False and True) #False
print("3", 1 == 1 and 2 == 1) #True and False -> False
print("4", "test" == "test") #True
print("5", 1 == 1 or 2 != 1) #True or False -> True
print("6", True and 1 == 1) #True
print("7", False and 0 != 0) #False and False -> False
print("8", True or 1 == 1) #True
print("9", "test" == "testing") #False
print("10", 1 != 0 and 2 == 1) #True and False -> False
print("11", "test" != "testing") #True
print("12", "test" == 1) #False
print("13", not (True and False)) #not False -> True
print("14", not (1 == 1 and 0 != 1)) #not True -> False
print("15", not (10 == 1 or 1000 == 1000)) #not True -> False
print("16", not (1 != 10 or 3 == 4)) #not True -> False
print("17", not ("testing" == "testing" and "Rimika" == "awesome")) #not False -> True
print("18", 1==1 and (not ("testing" == 1 or 1 == 0))) #True and (not False) -> True
print("19", "chunky" == "bacon" and (not (3 == 4 or 3 == 3))) #False and (not True) -> False
print("20", 3 == 3 and (not ("testing" == "testing" or "Python" == "Fun"))) #True and (not True) -> False
|
[
"rimikamaju@gmail.com"
] |
rimikamaju@gmail.com
|
d319a777775a0ffe11718c976c9346769e3aa48d
|
528045c017eb0374042392fb8b97937e9d374f9a
|
/electrum_sum/gui/qt/console.py
|
f1763282e11149a5b8d65e6920024f506830658e
|
[
"MIT"
] |
permissive
|
sumcoinlabs/electrum-sum
|
44211905157cf71ce1bd32c9a8d5b4e5cb096579
|
96589ca2cdc2b570da07fc135232b6809b89a3f1
|
refs/heads/master
| 2023-01-19T00:04:30.740271
| 2020-11-22T19:57:15
| 2020-11-22T19:57:15
| 226,197,023
| 3
| 3
|
MIT
| 2020-09-21T03:00:03
| 2019-12-05T22:05:29
|
Python
|
UTF-8
|
Python
| false
| false
| 11,822
|
py
|
# source: http://stackoverflow.com/questions/2758159/how-to-embed-a-python-interpreter-in-a-pyqt-widget
import sys
import os
import re
import traceback
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from electrum_sum import util
from electrum_sum.i18n import _
from .util import MONOSPACE_FONT
class OverlayLabel(QtWidgets.QLabel):
    """Dismissable warning label drawn on top of the console widget."""

    # Black text on a pale yellow box with an orange border.
    STYLESHEET = '''
    QLabel, QLabel link {
        color: rgb(0, 0, 0);
        background-color: rgb(248, 240, 200);
        border: 1px solid;
        border-color: rgb(255, 114, 47);
        padding: 2px;
    }
    '''

    def __init__(self, text, parent):
        super().__init__(text, parent)
        self.setMinimumHeight(150)
        self.setGeometry(0, 0, self.width(), self.height())
        self.setStyleSheet(self.STYLESHEET)
        self.setMargin(0)
        # The overlay spans the parent's full width, so the parent must not
        # show a horizontal scrollbar underneath it.
        parent.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setWordWrap(True)

    def mousePressEvent(self, e):
        # Any click dismisses the warning.
        self.hide()

    def on_resize(self, w):
        """Match the overlay width to the parent's usable width *w*."""
        padding = 2  # px, from the stylesheet above
        self.setFixedWidth(w - padding)
class Console(QtWidgets.QPlainTextEdit):
    """Interactive Python console widget embedded in the Qt GUI.

    Evaluates user input with eval()/exec() against ``self.namespace``,
    keeps a command history, supports multi-line constructs (lines ending
    in ':'), and offers tab-completion against the namespace.
    """
    def __init__(self, prompt='>> ', startup_message='', parent=None):
        QtWidgets.QPlainTextEdit.__init__(self, parent)
        self.prompt = prompt
        self.history = []        # previously executed commands
        self.namespace = {}      # globals/locals for eval/exec
        self.construct = []      # pending lines of a multi-line construct
        self.setGeometry(50, 75, 600, 400)
        self.setWordWrapMode(QtGui.QTextOption.WrapAnywhere)
        self.setUndoRedoEnabled(False)
        self.document().setDefaultFont(QtGui.QFont(MONOSPACE_FONT, 10, QtGui.QFont.Normal))
        self.showMessage(startup_message)
        self.updateNamespace({'run':self.run_script})
        self.set_json(False)
        warning_text = "<h1>{}</h1><br>{}<br><br>{}".format(
            _("Warning!"),
            _("Do not paste code here that you don't understand. Executing the wrong code could lead "
              "to your coins being irreversibly lost."),
            _("Click here to hide this message.")
        )
        self.messageOverlay = OverlayLabel(warning_text, self)
    def resizeEvent(self, e):
        super().resizeEvent(e)
        # keep the warning overlay as wide as the visible text area
        vertical_scrollbar_width = self.verticalScrollBar().width() * self.verticalScrollBar().isVisible()
        self.messageOverlay.on_resize(self.width() - vertical_scrollbar_width)
    def set_json(self, b):
        # when True, the next command's result is printed as JSON
        self.is_json = b
    def run_script(self, filename):
        """Evaluate the contents of *filename* in the console namespace."""
        with open(filename) as f:
            script = f.read()
        # eval is generally considered bad practice. use it wisely!
        result = eval(script, self.namespace, self.namespace)
    def updateNamespace(self, namespace):
        """Merge *namespace* (a dict) into the evaluation namespace."""
        self.namespace.update(namespace)
    def showMessage(self, message):
        self.appendPlainText(message)
        self.newPrompt()
    def clear(self):
        self.setPlainText('')
        self.newPrompt()
    def newPrompt(self):
        # inside a multi-line construct the prompt is shown as dots
        if self.construct:
            prompt = '.' * len(self.prompt)
        else:
            prompt = self.prompt
        self.completions_pos = self.textCursor().position()
        self.completions_visible = False
        self.appendPlainText(prompt)
        self.moveCursor(QtGui.QTextCursor.End)
    def getCommand(self):
        """Return the text of the current input line, prompt stripped."""
        doc = self.document()
        curr_line = doc.findBlockByLineNumber(doc.lineCount() - 1).text()
        curr_line = curr_line.rstrip()
        curr_line = curr_line[len(self.prompt):]
        return curr_line
    def setCommand(self, command):
        """Replace the current input line with *command*."""
        if self.getCommand() == command:
            return
        doc = self.document()
        curr_line = doc.findBlockByLineNumber(doc.lineCount() - 1).text()
        self.moveCursor(QtGui.QTextCursor.End)
        # select everything after the prompt, then overwrite it
        for i in range(len(curr_line) - len(self.prompt)):
            self.moveCursor(QtGui.QTextCursor.Left, QtGui.QTextCursor.KeepAnchor)
        self.textCursor().removeSelectedText()
        self.textCursor().insertText(command)
        self.moveCursor(QtGui.QTextCursor.End)
    def show_completions(self, completions):
        """Insert a (truncated) completion list below the input line."""
        if self.completions_visible:
            self.hide_completions()
        c = self.textCursor()
        c.setPosition(self.completions_pos)
        # show only the last attribute segment of each candidate
        completions = map(lambda x: x.split('.')[-1], completions)
        t = '\n' + ' '.join(completions)
        if len(t) > 500:
            t = t[:500] + '...'
        c.insertText(t)
        self.completions_end = c.position()
        self.moveCursor(QtGui.QTextCursor.End)
        self.completions_visible = True
    def hide_completions(self):
        """Delete the previously inserted completion list, if any."""
        if not self.completions_visible:
            return
        c = self.textCursor()
        c.setPosition(self.completions_pos)
        l = self.completions_end - self.completions_pos
        for x in range(l): c.deleteChar()
        self.moveCursor(QtGui.QTextCursor.End)
        self.completions_visible = False
    def getConstruct(self, command):
        """Accumulate multi-line constructs.

        Returns '' while a construct (started by a line ending in ':') is
        still open; returns the joined construct once the user enters an
        empty line twice; otherwise returns *command* unchanged.
        """
        if self.construct:
            prev_command = self.construct[-1]
            self.construct.append(command)
            if not prev_command and not command:
                ret_val = '\n'.join(self.construct)
                self.construct = []
                return ret_val
            else:
                return ''
        else:
            if command and command[-1] == (':'):
                self.construct.append(command)
                return ''
            else:
                return command
    def getHistory(self):
        return self.history
    def setHisory(self, history):
        # NOTE(review): method name keeps the original "setHisory" typo --
        # renaming it would break any existing callers.
        self.history = history
    def addToHistory(self, command):
        # commands starting with a space are intentionally not recorded
        if command[0:1] == ' ':
            return
        if command and (not self.history or self.history[-1] != command):
            self.history.append(command)
        self.history_index = len(self.history)
    def getPrevHistoryEntry(self):
        if self.history:
            self.history_index = max(0, self.history_index - 1)
            return self.history[self.history_index]
        return ''
    def getNextHistoryEntry(self):
        if self.history:
            hist_len = len(self.history)
            self.history_index = min(hist_len, self.history_index + 1)
            if self.history_index < hist_len:
                return self.history[self.history_index]
        return ''
    def getCursorPosition(self):
        # cursor offset within the input line, prompt excluded
        c = self.textCursor()
        return c.position() - c.block().position() - len(self.prompt)
    def setCursorPosition(self, position):
        self.moveCursor(QtGui.QTextCursor.StartOfLine)
        for i in range(len(self.prompt) + position):
            self.moveCursor(QtGui.QTextCursor.Right)
    def register_command(self, c, func):
        """Expose *func* in the console namespace under the name *c*."""
        methods = { c: func}
        self.updateNamespace(methods)
    def runCommand(self):
        """Execute the current input line and display its result."""
        command = self.getCommand()
        self.addToHistory(command)
        command = self.getConstruct(command)
        if command:
            tmp_stdout = sys.stdout
            # Redirects print() output into the widget; every write() call
            # alternates with a skipped one because print emits the text and
            # the trailing newline as two separate writes.
            class stdoutProxy():
                def __init__(self, write_func):
                    self.write_func = write_func
                    self.skip = False
                def flush(self):
                    pass
                def write(self, text):
                    if not self.skip:
                        stripped_text = text.rstrip('\n')
                        self.write_func(stripped_text)
                        QtCore.QCoreApplication.processEvents()
                    self.skip = not self.skip
            # guard against users typing a bare command name (a function)
            if type(self.namespace.get(command)) == type(lambda:None):
                self.appendPlainText("'{}' is a function. Type '{}()' to use it in the Python console."
                                     .format(command, command))
                self.newPrompt()
                return
            sys.stdout = stdoutProxy(self.appendPlainText)
            try:
                try:
                    # eval is generally considered bad practice. use it wisely!
                    result = eval(command, self.namespace, self.namespace)
                    if result is not None:
                        if self.is_json:
                            util.print_msg(util.json_encode(result))
                        else:
                            self.appendPlainText(repr(result))
                except SyntaxError:
                    # statements (assignments, defs, ...) cannot be eval'd --
                    # fall back to exec.
                    # exec is generally considered bad practice. use it wisely!
                    exec(command, self.namespace, self.namespace)
            except SystemExit:
                self.close()
            except BaseException:
                traceback_lines = traceback.format_exc().split('\n')
                # Remove traceback mentioning this file, and a linebreak
                for i in (3,2,1,-1):
                    traceback_lines.pop(i)
                self.appendPlainText('\n'.join(traceback_lines))
            sys.stdout = tmp_stdout
        self.newPrompt()
        self.set_json(False)
    def keyPressEvent(self, event):
        """Intercept editing keys: Tab completes, Enter runs, arrows browse
        history, Ctrl+L clears; everything else goes to the base class."""
        if event.key() == QtCore.Qt.Key_Tab:
            self.completions()
            return
        self.hide_completions()
        if event.key() in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return):
            self.runCommand()
            return
        if event.key() == QtCore.Qt.Key_Home:
            self.setCursorPosition(0)
            return
        if event.key() == QtCore.Qt.Key_PageUp:
            return
        elif event.key() in (QtCore.Qt.Key_Left, QtCore.Qt.Key_Backspace):
            # never allow deleting/moving into the prompt itself
            if self.getCursorPosition() == 0:
                return
        elif event.key() == QtCore.Qt.Key_Up:
            self.setCommand(self.getPrevHistoryEntry())
            return
        elif event.key() == QtCore.Qt.Key_Down:
            self.setCommand(self.getNextHistoryEntry())
            return
        elif event.key() == QtCore.Qt.Key_L and event.modifiers() == QtCore.Qt.ControlModifier:
            self.clear()
        super(Console, self).keyPressEvent(event)
    def completions(self):
        """Tab-completion over the namespace and attribute chains."""
        cmd = self.getCommand()
        # note for regex: new words start after ' ' or '(' or ')'
        lastword = re.split(r'[ ()]', cmd)[-1]
        beginning = cmd[0:-len(lastword)]
        path = lastword.split('.')
        prefix = '.'.join(path[:-1])
        prefix = (prefix + '.') if prefix else prefix
        ns = self.namespace.keys()
        if len(path) == 1:
            ns = ns
        else:
            # dotted path: walk the attribute chain to find candidates
            assert len(path) > 1
            obj = self.namespace.get(path[0])
            try:
                for attr in path[1:-1]:
                    obj = getattr(obj, attr)
            except AttributeError:
                ns = []
            else:
                ns = dir(obj)
        completions = []
        for name in ns:
            if name[0] == '_':continue
            if name.startswith(path[-1]):
                completions.append(prefix+name)
        completions.sort()
        if not completions:
            self.hide_completions()
        elif len(completions) == 1:
            self.hide_completions()
            self.setCommand(beginning + completions[0])
        else:
            # find common prefix
            p = os.path.commonprefix(completions)
            if len(p)>len(lastword):
                self.hide_completions()
                self.setCommand(beginning + p)
            else:
                self.show_completions(completions)
# Standalone demo: run this module directly to get a bare console window
# with a couple of sample variables in its namespace.
welcome_message = '''
   ---------------------------------------------------------------
     Welcome to a primitive Python interpreter.
   ---------------------------------------------------------------
'''
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    console = Console(startup_message=welcome_message)
    console.updateNamespace({'myVar1' : app, 'myVar2' : 1234})
    console.show()
    sys.exit(app.exec_())
|
[
"kristian@beyonddata.llc"
] |
kristian@beyonddata.llc
|
ade10f3b6717fd934044a0c9eb6b07cb95b52817
|
8a2469c011133a71b19026dcd065ca2df13bea5c
|
/code/utils/make_smooth_graphs.py
|
35e0d9f6672c51695d44dc6ab13b74cec2da6184
|
[] |
no_license
|
pigriver123/project-theta
|
41a08b84123e412d31c8348fd632c4924e6e21a5
|
35d2758aad54a14fc437c9eb77cf4c50da27e9c9
|
refs/heads/master
| 2021-01-14T08:46:54.011978
| 2015-12-02T00:00:25
| 2015-12-02T00:00:25
| 47,159,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,006
|
py
|
#import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
import smooth_gaussian
# Visual sanity check for spatial Gaussian smoothing: plot one slice
# (z=16, t=50) of a BOLD run at several FWHM kernel widths and save the
# comparison figure to smoothed_images.png.
#possibly put in os function to specify path later
#this file was more of a test, but since can easily adapt it into a more useful
#script i left it in utils
img = nib.load("../../data/ds005/sub001/BOLD/task001_run001/bold.nii.gz")
data = img.get_data()  # assumes a 4D (x, y, z, time) array -- see indexing below
#just plot a random slice to see if works
spatial2 = smooth_gaussian.smooth_spatial(data, fwhm = 5)[:, :, 16, 50]
spatial3 = smooth_gaussian.smooth_spatial(data,fwhm = 4)[:, :, 16, 50]
spatial4 = smooth_gaussian.smooth_spatial(data,fwhm = 3)[:, :, 16, 50]
# four vertically stacked panels sharing axes: original + three smoothings
f, (ax1, ax2, ax3, ax4) = plt.subplots(4, sharex = True, sharey = True)
ax1.imshow(data[:, :, 16, 50], cmap = 'gray')
ax1.set_title('Original')
ax2.imshow(spatial2, cmap = 'gray')
ax2.set_title('fwhm = 5 mm')
ax3.imshow(spatial3, cmap = 'gray')
ax3.set_title('fwhm = 4 mm')
ax4.imshow(spatial4, cmap = 'gray')
ax4.set_title('fwhm = 3mm')
plt.savefig("smoothed_images.png")
plt.close()
|
[
"brianqiu2000@gmail.com"
] |
brianqiu2000@gmail.com
|
1d7a8eece1e29b7452c9e39513c25e6cffdabbf2
|
fb97539728891fa10c6546f2c6c1b6b9beafa62f
|
/logger.py
|
676de8a8b4f4613bd2596c4a626a58a0bf0a9250
|
[
"MIT"
] |
permissive
|
sile16/swarm
|
f905da838a0532202be3ee6c7dfa88ba1ecbc431
|
4c7f7b33b189d520ced5f25164909dff74bd8785
|
refs/heads/master
| 2016-08-09T07:14:02.304963
| 2016-04-07T17:04:08
| 2016-04-07T17:04:08
| 55,651,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,054
|
py
|
import zmq
import sys
import common
import time
import argparse
import json
import datetime
import curses
stdscr = None
class JobLogger():
    """Collects log/heartbeat messages from swarm workers over ZeroMQ.

    Sockets:
      * PULL  (bound)      -- receives log records pushed by workers
      * SUB   (to server)  -- receives 'worker'/'logger' control broadcasts
      * REQ   (to server)  -- announces this logger and reports completion
    """
    def __init__(self,args):
        #Get client IP
        logger_ip_port = "{}:{}".format(common.get_client_ip(args.server), common.logging_port)
        #Setup logging Port
        self.context = zmq.Context()
        socket_logs = self.context.socket(zmq.PULL)
        socket_logs.bind('tcp://*:{}'.format(common.logging_port))
        #Setup to receive published messages from server
        socket_sub = self.context.socket(zmq.SUB)
        socket_sub.connect("tcp://{}:{}".format(args.server, common.server_pub_port))
        # NOTE(review): str topics imply Python 2 pyzmq; on Python 3 these
        # would need to be bytes -- confirm before porting.
        socket_sub.setsockopt(zmq.SUBSCRIBE,'worker')
        socket_sub.setsockopt(zmq.SUBSCRIBE,'logger')
        #Setup to push messages to Server
        socket_req = self.context.socket(zmq.REQ)
        socket_req.connect("tcp://{}:{}".format(args.server, common.server_port))
        self.socket_sub = socket_sub
        self.socket_logs = socket_logs
        self.socket_req = socket_req
        self.logger_ip_port = logger_ip_port
        self.state = 'waiting'   # 'waiting' -> 'running' -> 'finish' -> 'waiting'
        print ("logger starting with ip:{}".format(logger_ip_port))
        self.notify_server()
        self.job = None                  # active job logger (from common.job_loggers)
        self.workers = None              # worker ids of the current job
        self.heartbeats = {}             # worker id -> last heartbeat time
        self.finished_workers = set()    # workers that reported 'finished' or timed out
    def notify_server(self):
        """Announce this logger's address to the server (REQ/REP)."""
        msg = {'cmd':'new_logger','logger': self.logger_ip_port}
        self.socket_req.send_json(msg)
        self.socket_req.recv()
    def process_server_control(self,topic,msg):
        """Handle a published control message ('init' starts a job,
        'finish' begins the shutdown/drain phase)."""
        print("Received control Topic: {} msg: {}".format(topic, msg))
        if topic == 'worker' and 'cmd' in msg:
            if msg['cmd'] == 'init':
                self.job = common.job_loggers[msg['job']]
                self.heartbeats = {w:datetime.datetime.now() for w in msg['workers']}
                self.workers = msg['workers']
                self.finished_workers = set()
                self.job.init(msg)
                self.state = 'running'
            elif msg['cmd'] == 'finish':
                self.state = 'finish'
    def process_log(self,msg):
        """Record heartbeat/finished bookkeeping, then forward to the job."""
        if msg['type'] == 'heartbeat':
            self.heartbeats[msg['worker']] = datetime.datetime.now()
        elif msg['type'] == 'finished':
            self.finished_workers.add(msg['worker'])
        if self.job:
            self.job.process_log(msg)
        else:
            print("log: {}".format(msg))
    def finish(self):
        self.job.finish()
    def main_loop(self):
        """Poll both sockets forever; re-announce while waiting, and during
        'finish' declare silent workers dead after a 30 s heartbeat gap."""
        #setup polling device
        poller = zmq.Poller()
        poller.register(self.socket_sub,zmq.POLLIN)
        poller.register(self.socket_logs,zmq.POLLIN)
        print('logger starting loop ')
        should_continue = True
        start = datetime.datetime.now()
        last_log = datetime.datetime.now()
        while should_continue:
            socks = dict(poller.poll(100))
            if self.socket_sub in socks and socks[self.socket_sub] == zmq.POLLIN:
                # published frames are "<topic> <json payload>"
                message = self.socket_sub.recv()
                topic = message.split()[0]
                msg = json.loads(message[len(topic)+1:])
                self.process_server_control(topic,msg)
            if self.socket_logs in socks and socks[self.socket_logs] == zmq.POLLIN:
                msg = self.socket_logs.recv_json()
                self.process_log(msg)
                last_log = datetime.datetime.now()
            now = datetime.datetime.now()
            if self.state == 'waiting' and (now-start) > datetime.timedelta(seconds=5):
                print("Annoucing to server")
                self.notify_server()
                start = datetime.datetime.now()
            elif self.state == 'finish' and (now-last_log) > datetime.timedelta(seconds=5):
                last_log = now
                for worker in self.workers:
                    if worker not in self.finished_workers:
                        if (now - self.heartbeats[worker]) > datetime.timedelta(seconds=30):
                            #worker stopped sending heartbeats. Mark as dead
                            self.finished_workers.add(worker)
                            print("worker dead: {}".format(worker))
                #Once all workers are either finished or timedout we are done:
                if len(self.finished_workers) >= len(self.workers):
                    #we are done
                    self.state = 'waiting'
                    print("Finished! going to waiting state")
                    self.job.finish()
                    msg = {'cmd':'logger_finished','logger': self.logger_ip_port}
                    self.socket_req.send_json(msg)
                    self.socket_req.recv()
def main():
    """Entry point: parse -s/--server and run the logger's main loop."""
    #stdscr = curses_screen
    #stdscr.clear()
    parser = argparse.ArgumentParser()
    parser.add_argument("-s",'--server',help='Server IP',required=True)
    args = parser.parse_args()
    JobLogger(args).main_loop()
if __name__ == "__main__":
    #curses.wrapper(main)
    main()
|
[
"roberm3@usxxroberm3m3.corp.emc.com"
] |
roberm3@usxxroberm3m3.corp.emc.com
|
c45d32151f0d66c973fb5bacb9a5461f3ec29116
|
e72f7ea804658d52f85df39852e02429340bdff1
|
/BalloonPrediction/Processing_WKT.py
|
6b5046e88c5519e62173d743fbbbb6e31988a13d
|
[
"MIT"
] |
permissive
|
UMDBPP/BalloonPrediction
|
986cda80e8e2577deff0acb9419b352547336288
|
975517301c9692b8bd4607fbf6435a75f7b78846
|
refs/heads/master
| 2018-12-18T23:57:24.223335
| 2018-09-15T02:45:30
| 2018-09-15T02:45:30
| 118,663,238
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,418
|
py
|
"""
Created on May 21, 2018
@author: Zach
"""
from BalloonPrediction import CUSFPredictionAPI
def json_to_csv_line(query_json, launch_location_name, predict_id):
    """Flatten one CUSF prediction response into a single CSV line.

    Args:
        query_json: decoded JSON dict from the CUSF predictor API; must
            contain 'prediction' (a list of flight stages, each with a
            'trajectory' list) and 'request' (the echoed launch parameters).
            The trajectory longitudes are mutated in place (shifted by -360).
        launch_location_name: human-readable site name, first CSV field.
        predict_id: numeric id of this prediction (currently unused; kept
            for interface compatibility with existing callers).

    Returns:
        A comma-separated string: name, WKT-style polyline of the
        trajectory, then the echoed request parameters.
    """
    query_prediction = query_json['prediction']
    print(f'Using dataset {query_json["request"]["dataset"].replace("-", "").replace(":", "")[0:13]}')
    points = []
    # collect one (lon, lat, alt) tuple per trajectory entry of every stage
    for stage in query_prediction:
        for entry in stage['trajectory']:
            # convert to [-180, 180] longitude
            entry['longitude'] = entry['longitude'] - 360
            point_entry = (entry['longitude'], entry['latitude'], entry['altitude'])
            points.append(point_entry)
    # BUG FIX: the coordinates are numbers, but str.join() only accepts
    # strings -- the original '" ".join(entry)' raised TypeError.
    polyline_wkt = 'POLYLINE ({})'.format(
        ','.join(' '.join(str(coord) for coord in entry) for entry in points))
    # Same fix for the echoed request fields, which are numeric as well.
    csv_line = ','.join(str(field) for field in [launch_location_name,
                                                 polyline_wkt,
                                                 query_json['request']['launch_datetime'],
                                                 query_json['request']['dataset'],
                                                 query_json['request']['launch_longitude'],
                                                 query_json['request']['launch_latitude'],
                                                 query_json['request']['launch_altitude'],
                                                 query_json['request']['ascent_rate'],
                                                 query_json['request']['burst_altitude'],
                                                 query_json['request']['descent_rate']
                                                 ])
    return csv_line
columns = ['Name', 'geom', 'Launch_Time', 'Dataset', 'Launch_Longitude', 'Launch_Latitude', 'Launch_Altitude_m',
           'Ascent_Rate_m_s', 'Burst_Altitude_m', 'Descent_Rate_m_s']
def write_polylines_csv(output_filename, launch_datetime, launch_locations):
    """Request a prediction for every launch location and write one CSV
    record per site to *output_filename* (overwriting any existing file).

    Args:
        output_filename: path of the CSV file to write.
        launch_datetime: launch time passed through to the predictor API.
        launch_locations: mapping of site name -> (longitude, latitude).
    """
    # BUG FIX: the original truncated the file inside a short `with` block
    # and then wrote to the already-closed handle from outside it, raising
    # ValueError.  Keep the handle open for the whole loop instead.
    with open(output_filename, 'w') as output_text_file:
        # set predict Id
        current_predict_id = 1
        # populate fields for each launch location predict
        for name, launch_location in launch_locations.items():
            print(f'Getting prediction for {name}')
            query_json = CUSFPredictionAPI.request_prediction(launch_longitude=launch_location[0],
                                                              launch_latitude=launch_location[1],
                                                              launch_datetime=launch_datetime)
            # BUG FIX: terminate each record with a newline, and advance the
            # predict id (it was never incremented before).
            output_text_file.write(json_to_csv_line(query_json, name, current_predict_id) + '\n')
            current_predict_id += 1
|
[
"zrb@umd.edu"
] |
zrb@umd.edu
|
0fd43a2c2cdcb3f73eb8f6335dc0f94817910e1f
|
7febd7836c9336de22aaf984ab8bca8755be3c99
|
/Rodis Rosado Dictionary world map.py
|
337c451cd5182228dba3fd9e907bea776b6719d6
|
[] |
no_license
|
Mouse10/CSE
|
0d1ec4c3409841ea8ec6e9fe0705ea6f26675e7d
|
9f2b7f00285ad09620f515014964a0da2f6e606a
|
refs/heads/master
| 2020-04-02T07:51:01.200083
| 2019-05-21T20:20:35
| 2019-05-21T20:20:35
| 154,217,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 959
|
py
|
# Tiny text-adventure world: each node names its exits under "PATHS".
world_map = {
    "R19A": {
        "NAME": "Mr. Wibe's room",
        "DESCRIPTION": "This is the room that you are in.",
        "PATHS": {
            "NORTH": "PARKING_LOT"
        }
    },
    "PARKING_LOT": {
        "NAME": "A Parking lot",
        "DESCRIPTION": "There are a few cars parked here.",
        "PATHS": {
            "SOUTH": "R19A"
        }
    }
}
# other Variables
directions = ["NORTH", "SOUTH", "EAST", "WEST", "UP", "DOWN"]
current_node = world_map["R19A"]  # This is your current location
playing = True
# Controller: keep reading commands until the player quits.
while playing:
    print(current_node["NAME"])
    command = input(">_")
    if command.lower() in ["q", "quit", "exit"]:
        playing = False
    elif command in directions:
        try:
            room_name = current_node["PATHS"][command]
            current_node = world_map[room_name]
        except KeyError:
            # direction exists, but this room has no exit that way
            print("I can't go that way")
    else:  # BUG FIX: the original `else` was missing its colon (SyntaxError)
        print("Command not recognized.")
|
[
"44381333+Mouse10@users.noreply.github.com"
] |
44381333+Mouse10@users.noreply.github.com
|
03b61e4f9d40fb2fc54015b86d22a76cbb0dc213
|
00eb6e6245072243b65c13967d54f986beb084ad
|
/template.cgi
|
d592b12c3f2d69f70c63c8a23d2b2732c2d0a05f
|
[] |
no_license
|
mesodiar/web-infor
|
3af06a17753e4dbc8b89ff7e252f53728eb41dfe
|
0e869128dffe6fb19142face67797069a6135875
|
refs/heads/master
| 2020-05-21T05:57:20.200993
| 2017-05-26T03:23:19
| 2017-05-26T03:23:19
| 84,582,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 515
|
cgi
|
#!/usr/local/bin/python3
import cgi
def htmlTop():
    """Emit the CGI content-type header followed by the opening HTML
    boilerplate (everything up to and including <body>)."""
    header = """Content-type:text/html\n\n
<!DOCTYPE html>
<html lang="en">
<head>
<meta chardet="utf-8"/>
<title> My server-side template </title>
</head>
<body>"""
    print(header)
def htmlTail():
    """Close the <body> and <html> elements opened by htmlTop()."""
    footer = """</body>
</html>"""
    print(footer)
#main program
if __name__ == "__main__":
    try:
        htmlTop()
        htmlTail()
    except:
        # NOTE(review): the bare except looks deliberate for CGI -- any
        # failure is rendered as an HTML traceback rather than a blank
        # 500 page -- but it will also swallow SystemExit; confirm.
        cgi.print_exception()
|
[
"mils@prontomarketing.com"
] |
mils@prontomarketing.com
|
3bb2bda63bb05e17d287b72bc50bda27aba736b4
|
18fe3f034f203bc8a22d08f15b29297ebcc7dfaf
|
/py/qlazypy/lib/densop_mcx.py
|
e7a3b8c9e9d186908e9c3aa923a516079358fdfc
|
[
"Apache-2.0"
] |
permissive
|
katou-boop/qlazy
|
b8802c48b0cba0ba89cc1e1a69f551e0f4fdcc73
|
6b62fff65939a589603af7ed8be921c9f1669bb3
|
refs/heads/master
| 2023-02-17T12:30:05.419650
| 2021-01-17T23:20:20
| 2021-01-17T23:20:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 917
|
py
|
# -*- coding: utf-8 -*-
from qlazypy.error import *
from qlazypy.config import *
from qlazypy.util import *
# multi-controlled X gate
def __gray_code(de, n):
    """Yield the 2**n values of the n-bit reflected Gray code, in order.

    ``de`` is accepted for call-site compatibility but is not used.
    """
    total = 1 << n
    value = 0
    while value < total:
        yield value ^ (value >> 1)
        value += 1
def densop_mcx(de,qid=[]):
    """Apply a multi-controlled X gate to density operator ``de``.

    qid[:-1] are the control qubit ids and qid[-1] is the target.  The gate
    is decomposed as H on the target, a Gray-code-ordered sequence of CX and
    controlled-phase gates with alternating phase sign, then a final H.

    NOTE(review): the mutable default ``qid=[]`` is shared across calls; it
    is only read here, but callers should always pass qid explicitly.
    """
    # controled and target register
    qid_ctr = qid[:-1]
    qid_tar = qid[-1]
    # hadamard
    de.h(qid_tar)
    # controlled-RZ(psi), psi=pi/(2**(bitnum-1))
    bitnum = len(qid_ctr)
    psi = 1.0/(2**(bitnum-1)) # unit=pi(radian)
    gray_pre = 0
    for gray in __gray_code(de, bitnum):
        if gray == 0:
            continue
        # msb: index of the highest set bit of `gray`;
        # chb: index of the bit that flipped since the previous code word
        msb = len(str(bin(gray)))-3
        chb = len(str(bin(gray^gray_pre)))-3
        if gray != 1:
            if chb == msb:
                chb -= 1
            de.cx(qid_ctr[chb], qid_ctr[msb])
        de.cp(qid_ctr[msb], qid_tar, phase=psi)
        psi = -psi
        gray_pre = gray
    # hadamard
    de.h(qid_tar)
|
[
"saminriver33@gmail.com"
] |
saminriver33@gmail.com
|
8e578b5c5e911fbe8995ba795536316e66e5a61b
|
0ee72dc1b03070e25d3036bf6b562fc9b809ee72
|
/freeze/__init__.py
|
18f5610c310bc0963162bfdbec6dfe13797a4bdd
|
[
"MIT"
] |
permissive
|
fabiocaccamo/django-freeze
|
d36a9c7a9e197b23fa63dc77f0901aba89e4dfaf
|
c2d5dfbf38b072d79e1a37489b07e91c8af9461c
|
refs/heads/main
| 2023-08-29T12:50:19.069297
| 2023-07-18T07:35:52
| 2023-07-18T07:35:52
| 44,330,200
| 91
| 19
|
MIT
| 2023-09-08T13:52:25
| 2015-10-15T16:20:55
|
Python
|
UTF-8
|
Python
| false
| false
| 269
|
py
|
from freeze.metadata import (
__author__,
__copyright__,
__description__,
__license__,
__title__,
__version__,
)
__all__ = [
"__author__",
"__copyright__",
"__description__",
"__license__",
"__title__",
"__version__",
]
|
[
"fabio.caccamo@gmail.com"
] |
fabio.caccamo@gmail.com
|
7d3349c97fd726093461d218e60254f5301ebfd8
|
37bbe6a81b2d565c311ed79b37bea95bf7f6cc66
|
/summarization/transmission/extract_transmission.py
|
7653c82780e4be1595d9bb60dca01d9a9a2b2c99
|
[] |
no_license
|
krudra/epidemic_classification_summarization_2019
|
7a52bdbd66ffce0035699450ca2868040a5ab0e4
|
12b472670a7f7598c0845bb10f9a65c64f13724d
|
refs/heads/master
| 2020-09-20T15:27:43.811963
| 2019-11-27T21:50:06
| 2019-11-27T21:50:06
| 224,518,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,577
|
py
|
# - *- coding: utf- 8 - *-
#!/usr/bin/python2
import sys
import os
import random
import json
import re
import codecs
import string
import networkx as nx
from operator import itemgetter
from happyfuntokenizing import *
from nltk.corpus import stopwords
from textblob import *
from nltk.stem.wordnet import WordNetLemmatizer
import aspell
import numpy as np
import gzip
import pickle
ASPELL = aspell.Speller('lang', 'en')
cachedstopwords = stopwords.words("english") # English Stop Words
Tagger_Path = 'SET_YOUR_PATH/ark-tweet-nlp-0.3.2/' # Twitter pos tagger path
lmtzr = WordNetLemmatizer() # Lemmatizer
def negatedContextCount(s):
    """Return 1 if tweet ``s`` contains a non-empty negated clause, else 0.

    Tokenizes ``s``, locates the first negation token (never/no/not/.../n't),
    then counts the tokens that follow it up to the next clause-level
    punctuation mark; any positive count is collapsed to 1.
    """
    negation = re.compile("(?:^(?:never|no|nothing|nowhere|noone|none|not|havent|hasnt|hadnt|cant|couldnt|shouldnt|wont|wouldnt|dont|doesnt|didnt|isnt|arent|aint)$)|n't")
    clauseLevelPunctuation = re.compile("^[.:;!?]$")
    tok = Tokenizer(preserve_case=False)
    tokenized = tok.tokenize(s)
    count= 0
    for token in tokenized :
        if negation.search(token) :
            # count tokens inside the negated span, stopping at clause end
            for t in tokenized[tokenized.index(token) :] :
                if clauseLevelPunctuation.search(t) :
                    break
                count+=1
            break
    # collapse to a boolean-like 0/1 result
    if count>=1:
        return 1
    return count
####################################################################
# Inputs:
# 1. ifname_parsed: Parsed tweets from transmission category (Tweebo parser)
# 2. positive_ofname: Output file for medium responsible for transmission
# 3. negative_ofname: Output file for medium not responsible for transmission
####################################################################
def transmission_summarization(ifname_parsed,positive_ofname,negative_ofname):
    """Extract transmission media from dependency-parsed tweets.

    Reads Tweebo-parsed tweets from ``ifname_parsed``, builds a dependency
    graph per tweet, and collects nouns that sit within distance 2 of a
    'transmission'/'transmit' token.  Nouns from non-negated contexts are
    ranked into ``positive_ofname``; nouns from negated contexts into
    ``negative_ofname``.

    NOTE: this is Python 2 code (list-returning dict.keys(), iteritems).
    """
    TAGREJECT = ['U','@','#','~','E','~',',']
    fp = open(ifname_parsed,'r')
    medium = {}        # medium word -> count (positive contexts)
    medium_neg = {}    # medium word -> count (negated contexts)
    pos_tweet = set([])
    neg_tweet = set([])
    dic = {}           # token seq -> [word, tag, dependency head]
    for l in fp:
        wl = l.split('\t')
        if len(wl)==8:
            # one token line: sequence, word, POS tag, dependency head
            seq = int(wl[0])
            word = wl[1].strip(' #\t\n\r').lower()
            tag = wl[4].strip(' \t\n\r')
            dep = wl[6].strip(' \t\n\r')
            if dep=='_':
                dep = int(wl[7].strip(' \t\n\r'))
            else:
                dep = int(wl[6])
            # lemmatize nouns (WordNet) and verbs (TextBlob)
            if tag=='N':
                try:
                    w = lmtzr.lemmatize(word)
                    word = w.lower()
                except Exception as e:
                    pass
            elif tag=='V':
                try:
                    w = Word(word.lower())
                    x = w.lemmatize("v")
                except Exception as e:
                    x = word.lower()
                word = x.lower()
            else:
                pass
            temp = [word,tag,dep]
            dic[seq] = temp
        else:
            # blank separator: one tweet complete -- build its dependency graph
            temp = dic.keys()
            temp.sort()
            G = nx.Graph()
            for x in temp:
                G.add_node(x)
            for x in temp:
                dep = dic[x][2]
                if dep!=-1 and dep!=0 and dic[x][1] not in TAGREJECT:
                    G.add_edge(dep,x)
            temp = sorted(nx.connected_components(G), key = len, reverse=True)
            for i in range(0,len(temp),1):
                comp = temp[i]
                # token positions of 'transmission'/'transmit' in this component
                TR = []
                for x in comp:
                    if dic[x][0]=='transmission' or dic[x][0]=='transmit':
                        TR.append(x)
                NEGCON = 0
                if len(TR)>0:
                    s = ''
                    for x in comp:
                        s = s + dic[x][0] + ' '
                    s = s.strip(' ')
                    NEGCON = negatedContextCount(s)
                    for x in comp:
                        # candidate medium: a spell-checked noun of length > 2
                        if dic[x][1]=='N' and ASPELL.check(dic[x][0])==1 and len(dic[x][0])>2:
                            shp = []
                            for y in TR:
                                if x!=y:
                                    shp.append(nx.shortest_path_length(G,source=x,target=y))
                            shp.sort()
                            if len(shp)>0:
                                # keep nouns within distance 2 of a transmission token
                                if shp[0]<=2:
                                    if NEGCON==0:
                                        pos_tweet.add(s)
                                        if medium.__contains__(dic[x][0])==True:
                                            v = medium[dic[x][0]]
                                            v+=1
                                            medium[dic[x][0]] = v
                                        else:
                                            medium[dic[x][0]] = 1
                                    else:
                                        neg_tweet.add(s)
                                        if medium_neg.__contains__(dic[x][0])==True:
                                            v = medium_neg[dic[x][0]]
                                            v+=1
                                            medium_neg[dic[x][0]] = v
                                        else:
                                            medium_neg[dic[x][0]] = 1
            dic = {}
    fp.close()
    ###### Rank medium ######
    temp = []
    for k,v in medium.iteritems():
        temp.append((k,v))
    temp.sort(key=itemgetter(1),reverse=True)
    fo = open(positive_ofname,'w')
    for x in temp:
        # BUG FIX: x is a (word, count) tuple; the original `fo.write(x + '\n')`
        # raised TypeError.  Write it in the same format as the negative file.
        fo.write(x[0] + '\t' + str(x[1]) + '\n')
    fo.close()
    ###### Rank negative medium ######
    temp_neg = []
    for k,v in medium_neg.iteritems():
        temp_neg.append((k,v))
    temp_neg.sort(key=itemgetter(1),reverse=True)
    fo = open(negative_ofname,'w')
    for x in temp_neg:
        s = x[0] + '\t' + str(x[1])
        fo.write(s+'\n')
    fo.close()
def main():
    """CLI entry point: <parsed-input> <positive-output> <negative-output>."""
    try:
        _, ifname_parsed, positive_ofname, negative_ofname = sys.argv
    except Exception as e:
        # wrong number of arguments: report the unpack error and exit
        print(e)
        sys.exit(0)
    transmission_summarization(ifname_parsed, positive_ofname, negative_ofname)
if __name__=='__main__':
    main()
|
[
"krudra@pc154.l3s.uni-hannover.de"
] |
krudra@pc154.l3s.uni-hannover.de
|
b50f4ee6dab81a831281eb7ccf32eedfbf3b617d
|
6f44147e71372fea868867aad78fd4ef82d4d570
|
/apriori LW/final_end.py
|
cc9ae69e7b2569292cd4070929de8d6a18abfd0a
|
[] |
no_license
|
pratyayroy/Ranking-of-Association-Rule-towards-Smart-Decision-for-Smart-City
|
b224a39c38d46fe747070fad058c7805218b2f2b
|
f09bc1b18225eec555d4afa0def6202a6cec25d4
|
refs/heads/master
| 2021-01-22T16:09:39.302250
| 2017-09-05T09:47:42
| 2017-09-05T09:47:42
| 102,389,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,513
|
py
|
# Apriori frequent-itemset mining with confidence-ranked association rules,
# driven interactively (min support / min confidence read from stdin) over
# the transactions in extend_1k.csv.
import csv
__author__ = "Pratyay Roy"
__copyright__ = "© 2016, Project Nightjar"
__credits__ = "Subrata Datta, Pratyay Roy"
__maintainer__ = "Pratyay Roy"
__email__ = "pratyayroy@outlook.com"
__status__ = "N-item minimum confidence"
import csv  # NOTE(review): duplicate of the import above
import itertools
import time
start_time = time.time()
f = open('extend_1k.csv')
csv_f = csv.reader(f)
# Pass 1: collect the distinct items and count the transactions.
unique_items = []
transaction = 0
for row in csv_f:
    row = list(filter(None, row))
    transaction += 1
    l = len(row)
    for i in range(0, l):
        if row[i] in unique_items:
            pos = unique_items.index(row[i])  # NOTE(review): pos is unused
        else:
            unique_items.append(row[i])
print("Read " + str(transaction) + " transactions successfully..")
print("\n################### Generating unique items ###################\n")
for i, e in enumerate(unique_items, 1):
    print(str(i) + '.', e)
# Apriori main loop: grow candidate itemsets of size i from the items that
# appeared in any frequent (i-1)-itemset (channel1 -> channel2).
channel2 = unique_items
item_set = []
i = 0
cnt = 1
min_support = float(input("Enter the minimum support :: "))
while channel2:
    i += 1
    channel1 = []
    print("\n################### Generating Frequent " + str(i) + " item-set ###################\n")
    for x in itertools.combinations(channel2, i):
        # count support of candidate x by re-scanning the whole file
        total_occurence = 0
        f.seek(0)
        for row in csv_f:
            row = list(filter(None, row))
            x = list(x)
            # print(x)
            # print(set(row))
            if set(x).issubset(row):
                total_occurence += 1
        if total_occurence / transaction >= min_support:
            print(str(cnt) + ". " + str(x) + " | " + str(total_occurence / transaction))
            cnt += 1
            item_set.append(x)
            channel1.append(x)
    # items surviving this level seed the next level's candidates
    channel2 = []
    # channel1 = sum(channel1, [])
    for x in channel1:
        # print(x)
        # print(channel2)
        channel2 = channel2 + list(set(x) - set(channel2))
        # print(channel2)
    # dummy = input("test")
print(item_set, sep="\n")
##############################################################################################
# Rule generation: for every ordered pair of frequent itemsets where the
# antecedent and consequent are disjoint, compute confidence and the
# trust/dissociation/gravity measures.
conf = input("Enter a minimum confidence :: ")
rules = [[]]
cnt = 0
dissociation = 0
total_confidence = 0
for x in itertools.permutations(item_set, 2):
    if set(x[0]) - set(x[1]) != set(x[0]):
        continue
    numerator = 0
    denominator = 0
    s_a1b = 0
    s_ab1 = 0
    s_a = 0
    s_b = 0
    s_ab = 0
    s_ab1 = 0
    s_a1b = 0
    s_a1b1 = 0
    # Formula goes like.. Sup(x[0] + x[1])/supp(x[0])
    # Checking the number of times (x[0]&x[1]) exists
    f.seek(0)
    for row in csv_f:
        row = list(filter(None, row))
        if set(x[0]).issubset(row):
            denominator += 1
        if set(x[0]).issubset(row) and set(x[1]).issubset(row):
            numerator += 1
        if set(x[0]).issubset(row):
            s_a += 1
        if set(x[1]).issubset(row):
            s_b += 1
        if set(x[0]).issubset(row) and set(x[1]).issubset(row):
            s_ab += 1
        if set(x[0]).issubset(row) and not set(x[1]).issubset(row):
            s_ab1 += 1
        if not set(x[0]).issubset(row) and set(x[1]).issubset(row):
            s_a1b += 1
        if not set(x[0]).issubset(row) and not set(x[1]).issubset(row):
            s_a1b1 += 1
    if numerator / denominator >= float(conf):
        cnt += 1
        p_ab = s_ab / transaction
        p_ab1 = s_ab1 / transaction
        p_a1b = s_a1b / transaction
        p_a1b1 = s_a1b1 / transaction
        p_a = s_a / transaction
        p_b = s_b / transaction
        trust = max(p_ab, (1 - (p_a1b + p_ab1))) / (max(p_a, p_b) + p_a1b1)
        dissociation = p_a1b + p_ab1
        # Weighted Relative Accuracy (WRAcc)
        WRAcc = p_a * ((p_ab / p_a) - p_b)
        # Certainity Factor (cf)
        cf = max((((p_ab / p_a) - p_b) / (1 - p_b)), (((p_ab / p_b) - p_a) / (1 - p_a)))
        # Probability of Trust (pot)
        pot = trust / (trust + dissociation)
        # Gravity (gr) = u + WRAcc + cf
        gr = pot + WRAcc + cf
        # Antecedent Length
        ant_length = len(x[0])
        print(str(cnt) + ". " + str(x[0]) + " -> " + str(x[1]) + "(" + str(numerator / denominator) + ")" + " | " + str(
            p_a1b + p_ab1))
        #                    0        1      2    3       4               5                    6             7     8             9
        # Rules are dispalyed as [rank], [antecedent], [consequent], [gravity], [ant length], [confidence] [support] [cf] [diss] [pt]
        rules[cnt - 1] = [cnt + 1, x[0], x[1], gr, ant_length, numerator/denominator, numerator/transaction, cf, dissociation, pot]
        rules += [[]]
print(dissociation)
print("\n################### After Applying Gravity ###################\n")
del (rules[cnt])
print(rules)
rules = sorted(rules, key=lambda q: (q[5]), reverse=True)  # NOTE(review): sorts on q[5] (confidence); the original comment claimed gravity (q[3])
n_rules = [[]]
for i, e in enumerate(rules):
    n_rules[i] = [i+1, e[1], e[2], e[3], e[5], e[6], e[7], e[8], e[9]]
    n_rules += [[]]
    # LHS RHS gravity confidence
    print(str(i + 1) + '.' + str(e[1]) + " -> " + str(e[2]) + "(" + str(e[3]) + ")" + " | " + str(e[5]))
# NOTE(review): `dissociation` here is just the LAST rule's value, and cnt==0
# (no rule passing the threshold) would raise ZeroDivisionError.
print("The average dissociation is " + str(dissociation / cnt))
with open("graph_extend_5k.csv", "w") as ff:
    writer = csv.writer(ff)
    writer.writerows(n_rules)
print("The average dissociation is " + str(dissociation / cnt))
# NOTE(review): total_confidence is never updated, so this always prints 0.0.
print("The average confidence is " + str(total_confidence / cnt))
print("--- Execution Time :: %s seconds ---" % (time.time() - start_time))
|
[
"pratyayy@gmail.com"
] |
pratyayy@gmail.com
|
3c5b293c6d389c7c7dc932dead2e0c0535d49fc5
|
2af6a5c2d33e2046a1d25ae9dd66d349d3833940
|
/res/scripts/client/gui/shared/formatters/__init__.py
|
95539a9befb1c59e1830abe651b2f06f1e199360
|
[] |
no_license
|
webiumsk/WOT-0.9.12-CT
|
e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2
|
2506e34bd6634ad500b6501f4ed4f04af3f43fa0
|
refs/heads/master
| 2021-01-10T01:38:38.080814
| 2015-11-11T00:08:04
| 2015-11-11T00:08:04
| 45,803,240
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 776
|
py
|
# 2015.11.10 21:29:02 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/shared/formatters/__init__.py
import BigWorld
from gui.shared.formatters import icons
from gui.shared.formatters import text_styles
from gui.shared.formatters import time_formatters
__all__ = ('icons', 'text_styles', 'time_formatters')
def getClanAbbrevString(clanAbbrev):
    """Wrap a clan abbreviation in square brackets, e.g. 'ABC' -> '[ABC]'."""
    return '[' + clanAbbrev + ']'
def getGlobalRatingFmt(globalRating):
    """Format a global rating as a grouped integer, or '--' when negative
    (a negative value marks the rating as unavailable)."""
    if globalRating < 0:
        return '--'
    return BigWorld.wg_getIntegralFormat(globalRating)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\formatters\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:29:02 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
06734f6661cb4bef83281eb50aca4bb20859409c
|
f0f223b93d43f96c57dc13c8509953e0869a96f4
|
/web/tasks.py
|
c56d86c2dfc03949884d90293cdaa66da14ba10f
|
[] |
no_license
|
xuebajunzhu/wechatdjangoproject
|
26fd2e487216460a0f0f314b8d3af2011cc50374
|
cc615a6e3f439ab0224b290bb9c1e4737406ea19
|
refs/heads/master
| 2021-01-02T13:41:16.784797
| 2020-02-13T07:14:09
| 2020-02-13T07:14:09
| 239,639,021
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,000
|
py
|
# -*- coding:utf-8 -*-
# Author:cqk
# Data:2020/2/12 10:12
import uuid
import datetime
import itertools
from celery import shared_task
from sale import models
from utils.encrypt import md5
@shared_task
def to_preview_status_task(auction_id):
    # Celery task: move the auction session `auction_id` and every lot in it
    # into the "preview" state (status == 2).
    print("预展开始了")
    models.SaleCategory.objects.filter(id=auction_id).update(status=2)
    models.Commodity.objects.filter(salecategory_id=auction_id).update(status=2)
    print("to_preview_status_task结束了")
@shared_task
def to_auction_status_task(auction_id):
    # Celery task: move the auction session `auction_id` and every lot in it
    # into the "live bidding" state (status == 3).
    print("拍卖开始了")
    models.SaleCategory.objects.filter(id=auction_id).update(status=3)
    models.Commodity.objects.filter(salecategory_id=auction_id).update(status=3)
    print("to_auction_status_task结束了")
@shared_task
def end_auction_task(auction_id):
    """Close auction session `auction_id`.

    Marks the session and its lots ended (status 4), settles every lot
    (highest bid wins; no bids -> passed in, status 5), creates payment
    orders for winners with a 24h payment deadline, and refunds deposits
    of bidders who won nothing.
    """
    print("拍卖结束了")
    models.SaleCategory.objects.filter(id=auction_id).update(status=4)
    models.Commodity.objects.filter(salecategory_id=auction_id).update(status=4)
    print("end_auction_task结束了")
    # For each lot: find the highest bid; if nobody bid, the lot is passed in.
    total = 0
    total_unfortunate_list = []
    lucky_auction_deposit_id = set()
    auction_object = models.SaleCategory.objects.filter(id=auction_id).first()
    item_object_list = models.Commodity.objects.filter(salecategory=auction_object)
    # Loop over all lots in this session.
    for item_object in item_object_list:
        # Current highest bid on this lot, if any.
        lucky_object = models.BidRecord.objects.filter(commodity=item_object).order_by('-bid_price').first()
        # No bids: mark the lot as passed in (unsold).
        if not lucky_object:
            item_object.status = 5
            item_object.save()
            continue
        lucky_object.status = 2
        lucky_object.save()
        # Record the hammer price on the lot.
        item_object.transaction_price = lucky_object.bid_price
        item_object.save()
        # Accumulate session turnover.
        total += lucky_object.bid_price
        # Deposit the winner paid for this lot: prefer the single-lot deposit,
        # fall back to the whole-session deposit.
        deposit_object = models.CashDeposit.objects.filter(
            user=lucky_object.bidder,
            commodity=item_object,
            status=1).first()
        if not deposit_object:
            # Whole-session deposit (status 2, not tied to a single lot).
            deposit_object = models.CashDeposit.objects.filter(user=lucky_object.bidder,
                                                               salecategory=auction_object,
                                                               status=2, commodity__isnull=True).first()
        # Remember deposit records of users who actually won something.
        lucky_auction_deposit_id.add(deposit_object.id)
        # Create the payment order for the winner.
        order_object = models.Pay_Order.objects.create(
            uid=md5(uuid.uuid4()),
            user=lucky_object.bidder,
            commodity=item_object,
            deposit=deposit_object,  # single-lot or whole-session deposit record
            due_money=lucky_object.bid_price,
        )
        # Single-lot deposits of everyone who did NOT win this lot.
        item_unfortunate_list = models.CashDeposit.objects.filter(commodity=item_object, status=1).exclude(
            user=lucky_object.bidder)
        total_unfortunate_list.extend(item_unfortunate_list)
        # Schedule the 24h payment deadline; unpaid orders forfeit (part of)
        # the deposit — see twenty_four_hour.
        date = datetime.datetime.utcnow() + datetime.timedelta(hours=24)
        task_id = twenty_four_hour.apply_async(args=[order_object.id],
                                               eta=date).id
        order_object.twenty_four_task_id = task_id
        order_object.save()
    # Persist the session turnover.
    auction_object.trading_volume = total
    auction_object.save()
    # Whole-session deposits of users who won nothing at all.
    auction_unfortunate_list = models.CashDeposit.objects.filter(
        status=2,
        salecategory=auction_object,
        commodity__isnull=True).exclude(id__in=lucky_auction_deposit_id)
    # Refund deposits via the original payment route.
    for deposit in itertools.chain(total_unfortunate_list, auction_unfortunate_list):
        uid = md5(uuid.uuid4())
        if deposit.pay_type == 1:  # paid via WeChat
            # res = refund(uid, deposit.uid, deposit.amount, deposit.amount)
            res = True
            models.DepositRefundRecord.objects.create(
                uid=uid,
                status=2 if res else 1,
                amount=deposit.guarantee_sum,
                deposit=deposit
            )
            if res:
                deposit.margin_balance = 0
                deposit.save()
        else:  # paid from account balance
            deposit.user.balance += deposit.amount
            deposit.user.save()
            models.DepositRefundRecord.objects.create(
                uid=uid,
                status=2,
                amount=deposit.guarantee_sum,
                deposit=deposit
            )
            deposit.margin_balance = 0
            deposit.save()
@shared_task
def twenty_four_hour(order_id):
    """If an order is still unpaid 24 hours after creation, mark it overdue
    and deduct (part of) the buyer's deposit."""
    print("处理未支付的订单")
    # Order already paid or otherwise no longer pending: nothing to do.
    order_object = models.Pay_Order.objects.filter(id=order_id).first()
    if order_object.status != 1:
        return
    # Mark the order as overdue/unpaid.
    order_object.status = 4
    order_object.save()
    # Single-lot deposit: forfeit it entirely.
    if order_object.deposit.status == 1:
        order_object.deposit.margin_balance = 0
        order_object.deposit.save()
        models.DepositDeduct.objects.create(order=order_object, amount=order_object.deposit.guarantee_sum)
        return
    # Whole-session deposit: deduct this lot's share; if some balance remains
    # and no other orders are pending, refund the remainder to the account.
    """
    Scenario examples (whole-session deposit of 1000):
      A: price 9000, lot deposit 200 -> deduct 200, refund 800
      A: 9000/200 -> deduct 200
      B:  800/400 -> deduct 400, refund 400
      A: 9000/200 -> deduct 200
      B: 9000/900 -> deduct the remaining 800, refund 0
    """
    if order_object.deposit.margin_balance <= order_object.commodity.cash_deposit:
        models.DepositDeduct.objects.create(order=order_object,
                                            amount=order_object.deposit.margin_balance)
        order_object.deposit.margin_balance = 0
        order_object.deposit.save()
        return
    order_object.deposit.margin_balance -= order_object.commodity.cash_deposit
    order_object.deposit.save()
    models.DepositDeduct.objects.create(order=order_object,
                                        amount=order_object.commodity.cash_deposit)
    # Any other unpaid orders backed by the same session deposit?
    exists = models.Pay_Order.objects.filter(user=order_object.user, status=1,
                                             commodity__salecategory=order_object.deposit.salecategory).\
        exclude(id=order_id).exists()
    if exists:
        return
    # No other pending orders: refund the remaining balance via original route.
    uid = md5(uuid.uuid4())
    if order_object.deposit.pay_type == 1:  # paid via WeChat
        # res = refund(uid, deposit.uid, deposit.amount, deposit.amount)
        res = True
        models.DepositRefundRecord.objects.create(
            uid=uid,
            status=2 if res else 1,
            amount=order_object.deposit.margin_balance,
            deposit=order_object.deposit
        )
        if res:
            order_object.deposit.margin_balance = 0
            order_object.deposit.save()
    else:  # paid from account balance
        order_object.deposit.user.balance += order_object.deposit.margin_balance
        order_object.deposit.user.save()
        models.DepositRefundRecord.objects.create(
            uid=uid,
            status=2,
            amount=order_object.deposit.margin_balance,
            deposit=order_object.deposit
        )
        order_object.deposit.margin_balance = 0
        order_object.deposit.save()
|
[
"52723852+xuebajunzhu@users.noreply.github.com"
] |
52723852+xuebajunzhu@users.noreply.github.com
|
48f211c64c9cd052f15ed656c0ea4ff5a1a3171e
|
271fc3a3425b5b72a14007aec4b05167bb24a611
|
/python/llt/demo_board_examples/ltm90xx/ltm9010/ltm9010_14_dc1751a_b.py
|
24039cc58c97c865b453c0a05537bb67a5599cb0
|
[] |
no_license
|
LinearTechnology/linear_lab_tools
|
584b5750ea5f3a40d8afaf519db227a361eae476
|
be580699aea03d6941b1c616e602eef77e4734a9
|
refs/heads/master
| 2021-01-25T06:45:54.486604
| 2017-08-22T21:19:20
| 2017-08-22T21:19:20
| 91,368,960
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,138
|
py
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2016, Linear Technology Corp.(LTC)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied, of Linear Technology Corp.
Description:
The purpose of this module is to demonstrate how to communicate with
the LTM9010 demo board through python with the DC1371.
"""
import llt.common.dc1371 as dc1371
import llt.common.functions as funcs
import llt.common.constants as consts
def ltm9010_14_dc1751a_b(num_samples, spi_registers, verbose = False, do_plot = False,
                         do_write_to_file = False):
    """Capture `num_samples` samples per channel from a DC1751A-B demo board
    (LTM9010, 14-bit) through a DC1371 controller.

    spi_registers: flat [addr, value, addr, value, ...] list written to the
    device before capture. Optionally plots the channels and/or writes them
    to 'data.txt'. Returns the eight channel arrays (ch0..ch7).
    """
    with Dc1751aB(spi_registers, verbose) as controller:
        # You can call this multiple times with the same controller if you need to
        ch0, ch1, ch2, ch3, ch4, ch5, ch6, ch7 = controller.collect(num_samples, consts.TRIGGER_NONE)
        if do_plot:
            funcs.plot_channels(controller.get_num_bits(),
                                ch0, ch1, ch2, ch3, ch4, ch5, ch6, ch7,
                                verbose=verbose)
        if do_write_to_file:
            funcs.write_channels_to_file_32_bit("data.txt",
                                                ch0, ch1, ch2, ch3, ch4, ch5, ch6, ch7,
                                                verbose=verbose)
        return ch0, ch1, ch2, ch3, ch4, ch5, ch6, ch7
class Dc1751aB(dc1371.Demoboard2ChipSelects):
    """
    A DC1371 demo board with settings for the DC1751A-B
    (8 channels, 14-bit, unipolar).
    """
    def __init__(self, spi_registers, verbose = False):
        # NOTE(review): this calls dc1371.Demoboard.__init__ directly rather
        # than super()/Demoboard2ChipSelects.__init__, skipping any setup the
        # intermediate class performs — confirm this is intentional.
        dc1371.Demoboard.__init__(self,
                                  dc_number = 'DC1751A-B',
                                  fpga_load = 'S9011',
                                  num_channels = 8,
                                  num_bits = 14,
                                  alignment = 14,
                                  is_bipolar = False,
                                  demo_config = 0x28321000,
                                  spi_reg_values = spi_registers,
                                  verbose = verbose)
if __name__ == '__main__':
    # Demo run: 32K samples with a baseline SPI register configuration.
    NUM_SAMPLES = 32 * 1024
    spi_reg = [ # addr, value
        0x00, 0x80,
        0x01, 0x00,
        0x02, 0x80,
        0x03, 0x00,
        0x04, 0x00
        ]
    # to use this function in your own code you would typically do
    # data = ltm9010_14_dc1751a_b(num_samples, spi_reg)
    ltm9010_14_dc1751a_b(NUM_SAMPLES, spi_reg, verbose=True, do_plot=True, do_write_to_file=True)
|
[
"jsorensen@2d5ccaef-af5a-4037-8d26-727f02bf293a"
] |
jsorensen@2d5ccaef-af5a-4037-8d26-727f02bf293a
|
1f9b109192968687be953ae31ed89405140c4775
|
4a63e96d7015e3e13d9b5204fc0261c05f600d3b
|
/Standard Library/tempfile/app.py
|
7fc029dac7ed9b6e14fd7f28165dcf25da70c0c0
|
[
"Apache-2.0"
] |
permissive
|
shubhamnag14/Python-Documents
|
0e38f58298d35b4df5b61adb361d720337148a00
|
d3fee0ad90232b413f6ac1b562588fb255b79e42
|
refs/heads/master
| 2023-06-08T23:51:26.089840
| 2021-06-20T15:07:44
| 2021-06-20T15:07:44
| 380,832,776
| 0
| 0
|
Apache-2.0
| 2021-06-27T20:33:08
| 2021-06-27T20:31:41
| null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
import tempfile
def one():
    """Round-trip a short message through a mkstemp temp file and print it.

    Fixes two leaks in the original: tempfile.mkstemp returns an already-open
    OS-level file descriptor that was never closed, and the temp file itself
    was never deleted (mkstemp does not auto-delete).
    """
    import os  # local import: only needed for fd/file cleanup

    fd, path = tempfile.mkstemp()
    try:
        # Re-open by path for buffered text I/O (the raw fd stays open too).
        with open(path, 'w+') as f:
            f.write("This is a test")
            f.seek(0)
            print(f.read())
    finally:
        os.close(fd)
        os.remove(path)
# Show the tempfile module's directory/prefix defaults (str and bytes forms).
print(tempfile.gettempdir())
print(tempfile.gettempdirb())
print(tempfile.gettempprefix())
print(tempfile.gettempprefixb())
# tempfile.tempdir is None until the module computes/caches a default.
print(tempfile.tempdir)
|
[
"subhadeep@klizos.com"
] |
subhadeep@klizos.com
|
ccbfa8469210227d79a0ac4f59f6d865fc4ffd76
|
edc272ef8ea976676ffa5da438bf83c25b3e04a3
|
/assignment3/MyTest.py
|
416aa65030ff247256d6d810ec4c3ce1c008740c
|
[] |
no_license
|
songb/reinforcement
|
527ba3c153f16921fc0c4dc6fb7f4b3c8b70573d
|
82a83352eeafffdb9c3b5be04422219ebac46eba
|
refs/heads/master
| 2023-04-10T16:20:28.886005
| 2019-09-15T20:36:11
| 2019-09-15T20:36:11
| 192,816,672
| 0
| 0
| null | 2023-03-24T22:32:47
| 2019-06-19T23:22:21
|
Python
|
UTF-8
|
Python
| false
| false
| 308
|
py
|
import tensorflow as tf
# TF1-style toy: minimize f(X) = 2X^2 - 5X + 4 with Adam, starting from X=10.
# The analytic minimum is at X = 5/4.
X = tf.Variable(initial_value=10.0, trainable=True)
loss = 2*X*X - 5*X + 4
op = tf.train.AdamOptimizer(0.03).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(2000):
        sess.run(op)
    print(sess.run([X, loss]))
|
[
"admin@lm-38.tajen.edu.tw"
] |
admin@lm-38.tajen.edu.tw
|
a104479a871826d28058785a7b500aeaa1742a25
|
773009e5de8a6cabdad032e975bd22dddecdc6c1
|
/tools/recognize.py
|
db7a7100c94ae6afadcd0d63e19eace3db4b0007
|
[] |
no_license
|
burmisha/latex
|
e108430dbc179c1c6e4975b07b0e5549091dc9eb
|
7602037419741513929df00370821203e7ee872b
|
refs/heads/master
| 2023-08-03T08:32:12.800674
| 2023-05-09T10:40:32
| 2023-05-09T10:40:32
| 6,659,113
| 3
| 0
| null | 2023-07-20T11:55:59
| 2012-11-12T19:27:46
|
TeX
|
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
from tools.books import get_all_books
from library.structure.page import PagesRange
from library.normalize import format_plain_text
from library.util import fn_filter
import library.process
import os
import logging
log = logging.getLogger(__name__)
def runRecognize(args):
    """Decode the pages selected by --pages from every book whose PDF basename
    matches --filter, format the text, and copy it to the clipboard."""
    # Expand comma-separated page-range expressions into a flat index list.
    indices = []
    for str_page_range in args.pages.split(','):
        pages = PagesRange(str_page_range)
        indices += pages.pages_indicies
    for book in get_all_books():
        if not args.filter.match(os.path.basename(book.pdf_file)):
            continue
        log.info(f'Processing {book}')
        text = book.decode_as_text(indices=indices)
        text = format_plain_text(text, fill=False)
        # NOTE: if several books match, the clipboard holds only the last one.
        library.process.pbcopy(text)
def populate_parser(parser):
    """Register the 'recognize' subcommand arguments on an argparse parser."""
    parser.add_argument('-f', '--filter', help='fnmatch expression for book basename', required=True, type=fn_filter.FnFilter)
    parser.add_argument('-p', '--pages', help='Pages indices', required=True)
    parser.set_defaults(func=runRecognize)
|
[
"burmisha@gmail.com"
] |
burmisha@gmail.com
|
2da085a7d514ecdf39bd1fbbb336b52f04a83977
|
e5cd9c925a2e16c10fec32df21058bc59cc36cb2
|
/rozvrh/urls.py
|
f9087a7e2a4ff876574a9a70b32e2832e2ec0dff
|
[] |
no_license
|
olinpin/onlinebakalarigit
|
2f9fc29c120c1a6514961cd8c28518a9997fbcd4
|
a5a0ca3c9d6f31bfe4f093049a6d2d4d61493d8c
|
refs/heads/master
| 2023-05-02T07:24:18.867430
| 2021-03-29T07:12:36
| 2021-03-29T07:12:36
| 304,345,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
from django.urls import path
from . import views
# URL namespace for reverse() lookups, e.g. "rozvrh:index".
app_name = "rozvrh"
urlpatterns = [
    path("", views.index, name="index"),
    #path("<str:name>", views.greet, name="greet"),
    path('privacy', views.PrivacyPolicy, name="privacy"),
    path('form', views.rozvrhAdd, name="rozvrhAdd")
]
|
[
"Oliver.Hnat@gmail.com"
] |
Oliver.Hnat@gmail.com
|
934429d6b9f71ef8c85ac71d12a9b0e8aeb8ac13
|
d01f5a09c3ca3dd7dd04218e1d10278142403853
|
/test/test_baseclasses.py
|
b5c96a5b35c56f5eee7dfdcd2478bb9fffcdd249
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
LBNL-ETA/FMLC
|
d911098bce778cbc0224116427e1adf303c54a54
|
e8a1c98e7924050f37e0b11720c0d90d4a884e52
|
refs/heads/master
| 2023-05-12T11:18:02.618796
| 2023-05-08T20:07:59
| 2023-05-08T20:07:59
| 182,856,106
| 3
| 3
|
NOASSERTION
| 2022-03-04T17:59:22
| 2019-04-22T19:42:06
|
Python
|
UTF-8
|
Python
| false
| false
| 808
|
py
|
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from fmlc.baseclasses import eFMU
def test_controller1():
    """Smoke-test eFMU subclassing: declare inputs a/b and output c = a*b,
    then verify variable discovery, stepping, and the getter API."""
    class testcontroller1(eFMU):
        def __init__(self):
            self.input = {'a': None, 'b': None}
            self.output = {'c': None}
        def compute(self):
            self.output['c'] = self.input['a'] * self.input['b']
    testcontroller = testcontroller1()
    variables = list(testcontroller.get_model_variables())
    # Feed every declared input with 3 and run one simulation step.
    inputs = {}
    for var in variables:
        inputs[var] = 3
    testcontroller.do_step(inputs=inputs)
    assert variables == ['a', 'b']
    assert testcontroller.get_input() == {'a': 3, 'b': 3}
    assert testcontroller.get_output() == {'c': 9}
    assert testcontroller.get_var('output') == {'c': 9}
|
[
"cgehbauer@lbl.gov"
] |
cgehbauer@lbl.gov
|
4203ccce21fd6f3bb111cf8bfb5875de639db6a3
|
e99f022a3f5251c730c8f18c7a20f0be8b3a9d0a
|
/DNSDumpsterAPI.py
|
b5fb7bc7c4cde7052869179252bde79907198795
|
[
"MIT"
] |
permissive
|
fakegit/CloudFail
|
6a87ae26c03d48b84fd307bb48eba5146216db3f
|
a322ec7a2482c34e67cde537c7ebedc1b4120564
|
refs/heads/master
| 2022-09-23T11:19:23.277469
| 2022-09-22T10:46:37
| 2022-09-22T10:46:37
| 158,049,763
| 0
| 0
|
MIT
| 2022-09-22T17:34:32
| 2018-11-18T04:28:16
|
Python
|
UTF-8
|
Python
| false
| false
| 4,482
|
py
|
"""
This is the (unofficial) Python API for dnsdumpster.com Website.
Using this code, you can retrieve subdomains
"""
from __future__ import print_function
import requests
import re
import sys
import base64
from bs4 import BeautifulSoup
class DNSDumpsterAPI(object):
    """Unofficial client for the dnsdumpster.com website.

    Scrapes the site's HTML results pages to retrieve DNS/MX/TXT/host
    records (plus the network-map image and XLS export) for a domain.
    """

    def __init__(self, verbose=False, session=None):
        """
        :param verbose: print progress via display_message().
        :param session: optional pre-configured requests.Session (proxies,
            testing); a fresh one is created when omitted.
        """
        self.verbose = verbose
        if not session:
            self.session = requests.Session()
        else:
            self.session = session

    def display_message(self, s):
        # Trace helper: only prints in verbose mode.
        if self.verbose:
            print('[verbose] %s' % s)

    def retrieve_results(self, table):
        """Parse one results <table> into a list of record dicts with keys
        domain/ip/reverse_dns/as/provider/country/header."""
        res = []
        trs = table.findAll('tr')
        for tr in trs:
            tds = tr.findAll('td')
            pattern_ip = r'([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})'
            try:
                ip = re.findall(pattern_ip, tds[1].text)[0]
                domain = str(tds[0]).split('<br/>')[0].split('>')[1].split('<')[0]
                header = ' '.join(tds[0].text.replace('\n', '').split(' ')[1:])
                reverse_dns = tds[1].find('span', attrs={}).text
                additional_info = tds[2].text
                country = tds[2].find('span', attrs={}).text
                autonomous_system = additional_info.split(' ')[0]
                provider = ' '.join(additional_info.split(' ')[1:])
                provider = provider.replace(country, '')
                data = {'domain': domain,
                        'ip': ip,
                        'reverse_dns': reverse_dns,
                        'as': autonomous_system,
                        'provider': provider,
                        'country': country,
                        'header': header}
                res.append(data)
            except Exception:
                # Rows that don't match the expected layout (e.g. header rows)
                # are deliberately skipped. Was a bare `except:`, which also
                # swallowed KeyboardInterrupt/SystemExit.
                pass
        return res

    def retrieve_txt_record(self, table):
        """Return the text of every <td> cell (used for the TXT-record table)."""
        res = []
        for td in table.findAll('td'):
            res.append(td.text)
        return res

    def search(self, domain):
        """Query dnsdumpster.com for `domain`.

        Returns a dict with 'domain', 'dns_records' (dns/mx/txt/host lists),
        'image_data' and 'xls_data' (base64 bytes or None); returns [] when
        the site responds with an error.
        """
        dnsdumpster_url = 'https://dnsdumpster.com/'
        req = self.session.get(dnsdumpster_url)
        soup = BeautifulSoup(req.content, 'html.parser')
        # The site requires Django's CSRF token as both cookie and form field.
        csrf_middleware = soup.findAll('input', attrs={'name': 'csrfmiddlewaretoken'})[0]['value']
        self.display_message('Retrieved token: %s' % csrf_middleware)
        cookies = {'csrftoken': csrf_middleware}
        headers = {'Referer': dnsdumpster_url, 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36'}
        data = {'csrfmiddlewaretoken': csrf_middleware, 'targetip': domain, 'user': 'free'}
        req = self.session.post(dnsdumpster_url, cookies=cookies, data=data, headers=headers)
        if req.status_code != 200:
            print(
                "Unexpected status code from {url}: {code}".format(
                    url=dnsdumpster_url, code=req.status_code),
                file=sys.stderr,
            )
            return []
        if 'There was an error getting results' in req.content.decode('utf-8'):
            print("There was an error getting results", file=sys.stderr)
            return []
        soup = BeautifulSoup(req.content, 'html.parser')
        tables = soup.findAll('table')
        res = {}
        res['domain'] = domain
        res['dns_records'] = {}
        res['dns_records']['dns'] = self.retrieve_results(tables[0])
        res['dns_records']['mx'] = self.retrieve_results(tables[1])
        res['dns_records']['txt'] = self.retrieve_txt_record(tables[2])
        res['dns_records']['host'] = self.retrieve_results(tables[3])
        # Network mapping image (best effort; None on any failure).
        try:
            tmp_url = 'https://dnsdumpster.com/static/map/{}.png'.format(domain)
            image_data = base64.b64encode(self.session.get(tmp_url).content)
        except Exception:
            # Was a bare `except:`; keep the best-effort semantics without
            # swallowing KeyboardInterrupt/SystemExit.
            image_data = None
        finally:
            res['image_data'] = image_data
        # XLS host export, e.g. tsebo.com-201606131255.xlsx (best effort).
        try:
            # Raw strings fix the invalid '\.' escape (DeprecationWarning now,
            # SyntaxError on future Pythons); re.escape keeps dots in the
            # domain from matching arbitrary characters.
            pattern = r'/static/xls/' + re.escape(domain) + r'-[0-9]{12}\.xlsx'
            xls_url = re.findall(pattern, req.content.decode('utf-8'))[0]
            xls_url = 'https://dnsdumpster.com' + xls_url
            xls_data = base64.b64encode(self.session.get(xls_url).content)
        except Exception as err:
            print(err)
            xls_data = None
        finally:
            res['xls_data'] = xls_data
        return res
|
[
"erowdy1@gmail.com"
] |
erowdy1@gmail.com
|
8f0f47ef704ca1bf13b6b054d960ad79eb855848
|
a838d4bed14d5df5314000b41f8318c4ebe0974e
|
/sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2019_02_01/aio/operations/_operations.py
|
8379386a6db3839f741ead07cbbbe9c6844a901e
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
scbedd/azure-sdk-for-python
|
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
|
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
|
refs/heads/master
| 2023-09-01T08:38:56.188954
| 2021-06-17T22:52:28
| 2021-06-17T22:52:28
| 159,568,218
| 2
| 0
|
MIT
| 2019-08-11T21:16:01
| 2018-11-28T21:34:49
|
Python
|
UTF-8
|
Python
| false
| false
| 4,659
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
    """Operations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.containerservice.v2019_02_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        **kwargs
    ) -> AsyncIterable["_models.OperationListResult"]:
        """Gets a list of compute operations.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OperationListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2019_02_01.models.OperationListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OperationListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-02-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already carries the full query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation token, items).
            deserialized = self._deserialize('OperationListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/providers/Microsoft.ContainerService/operations'}  # type: ignore
|
[
"noreply@github.com"
] |
noreply@github.com
|
fc696582a78cdd7c5d1899b2b36105b5ae57fb27
|
cc2029f40a12e82712072275fc76a07ac59b5940
|
/battles/tourneys/20170409_2015.py
|
47a50e202ae61271c8e51095af49e9ed277655a0
|
[
"MIT"
] |
permissive
|
heitorchang/learn-code
|
d3fb8e45d539d302372126fe28e85032590b5707
|
5e6e56f7257de1910830619c01d470e892d7f9d8
|
refs/heads/master
| 2023-08-09T13:46:18.623772
| 2023-07-21T16:57:11
| 2023-07-21T16:57:11
| 147,522,837
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,482
|
py
|
from math import log
description = """
You are playing a number guessing game with your friend. Your friend thought of some integer x from 1 to n. In order to guess the number, you can ask two types of questions:
"is x smaller or equal to a?" for some integer a;
"is x greater or equal to a?" for some integer a.
If the answer to your question is "yes", you should pay your friend $2, otherwise you should pay him $1.
How much will you have to pay to your friend, assuming that you apply the strategy that minimizes the amount of money you have to pay in order to guess the number in the worst case scenario?
"""
def numberGuessingNaive(n):
    """DP for the paid guessing game: p[i] is the minimal worst-case cost to
    identify a number in a range of size i ("yes" answers cost $2, "no" $1).

    NOTE(review): `pr` is not defined in this file — presumably a debug-print
    helper injected by the author's environment; confirm before running.
    """
    # solution by sensytive
    p=[0]*(n+1)
    for i in range(2,n+1):
        p[i]=i
        for m in range(1,i):
            pr('p[i] 1+p[m] 2+p[i-m]')
            # Split the range at m: "yes" branch costs 1+p[m], "no" 2+p[i-m].
            p[i] = min(p[i], max(1+p[m], 2+p[i-m]))
    pr('p')
    return p[-1]
def factorialsProductTrailingZeros(l, r):
    """Count trailing zeros of the product l! * (l+1)! * ... * r! by counting
    factors of 5 (factors of 2 are always more plentiful).

    NOTE(review): uses the undefined debug helper `pr`, and `number /= 5` is
    float division on Python 3 — behavior verified only under Python 2 ints.
    """
    result = 0
    last = 0
    for i in range(1, r + 1):
        # Factors of 5 contributed by i alone.
        number = i
        while number % 5 == 0:
            number /= 5
            result += 1
        if i >= l:
            pr('result last')
            result += last
    return result
def test():
    # NOTE(review): `testeql` is not defined in this file — presumably an
    # assertion helper from the author's harness. Several expected values
    # look inconsistent with each other (e.g. numberGuessingNaive(4) == 4
    # vs numberGuessingNaive(15) == 0); verify against the harness.
    testeql(numberGuessingNaive(4),4)
    testeql(numberGuessingNaive(3),3)
    testeql(numberGuessingNaive(1),0)
    # testeql(numberGuessingNaive(534),14)
    testeql(factorialsProductTrailingZeros(4, 10), 7)
    testeql(numberGuessingNaive(15), 0)
    testeql(numberGuessingNaive(9), 0)
|
[
"heitorchang@gmail.com"
] |
heitorchang@gmail.com
|
7c21708c07f793fe8b7ea0a740e301f39cdba0f3
|
00ce7b1e677abbfe7912a472e74b3fab92b9fc50
|
/Data_processing/MNIST/checkSource.py
|
0ca2d86a677aa1ee43da5cc7832283efb90ac5f8
|
[] |
no_license
|
Xharlie/MultiGen
|
a19b8cd76bc1933773411d69200e86bf1ba8ed74
|
e3e646289249ce9418fb40f5a246310ac37e9a96
|
refs/heads/master
| 2021-01-22T10:56:16.432939
| 2017-12-02T03:34:41
| 2017-12-02T03:34:41
| 82,051,628
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 877
|
py
|
import h5py
from PIL import Image
import numpy as np
# Indices into the per-sample one-hot attribute vectors stored in the h5 file.
DIGIT=2
FORM=4
COLOR=0
TRANSFORM=1

def check():
    """Find the first MNIST sample whose attribute vectors are hot at the
    constant indices above and display it with PIL (Python 2 script)."""
    imgArray = []
    with h5py.File('../../Source/MNIST/all_info.h5', 'r') as f:
        imgArray = f['data']
        segm = f['segm']
        digit = f['digit']
        form = f['form']
        color = f['color']
        transform = f['transform']
        index = 0
        for i in range(imgArray.shape[0]):
            if (digit[i][DIGIT] == 1 and form[i][FORM] == 1 and color[i][COLOR] == 1 and transform[i][TRANSFORM] == 1):
                index = i
                break
        # CHW -> HWC, scale to 0..255 for PIL display.
        img = np.transpose(imgArray[index],(1,2,0))
        img = (img)*255
        img = Image.fromarray(img.astype(np.int8), 'RGB')
        img.show()
        # print segm.shape
        # img2 = segm
        # print img2
        # img2 = Image.fromarray((img2 * 255).astype(np.int8), 'L')
        # img2.show()
        print digit,form,color,transform

if __name__ == '__main__':
    check()
|
[
"charlie.xu007@yahoo.com"
] |
charlie.xu007@yahoo.com
|
6db1394c31c689f64f58cffb4a65caedab7887b6
|
55c46d50ed426a3dccef8c44904df4524de43aa1
|
/oldp/apps/cases/api_views.py
|
ff02e857cdc98c72953bdf206858a565d1bfcd76
|
[
"MIT"
] |
permissive
|
docsuleman/oldp
|
1a438a9c669a54aab2f76133200e566d627d9668
|
8dcaa8e6e435794c872346b5014945ace885adb4
|
refs/heads/master
| 2020-06-29T10:45:18.787344
| 2019-08-04T18:21:02
| 2019-08-04T18:21:02
| 200,513,942
| 0
| 0
|
MIT
| 2019-08-04T17:36:52
| 2019-08-04T16:07:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,439
|
py
|
import coreapi
import coreschema
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from django_filters.rest_framework import DjangoFilterBackend
from drf_haystack.filters import HaystackFilter
from drf_haystack.generics import HaystackGenericAPIView
from rest_framework import viewsets
from rest_framework.filters import OrderingFilter
from rest_framework.mixins import ListModelMixin
from rest_framework.permissions import AllowAny
from rest_framework.viewsets import ViewSetMixin
from oldp.api import SmallResultsSetPagination
from oldp.apps.cases.filters import CaseAPIFilter
from oldp.apps.cases.models import Case
from oldp.apps.cases.search_indexes import CaseIndex
from oldp.apps.cases.serializers import CaseSerializer, CASE_API_FIELDS, CaseSearchSerializer
from oldp.apps.search.filters import SearchSchemaFilter
class CaseViewSet(viewsets.ModelViewSet):
    """
    List view for cases
    """
    pagination_class = SmallResultsSetPagination  # limit page (other content field blows up response size)
    queryset = Case.get_queryset()
    serializer_class = CaseSerializer
    # lookup_field = 'slug'
    filter_backends = (OrderingFilter, DjangoFilterBackend, )
    filterset_class = CaseAPIFilter
    ordering_fields = ('date', )

    @method_decorator(cache_page(60))
    def dispatch(self, *args, **kwargs):
        # Cache whole HTTP responses for 60 seconds.
        return super().dispatch(*args, **kwargs)

    def get_queryset(self):
        # Join the court relation and fetch only API fields to keep queries lean.
        return Case.get_queryset()\
            .select_related('court')\
            .only(*CASE_API_FIELDS)
class CaseSearchSchemaFilter(SearchSchemaFilter):
    # Schema filter exposing the case search index's query parameters.
    search_index_class = CaseIndex

    def get_default_schema_fields(self):
        # Declare the mandatory full-text query parameter for API docs.
        return [
            # Search query field is required
            coreapi.Field(
                name='text',
                location='query',
                required=True,
                schema=coreschema.String(description='Search query on text content (Lucence syntax support).'),
            )
        ]
class CaseSearchViewSet(ListModelMixin, ViewSetMixin, HaystackGenericAPIView):
    """
    Search view (list only)
    """
    permission_classes = (AllowAny,)
    pagination_class = SmallResultsSetPagination  # limit page (other content field blows up response size)
    index_models = [
        Case
    ]
    serializer_class = CaseSearchSerializer
    filter_backends = (HaystackFilter, CaseSearchSchemaFilter,)
|
[
"gitlab@i.mieo.de"
] |
gitlab@i.mieo.de
|
6f58ea89e0a72bc170187b3c7eeffeca8ec3d68d
|
b5bcd5e4127089f6726bf64f574faa52c1b4b8e1
|
/BTagReco/python/AnalysisConfiguration_SIG.py
|
4eabe0c054a97e0b632371efb5ec1a88613acf29
|
[] |
no_license
|
romeof/BTagRunII
|
3b225277ad4697694dfd3307c41aed75740db1fc
|
7ec1351e2b41bae1b50368bcb538e58f2a0a7ade
|
refs/heads/master
| 2021-01-19T20:15:27.999078
| 2015-05-13T09:31:12
| 2015-05-13T09:31:12
| 35,540,157
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,719
|
py
|
import FWCore.ParameterSet.Config as cms
# CMSSW configuration: run the BTagReco analyzer over ttH signal miniAOD.
process = cms.Process("Demo")
#####
## Modules for the analysis
#####
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load("TrackingTools/TransientTrack/TransientTrackBuilder_cfi")
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.MagneticField_38T_cff")
#process.GlobalTag.globaltag = 'PHYS14_25_V3::All'
process.GlobalTag.globaltag = 'PLS170_V7AN1::All'
#####
## Interaction with the analyser
#####
process.demo = cms.EDAnalyzer("BTagReco",
    # Collections of objects (miniAOD input tags)
    pruned = cms.InputTag("prunedGenParticles"),
    packed = cms.InputTag("packedGenParticles"),
    bits = cms.InputTag("TriggerResults","","HLT"),
    objects = cms.InputTag("selectedPatTrigger"),
    prescales = cms.InputTag("patTrigger"),
    vertices = cms.InputTag("offlineSlimmedPrimaryVertices"),
    muons = cms.InputTag("slimmedMuons"),
    electrons = cms.InputTag("slimmedElectrons"),
    taus = cms.InputTag("slimmedTaus"),
    photons = cms.InputTag("slimmedPhotons"),
    jets = cms.InputTag("slimmedJets"),
    fatjets = cms.InputTag("slimmedJetsAK8"),
    mets = cms.InputTag("slimmedMETs"),
    pfCands = cms.InputTag("packedPFCandidates"),
    lostTracks = cms.InputTag("lostTracks"),
    # Values for the whole analysis (delta-R cones and jet ordering switches)
    mindr_p3 = cms.untracked.double(0.3),
    mindr_p5 = cms.untracked.double(0.5),
    first_jet_highest_btag = cms.bool(True),
    first_jet_lowest_btag = cms.bool(False)
)
#####
## Input files
#####
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        '/store/mc/Spring14miniaod/TTbarH_HToWWTo2L2Nu_M-125_13TeV_amcatnlo-pythia8-tauola/MINIAODSIM/141029_PU40bx50_PLS170_V6AN2-v1/10000/6619F511-6065-E411-B131-0023AEFDE908.root'
        #'/store/mc/Spring14miniaod/TTJets_MSDecaysCKM_central_Tune4C_13TeV-madgraph-tauola/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/C04535F7-8BFC-E311-B271-0026189438D5.root'
        #'/store/mc/Spring14miniaod/TTJets_MSDecaysCKM_central_Tune4C_13TeV-madgraph-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM/'
        #'/store/mc/Spring14miniaod/TTbarH_HToWWTo2L2Nu_M-125_13TeV_amcatnlo-pythia8-tauola/MINIAODSIM/',
        #skipEvents = cms.untracked.uint32(25) #Skip the first n evt, or comment this line if you do not want to skip evt
    ),
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(30000) ) #Num of evt to be analysed (whatever is the starting evt)
#####
## Output file
#####
process.TFileService = cms.Service("TFileService",
    fileName = cms.string('tthtree_signal.root')
)
#####
## Analysis chain
#####
process.p = cms.Path(process.demo)
|
[
"fromeo@cern.ch"
] |
fromeo@cern.ch
|
786ebc8282cb594394de4ac386c827aa6cd2c2cb
|
f63277c2b0dd4a68d829a69762f701d7ce715a6f
|
/Audio.py
|
391fdf7f44e25b9179ee8f99831d410b89743e8e
|
[] |
no_license
|
anmolp95/AudioTube
|
6f241593033715b4d06d205aa0b79c686c098b0c
|
30953085bdd6c330c8739331e088712c2a7bb6c2
|
refs/heads/master
| 2020-05-18T14:27:16.755476
| 2017-03-11T20:35:26
| 2017-03-11T20:35:26
| 84,248,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,284
|
py
|
import Utils
import AppConfig
import time
class Audio:
    # Wrapper for one playable audio entry (Python 2 module: uses print
    # statements throughout — do not run under Python 3 as-is).
    def __init__(self,id,title,videoAttributes=None):
        """
        self.size=audio.get_filesize()
        self.type=audio.extension"""
        #ID
        self.id=id
        #Audio Related Features
        # Title is cleaned through the project-level Utils helper.
        self.title=Utils.sanitiseTitle(title)
        if videoAttributes:
            #Video Related Features
            # Expected positional order: (duration, viewcount, likes, dislikes)
            # — TODO confirm against the caller that builds videoAttributes.
            self.dur=videoAttributes[0]
            self.viewcount=int(videoAttributes[1])
            self.likes=int(videoAttributes[2])
            self.dislikes=int(videoAttributes[3])
        # Filled in later by populateStreamInfo(); gates stream().
        self.audioStream=None
    def __hash__(self):
        # Hash on the id alone; assumes ids are unique per entry.
        return hash(self.id)
    def show(self):
        # Print a human-readable summary of this entry to stdout.
        print "Title:",self.title
        print "Id:",self.id
        print "Views:",self.viewcount
        # Scale likes/dislikes into a combined 50-character +/- bar.
        # NOTE(review): raises ZeroDivisionError when likes+dislikes == 0.
        liking=self.likes*50/(self.likes+self.dislikes)
        disliking=self.dislikes*50/(self.likes+self.dislikes)
        #Low level features
        print "Duration:",self.dur
        likeStr='+'*liking
        dislikeStr='-'*disliking
        print "Likes %d , Disklikes %d " %(self.likes,self.dislikes)
        print likeStr+dislikeStr
        """sizeInMb=self.size/float(1024*1024)
        print "Size: %.3f MB" %sizeInMb
        print "Type:",self.type"""
        return
    def populateStreamInfo(self,audioStream):
        # Attach the stream object whose .url stream() will download.
        self.audioStream=audioStream
        #self.audioStream=audioStream
    def isPrepared(self):
        # True once populateStreamInfo() has been called with a stream.
        if self.audioStream!=None:
            return True
        else:
            return False
    def stream(self):
        # Start downloading the audio and poll the downloader's stderr for an
        # ffmpeg-style readiness banner, giving up after ~40 lines.
        if self.isPrepared():
            x1=Utils.download(self.audioStream.url,self.title)
            print "Started..."
            succ=False
            ct=0
            while 1:
                ct+=1
                ret=x1.stderr.readline()
                print ret
                if "Press [q] to stop, [?] for help" in ret:
                    succ=True
                    #break
                if ct>40:
                    break
            print "Yes"
            if succ == True:
                # NOTE(review): message contradicts the condition — this prints
                # "Not succ" on the SUCCESS branch; confirm intended wording.
                print "Not succ"
                # x2=Utils.stream(self.title,AppConfig.streamDelay,x1)
            else:
                raise Exception(" Coudn't Start Stream ")
        else:
            raise Exception('Audio Streams Not Prepared Yet do audioObject.prepare()')
|
[
"anmolp2010@gmail.com"
] |
anmolp2010@gmail.com
|
4768e228dfa9b0bbd436a7cb79eba2b32e8a96fe
|
39f78b00d7d79a4e0f29f6b1fe15f20ecc74bea2
|
/74 - Search 2D matrix.py
|
9320b251ab54bd90f5bd20ff89e97bb7001b178e
|
[] |
no_license
|
KaranKaur/Leetcode
|
ca1ac5a590de720d37a3c0fca014065086e6e38e
|
765fb39ba57634d2c180eb1fd90522c781d409c4
|
refs/heads/master
| 2020-03-28T09:44:12.318384
| 2018-09-10T14:07:05
| 2018-09-10T14:07:05
| 148,056,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,143
|
py
|
"""
Search a 2D matrix,
Write an efficient algorithm that searches for a value in an m x n matrix.
This matrix has the following properties:
1) Integers in each row are sorted from left to right.
2) The first integer of each row is greater than the last integer of the
previous row.
Example 1:
Input:
matrix = [
[1, 3, 5, 7],
[10, 11, 16, 20],
[23, 30, 34, 50]
]
target = 3
Output: true
Example 2:
Input:
matrix = [
[1, 3, 5, 7],
[10, 11, 16, 20],
[23, 30, 34, 50]
]
target = 13
Output: false
"""
def search_2D_matrix(matrix, target):
    """Return True if *target* occurs in *matrix*, else False.

    The matrix has sorted rows and each row starts after the previous row
    ends, so it can be treated as one flat sorted list and binary-searched
    in O(log(m*n)).

    :param matrix: list of lists of integers (row-major, globally sorted)
    :param target: integer to look for
    :return: bool
    """
    # Guard: an empty matrix (or a matrix of empty rows) contains nothing.
    if not matrix or not matrix[0]:
        return False
    rows, cols = len(matrix), len(matrix[0])
    low, high = 0, rows * cols - 1
    while low <= high:
        # Floor division: the original used `/`, which produces floats on
        # Python 3 and crashes the subsequent indexing.
        mid = (low + high) // 2
        num = matrix[mid // cols][mid % cols]
        if num == target:
            return True
        elif num < target:
            low = mid + 1
        else:
            high = mid - 1
    return False
# Demo: 3 is present in the matrix, so this prints True.
m = [
    [1, 3, 5, 7],
    [10, 11, 16, 20],
    [23, 30, 34, 50]
]
target = 3
print(search_2D_matrix(m, target))
|
[
"kkaur89@umd.edu"
] |
kkaur89@umd.edu
|
9c17717ec5417d326d418ca005fef3767db03bf2
|
6e70fba9cd4595efdbbb9e9294e9c1bb1ec648b7
|
/Thesis Research/Convolutional Neural Networks for Image Recognition/A2/assignment2/cs231n/solver.py
|
95f8807c01b2dd30a9756b0b74148ed495441443
|
[] |
no_license
|
maespinosa/Thesis_Documents
|
71bfa8ad26e9aec382ec6017cadac47d565faa52
|
61bc2dd6cac1b89a5dc08df377129fb7fd5b95da
|
refs/heads/master
| 2020-11-29T12:00:05.105873
| 2019-05-21T06:45:06
| 2019-05-21T06:45:06
| 87,496,867
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,857
|
py
|
import numpy as np
from cs231n import optim
class Solver(object):
    """
    A Solver encapsulates all the logic necessary for training classification
    models. It performs stochastic gradient descent using one of the update
    rules defined in optim.py.

    The solver accepts both training and validation data and labels so it can
    periodically check classification accuracy on both sets to watch out for
    overfitting.

    Example usage:

        data = {
          'X_train': ...,  # training data
          'y_train': ...,  # training labels
          'X_val': ...,    # validation data
          'y_val': ...,    # validation labels
        }
        model = MyAwesomeModel(hidden_size=100, reg=10)
        solver = Solver(model, data,
                        update_rule='sgd',
                        optim_config={'learning_rate': 1e-3},
                        lr_decay=0.95,
                        num_epochs=10, batch_size=100,
                        print_every=100)
        solver.train()

    A Solver works on a model object that must conform to the following API:

    - model.params: dict mapping string parameter names to numpy arrays.
    - model.loss(X, y=None):
        * If y is None: run a test-time forward pass and return scores of
          shape (N, C), where scores[i, c] is the score of class c for X[i].
        * If y is given: run a training-time forward/backward pass and return
          (loss, grads), where grads has the same keys as model.params.

    After train() returns, model.params contains the parameters that
    performed best on the validation set; solver.loss_history,
    solver.train_acc_history and solver.val_acc_history record the run.
    """

    def __init__(self, model, data, **kwargs):
        """
        Construct a new Solver instance.

        Required arguments:
        - model: a model object conforming to the API described above.
        - data: dict with keys 'X_train', 'y_train', 'X_val', 'y_val'.

        Optional arguments:
        - update_rule: name of an update rule in optim.py; default 'sgd'.
        - optim_config: dict of hyperparameters for the update rule; every
          rule requires at least 'learning_rate'.
        - lr_decay: scalar the learning rate is multiplied by after each epoch.
        - batch_size: minibatch size used to compute loss and gradient.
        - num_epochs: number of epochs to train for.
        - print_every: print the training loss every print_every iterations.
        - verbose: if False, print nothing during training.

        Raises:
        - ValueError: on unrecognized keyword arguments or an unknown
          update_rule name.
        """
        self.model = model
        self.X_train = data['X_train']
        self.y_train = data['y_train']
        self.X_val = data['X_val']
        self.y_val = data['y_val']

        # Unpack keyword arguments, consuming them as we go.
        self.update_rule = kwargs.pop('update_rule', 'sgd')
        self.optim_config = kwargs.pop('optim_config', {})
        self.lr_decay = kwargs.pop('lr_decay', 1.0)
        self.batch_size = kwargs.pop('batch_size', 100)
        self.num_epochs = kwargs.pop('num_epochs', 10)
        self.print_every = kwargs.pop('print_every', 10)
        self.verbose = kwargs.pop('verbose', True)

        # Anything left over is a typo or unsupported option — fail loudly.
        if len(kwargs) > 0:
            extra = ', '.join('"%s"' % k for k in kwargs.keys())
            raise ValueError('Unrecognized arguments %s' % extra)

        # Make sure the update rule exists, then replace the string name
        # with the actual function from optim.
        if not hasattr(optim, self.update_rule):
            raise ValueError('Invalid update_rule "%s"' % self.update_rule)
        self.update_rule = getattr(optim, self.update_rule)

        self._reset()

    def _reset(self):
        """
        Set up book-keeping variables for optimization. Don't call this
        manually.
        """
        self.epoch = 0
        self.best_val_acc = 0
        self.best_params = {}
        self.loss_history = []
        self.train_acc_history = []
        self.val_acc_history = []

        # Give each parameter its own copy of optim_config so per-parameter
        # optimizer state (momentum caches etc.) evolves independently.
        self.optim_configs = {}
        for p in self.model.params:
            d = {k: v for k, v in self.optim_config.items()}
            self.optim_configs[p] = d

    def _step(self):
        """
        Make a single gradient update on one randomly sampled minibatch.
        Called by train(); do not call manually.
        """
        # Sample a minibatch of training data (with replacement).
        num_train = self.X_train.shape[0]
        batch_mask = np.random.choice(num_train, self.batch_size)
        X_batch = self.X_train[batch_mask]
        y_batch = self.y_train[batch_mask]

        # Compute loss and gradient on the minibatch.
        loss, grads = self.model.loss(X_batch, y_batch)
        self.loss_history.append(loss)

        # Apply the configured update rule to every parameter.
        for p, w in self.model.params.items():
            dw = grads[p]
            config = self.optim_configs[p]
            next_w, next_config = self.update_rule(w, dw, config)
            self.model.params[p] = next_w
            self.optim_configs[p] = next_config

    def check_accuracy(self, X, y, num_samples=None, batch_size=100):
        """
        Check accuracy of the model on the provided data.

        Inputs:
        - X: array of data, of shape (N, d_1, ..., d_k)
        - y: array of labels, of shape (N,)
        - num_samples: if not None, subsample the data and only test the
          model on num_samples datapoints.
        - batch_size: split X and y into batches of this size to avoid using
          too much memory.

        Returns:
        - acc: scalar fraction of instances correctly classified.
        """
        # Maybe subsample the data.
        N = X.shape[0]
        if num_samples is not None and N > num_samples:
            mask = np.random.choice(N, num_samples)
            N = num_samples
            X = X[mask]
            y = y[mask]

        # Compute predictions in batches. Integer ceiling division keeps
        # num_batches an int on Python 3; the original float division only
        # worked because of the int() cast inside range().
        num_batches = N // batch_size
        if N % batch_size != 0:
            num_batches += 1
        y_pred = []
        for i in range(num_batches):
            start = i * batch_size
            end = (i + 1) * batch_size
            scores = self.model.loss(X[start:end])
            y_pred.append(np.argmax(scores, axis=1))
        y_pred = np.hstack(y_pred)
        acc = np.mean(y_pred == y)

        return acc

    def train(self):
        """
        Run optimization to train the model.
        """
        num_train = self.X_train.shape[0]
        # Floor division keeps the iteration counts integral on Python 3.
        iterations_per_epoch = max(num_train // self.batch_size, 1)
        num_iterations = self.num_epochs * iterations_per_epoch

        for t in range(num_iterations):
            self._step()

            # Maybe print training loss.
            if self.verbose and t % self.print_every == 0:
                print('(Iteration %d / %d) loss: %f' % (
                    t + 1, num_iterations, self.loss_history[-1]))

            # At the end of every epoch, increment the epoch counter and
            # decay the learning rate.
            epoch_end = (t + 1) % iterations_per_epoch == 0
            if epoch_end:
                self.epoch += 1
                for k in self.optim_configs:
                    self.optim_configs[k]['learning_rate'] *= self.lr_decay

            # Check train and val accuracy on the first iteration, the last
            # iteration, and at the end of each epoch.
            first_it = (t == 0)
            # BUG FIX: range() yields at most num_iterations - 1, so the
            # original comparison (t == num_iterations + 1) was never true
            # and the final accuracy check never ran.
            last_it = (t == num_iterations - 1)
            if first_it or last_it or epoch_end:
                train_acc = self.check_accuracy(self.X_train, self.y_train,
                                                num_samples=1000)
                val_acc = self.check_accuracy(self.X_val, self.y_val)
                self.train_acc_history.append(train_acc)
                self.val_acc_history.append(val_acc)

                if self.verbose:
                    print('(Epoch %d / %d) train acc: %f; val_acc: %f' % (
                        self.epoch, self.num_epochs, train_acc, val_acc))

                # Keep track of the best model seen so far on validation data.
                if val_acc > self.best_val_acc:
                    self.best_val_acc = val_acc
                    self.best_params = {}
                    for k, v in self.model.params.items():
                        self.best_params[k] = v.copy()

        # At the end of training swap the best params into the model.
        self.model.params = self.best_params
|
[
"markanthonyespinosa@gmail.com"
] |
markanthonyespinosa@gmail.com
|
0ee425d47fec6a76ce5e13d55e2fa477fd61d4f0
|
331a3f3e57a0ab7a2290e5a084eaa6bb8b4124e6
|
/pandas/tests/apply/test_invalid_arg.py
|
6ed962c8f68e6b6c93aaa2be78420eb6b25dd962
|
[
"BSD-3-Clause"
] |
permissive
|
WuraolaOyewusi/pandas
|
f76aad45df9950ab7fcc4dfcae42dacab0b91f21
|
318f13a328f0f486b6712e9b6f26ca0249327843
|
refs/heads/master
| 2022-11-19T02:20:18.371013
| 2022-10-31T16:51:46
| 2022-10-31T16:51:46
| 220,716,671
| 2
| 0
|
BSD-3-Clause
| 2022-10-31T16:51:47
| 2019-11-09T23:26:39
|
Python
|
UTF-8
|
Python
| false
| false
| 11,077
|
py
|
# Tests specifically aimed at detecting bad arguments.
# This file is organized by reason for exception.
# 1. always invalid argument values
# 2. missing column(s)
# 3. incompatible ops/dtype/args/kwargs
# 4. invalid result shape/type
# If your test does not fit into one of these categories, add to this list.
from itertools import chain
import re
import numpy as np
import pytest
from pandas.errors import SpecificationError
from pandas import (
Categorical,
DataFrame,
Series,
date_range,
notna,
)
import pandas._testing as tm
@pytest.mark.parametrize("result_type", ["foo", 1])
def test_result_type_error(result_type, int_frame_const_col):
# allowed result_type
df = int_frame_const_col
msg = (
"invalid value for result_type, must be one of "
"{None, 'reduce', 'broadcast', 'expand'}"
)
with pytest.raises(ValueError, match=msg):
df.apply(lambda x: [1, 2, 3], axis=1, result_type=result_type)
def test_apply_invalid_axis_value():
    # An out-of-range axis argument must raise a descriptive ValueError.
    frame = DataFrame(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
        index=["a", "a", "c"],
    )
    expected_msg = "No axis named 2 for object type DataFrame"
    with pytest.raises(ValueError, match=expected_msg):
        frame.apply(lambda x: x, 2)
def test_applymap_invalid_na_action(float_frame):
# GH 23803
with pytest.raises(ValueError, match="na_action must be .*Got 'abc'"):
float_frame.applymap(lambda x: len(str(x)), na_action="abc")
def test_agg_raises():
    # GH 26513
    # Calling .agg() without any aggregation function is a TypeError.
    frame = DataFrame({"A": [0, 1], "B": [1, 2]})
    expected = "Must provide"
    with pytest.raises(TypeError, match=expected):
        frame.agg()
def test_map_with_invalid_na_action_raises():
    # https://github.com/pandas-dev/pandas/issues/32815
    # Series.map accepts only 'ignore' or None for na_action.
    ser = Series([1, 2, 3])
    expected = "na_action must either be 'ignore' or None"
    with pytest.raises(ValueError, match=expected):
        ser.map(lambda x: x, na_action="____")
@pytest.mark.parametrize("input_na_action", ["____", True])
def test_map_arg_is_dict_with_invalid_na_action_raises(input_na_action):
# https://github.com/pandas-dev/pandas/issues/46588
s = Series([1, 2, 3])
msg = f"na_action must either be 'ignore' or None, {input_na_action} was passed"
with pytest.raises(ValueError, match=msg):
s.map({1: 2}, na_action=input_na_action)
def test_map_categorical_na_action():
values = Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
s = Series(values, name="XX", index=list("abcdefg"))
with pytest.raises(NotImplementedError, match=tm.EMPTY_STRING_PATTERN):
s.map(lambda x: x, na_action="ignore")
def test_map_datetimetz_na_action():
values = date_range("2011-01-01", "2011-01-02", freq="H").tz_localize("Asia/Tokyo")
s = Series(values, name="XX")
with pytest.raises(NotImplementedError, match=tm.EMPTY_STRING_PATTERN):
s.map(lambda x: x, na_action="ignore")
@pytest.mark.parametrize("method", ["apply", "agg", "transform"])
@pytest.mark.parametrize("func", [{"A": {"B": "sum"}}, {"A": {"B": ["sum"]}}])
def test_nested_renamer(frame_or_series, method, func):
# GH 35964
obj = frame_or_series({"A": [1]})
match = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=match):
getattr(obj, method)(func)
@pytest.mark.parametrize(
"renamer",
[{"foo": ["min", "max"]}, {"foo": ["min", "max"], "bar": ["sum", "mean"]}],
)
def test_series_nested_renamer(renamer):
s = Series(range(6), dtype="int64", name="series")
msg = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
s.agg(renamer)
def test_apply_dict_depr():
tsdf = DataFrame(
np.random.randn(10, 3),
columns=["A", "B", "C"],
index=date_range("1/1/2000", periods=10),
)
msg = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
tsdf.A.agg({"foo": ["sum", "mean"]})
@pytest.mark.parametrize("method", ["agg", "transform"])
def test_dict_nested_renaming_depr(method):
df = DataFrame({"A": range(5), "B": 5})
# nested renaming
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
getattr(df, method)({"A": {"foo": "min"}, "B": {"bar": "max"}})
@pytest.mark.parametrize("method", ["apply", "agg", "transform"])
@pytest.mark.parametrize("func", [{"B": "sum"}, {"B": ["sum"]}])
def test_missing_column(method, func):
# GH 40004
obj = DataFrame({"A": [1]})
match = re.escape("Column(s) ['B'] do not exist")
with pytest.raises(KeyError, match=match):
getattr(obj, method)(func)
def test_transform_mixed_column_name_dtypes():
# GH39025
df = DataFrame({"a": ["1"]})
msg = r"Column\(s\) \[1, 'b'\] do not exist"
with pytest.raises(KeyError, match=msg):
df.transform({"a": int, 1: str, "b": int})
@pytest.mark.parametrize(
"how, args", [("pct_change", ()), ("nsmallest", (1, ["a", "b"])), ("tail", 1)]
)
def test_apply_str_axis_1_raises(how, args):
# GH 39211 - some ops don't support axis=1
df = DataFrame({"a": [1, 2], "b": [3, 4]})
msg = f"Operation {how} does not support axis=1"
with pytest.raises(ValueError, match=msg):
df.apply(how, axis=1, args=args)
def test_transform_axis_1_raises():
# GH 35964
msg = "No axis named 1 for object type Series"
with pytest.raises(ValueError, match=msg):
Series([1]).transform("sum", axis=1)
def test_apply_modify_traceback():
data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
data.loc[4, "C"] = np.nan
def transform(row):
if row["C"].startswith("shin") and row["A"] == "foo":
row["D"] = 7
return row
def transform2(row):
if notna(row["C"]) and row["C"].startswith("shin") and row["A"] == "foo":
row["D"] = 7
return row
msg = "'float' object has no attribute 'startswith'"
with pytest.raises(AttributeError, match=msg):
data.apply(transform, axis=1)
@pytest.mark.parametrize(
"df, func, expected",
tm.get_cython_table_params(
DataFrame([["a", "b"], ["b", "a"]]), [["cumprod", TypeError]]
),
)
def test_agg_cython_table_raises_frame(df, func, expected, axis):
# GH 21224
msg = "can't multiply sequence by non-int of type 'str'"
with pytest.raises(expected, match=msg):
df.agg(func, axis=axis)
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series("a b c".split()),
[
("mean", TypeError), # mean raises TypeError
("prod", TypeError),
("std", TypeError),
("var", TypeError),
("median", TypeError),
("cumprod", TypeError),
],
)
),
)
def test_agg_cython_table_raises_series(series, func, expected):
# GH21224
msg = r"[Cc]ould not convert|can't multiply sequence by non-int of type"
with pytest.raises(expected, match=msg):
# e.g. Series('a b'.split()).cumprod() will raise
series.agg(func)
def test_agg_none_to_type():
# GH 40543
df = DataFrame({"a": [None]})
msg = re.escape("int() argument must be a string")
with pytest.raises(TypeError, match=msg):
df.agg({"a": int})
def test_transform_none_to_type():
# GH#34377
df = DataFrame({"a": [None]})
msg = "argument must be a"
with pytest.raises(TypeError, match=msg):
df.transform({"a": int})
@pytest.mark.parametrize(
"func",
[
lambda x: np.array([1, 2]).reshape(-1, 2),
lambda x: [1, 2],
lambda x: Series([1, 2]),
],
)
def test_apply_broadcast_error(int_frame_const_col, func):
df = int_frame_const_col
# > 1 ndim
msg = "too many dims to broadcast|cannot broadcast result"
with pytest.raises(ValueError, match=msg):
df.apply(func, axis=1, result_type="broadcast")
def test_transform_and_agg_err_agg(axis, float_frame):
# cannot both transform and agg
msg = "cannot combine transform and aggregation operations"
with pytest.raises(ValueError, match=msg):
with np.errstate(all="ignore"):
float_frame.agg(["max", "sqrt"], axis=axis)
@pytest.mark.parametrize(
"func, msg",
[
(["sqrt", "max"], "cannot combine transform and aggregation"),
(
{"foo": np.sqrt, "bar": "sum"},
"cannot perform both aggregation and transformation",
),
],
)
def test_transform_and_agg_err_series(string_series, func, msg):
# we are trying to transform with an aggregator
with pytest.raises(ValueError, match=msg):
with np.errstate(all="ignore"):
string_series.agg(func)
@pytest.mark.parametrize("func", [["max", "min"], ["max", "sqrt"]])
def test_transform_wont_agg_frame(axis, float_frame, func):
# GH 35964
# cannot both transform and agg
msg = "Function did not transform"
with pytest.raises(ValueError, match=msg):
float_frame.transform(func, axis=axis)
@pytest.mark.parametrize("func", [["min", "max"], ["sqrt", "max"]])
def test_transform_wont_agg_series(string_series, func):
# GH 35964
# we are trying to transform with an aggregator
msg = "Function did not transform"
warn = RuntimeWarning if func[0] == "sqrt" else None
warn_msg = "invalid value encountered in sqrt"
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(warn, match=warn_msg):
string_series.transform(func)
@pytest.mark.parametrize(
"op_wrapper", [lambda x: x, lambda x: [x], lambda x: {"A": x}, lambda x: {"A": [x]}]
)
@pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning")
def test_transform_reducer_raises(all_reductions, frame_or_series, op_wrapper):
# GH 35964
op = op_wrapper(all_reductions)
obj = DataFrame({"A": [1, 2, 3]})
obj = tm.get_obj(obj, frame_or_series)
msg = "Function did not transform"
with pytest.raises(ValueError, match=msg):
obj.transform(op)
|
[
"noreply@github.com"
] |
noreply@github.com
|
9780673943bc92fd7b2f2d0aa7eeef3acbfe31bc
|
68a80a4a3c12a6efe369513c83688b5bd4d083fc
|
/gui_tcl_tk_tkinter/buttons.py
|
5d0c482d1d0b054bf30e8510622c625bead5e854
|
[] |
no_license
|
biki234/PythonNotes
|
bfa95f67c6c460a79623551927abb2e3bb375135
|
4ee4846d3e4f7c564ed84065f0810bfa3fdad74a
|
refs/heads/master
| 2021-06-22T18:39:51.598210
| 2017-08-26T03:04:12
| 2017-08-26T03:04:12
| 107,346,885
| 1
| 0
| null | 2017-10-18T02:08:32
| 2017-10-18T02:08:32
| null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
from tkinter import *
class App:
    """Minimal two-button Tk demo: a red QUIT button and a Hello button."""

    def __init__(self, master):
        container = Frame(master)
        container.pack()
        # NOTE(review): `command=quit` binds the site builtin (raises
        # SystemExit); confirm master.quit was not intended instead.
        self.button = Button(container,
                             text="QUIT", fg="red",
                             command=quit)
        self.slogan = Button(container,
                             text="Hello",
                             command=self.write_slogan)
        for widget in (self.button, self.slogan):
            widget.pack(side=LEFT)

    def write_slogan(self):
        """Print the fixed slogan when the Hello button is pressed."""
        print("Tkinter is easy to use!")
# Build the root window, attach the App widgets, and enter the Tk event loop.
root = Tk()
app = App(root)
root.mainloop()
|
[
"anishchapagain"
] |
anishchapagain
|
2f77fca013521fe64e21aeb99dfd2e48d77e4bb1
|
e5c35b39d2087ea92fccef682022787e2938814f
|
/segmentation_models/__init__.py
|
df272d01ec6d50b84905867edcf289a7d241b6cb
|
[
"MIT"
] |
permissive
|
dddtqshmpmz/unet-DA
|
3f0d0ab30acb152feb2d80d138a266b56ce966a3
|
2bb0933ab607cd6b882463247523fed4d236f60e
|
refs/heads/master
| 2023-08-11T03:01:16.904338
| 2021-10-09T12:50:16
| 2021-10-09T12:50:16
| 415,305,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 43
|
py
|
from .segmentation_models_pytorch import *
|
[
"diaoli@sjtu.edu.cn"
] |
diaoli@sjtu.edu.cn
|
9fc363a3b8eb474777876ec38235218c494aaa7d
|
84794307dda1007e4a4ee5eace89ec53174ca525
|
/canteen/store/migrations/0008_auto_20200925_1836.py
|
20d9157660a5fad9b7a1849c0e20b8dcd6be671f
|
[] |
no_license
|
Ajaya2/CMS
|
40149253752fb0a8a6db33f7a108d6313debb84c
|
d5010b8ad61cdff60901c19431defe2b6eec07e4
|
refs/heads/master
| 2023-01-22T20:05:01.911613
| 2020-11-12T07:00:59
| 2020-11-12T07:00:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
# Generated by Django 3.1 on 2020-09-25 12:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Redefine ``store.Order.date_ordered`` as a plain ``DateTimeField``
    with no options; the prior definition lives in migration 0007."""

    dependencies = [
        ('store', '0007_auto_20200925_1427'),
    ]

    operations = [
        migrations.AlterField(
            model_name='order',
            name='date_ordered',
            # New target definition: required DateTimeField, no auto_now /
            # auto_now_add and no default.
            field=models.DateTimeField(),
        ),
    ]
|
[
"shekharneupane23@gmail.com"
] |
shekharneupane23@gmail.com
|
b0b184c82a19a0684562e5609e95670b3d3daa98
|
812d6305dd64eba70e93753c51f73387722cc920
|
/quete_streamlit.py
|
a14b1efa49099634f8038fd0620f75be64c14c84
|
[] |
no_license
|
Seb-Dupont-DataAnalyst/Quete-Streamlit
|
1c25e9b8fd668ff9fc9b49a7bf8d43629f96a80f
|
3cd66dcc37324244d9adc6635bace95cb00ed2b5
|
refs/heads/main
| 2023-06-18T09:13:43.976280
| 2021-07-07T13:00:16
| 2021-07-07T13:00:16
| 383,749,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,202
|
py
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
import streamlit as st
import streamlit.components.v1 as components
link = "https://raw.githubusercontent.com/murpi/wilddata/master/quests/cars.csv"
df_cars = pd.read_csv(link)
df_cars['continent_fact'] = df_cars['continent'].factorize()[0]
st.markdown('<style>' + open('style.css').read() +
'</style>', unsafe_allow_html=True)
st.markdown('<body class="title">Streamlit : build and share data apps</body>',
unsafe_allow_html=True)
st.sidebar.title("Bonjour :racing_car: :car: :blue_car:")
choice = st.sidebar.radio("", ('Analyse descriptive', "Analyse des corrélations"))
# Création Sidebar avec les différents choix
liste_pays = df_cars['continent'].unique().tolist()
#liste_pays.insert(0, 'Tous')
st.title('')
st.title('')
choix_pays = st.sidebar.multiselect('Select countries', liste_pays, default= liste_pays, format_func=lambda x: 'Select a country' if x == '' else x)
#choix_pays = st.selectbox('Select a continent :', liste_pays)
if choice == 'Analyse descriptive':
if choix_pays :
df_cars = df_cars[df_cars['continent'].isin(choix_pays)]
else :
st.sidebar.warning('No option is selected')
st.subheader('')
st.markdown("<body class='p3'>Quelques graphiques pour l'analyse descriptive :</body>",
unsafe_allow_html=True)
st.title('')
fig4 = px.pie(df_cars, values='year', names='continent', color='continent',color_discrete_map={'US.':'lightcyan',
'Japan.':'cyan',
'Europe.':'royalblue'})
fig4.update_layout(title_text="<b>Répartition des modèles par continent",
title_x=0.5, title_font_family="Verdana")
st.write(fig4)
st.markdown("<body class='p4'>Près de 2/3 des modèles du dataset proviennent des Etats-Unis.</body>",
unsafe_allow_html=True)
st.header('')
fig3 = px.histogram(df_cars, x="year", color="continent", color_discrete_map={'US.':'lightcyan',
'Japan.':'cyan',
'Europe.':'royalblue'})
fig3.update_layout(title_text="<b>Répartition des modèles par années",
title_x=0.5, title_font_family="Verdana")
fig3.update_layout({'plot_bgcolor': 'rgba(0,0,0,0)',
'paper_bgcolor': 'rgba(0,0,0,0)'})
fig3.update_xaxes(showgrid=False, gridwidth=1,
gridcolor='black', linecolor='rgba(0,0,0,0)')
fig3.update_yaxes(showgrid=False, gridwidth=1,
gridcolor='black', linecolor='rgba(0,0,0,0)')
fig3.update_xaxes(title_text="<b>Années")
#fig8.update_yaxes(title_text="<b>Distance par gallon")
st.write(fig3)
fig1 = px.box(df_cars, x="weightlbs", color="continent", color_discrete_map={'US.':'lightcyan',
'Japan.':'cyan',
'Europe.':'royalblue'})
fig1.update_layout(title_text="<b>Répartition des modèles par poids",
title_x=0.5, title_font_family="Verdana")
fig1.update_layout({'plot_bgcolor': 'rgba(0,0,0,0)',
'paper_bgcolor': 'rgba(0,0,0,0)'})
fig1.update_xaxes(showgrid=False, gridwidth=1,
gridcolor='black', linecolor='rgba(0,0,0,0)')
fig1.update_yaxes(showgrid=False, gridwidth=1,
gridcolor='black', linecolor='rgba(0,0,0,0)')
fig1.update_xaxes(title_text="<b>Poids")
#fig8.update_yaxes(title_text="<b>Distance par gallon")
st.write(fig1)
st.title('')
fig2 = px.box(df_cars, x="hp", color="continent", color_discrete_map={'US.':'lightcyan',
'Japan.':'cyan',
'Europe.':'royalblue'})
fig2.update_layout(title_text="<b>Répartition des modèles par puissance",
title_x=0.5, title_font_family="Verdana")
fig2.update_layout({'plot_bgcolor': 'rgba(0,0,0,0)',
'paper_bgcolor': 'rgba(0,0,0,0)'})
fig2.update_xaxes(showgrid=False, gridwidth=1,
gridcolor='black', linecolor='rgba(0,0,0,0)')
fig2.update_yaxes(showgrid=False, gridwidth=1,
gridcolor='black', linecolor='rgba(0,0,0,0)')
fig2.update_xaxes(title_text="<b>Puissance (cv)")
#fig8.update_yaxes(title_text="<b>Distance par gallon")
st.write(fig2)
st.markdown("<body class='p4'>Sans surprise, les modèles US sont dans une catégorie à part avec des poids et des puissances beaucoup plus importants que les modèles japonais ou européens.</body>",
unsafe_allow_html=True)
if choice == 'Analyse des corrélations':
    # Filter the dataset to the continents picked in the sidebar multiselect.
    if choix_pays :
        df_cars = df_cars[df_cars['continent'].isin(choix_pays)]
    else :
        st.sidebar.warning('No option is selected')
    st.subheader('')
    st.markdown('<body class="p3">Heatmap de corrélation :</body>',
                unsafe_allow_html=True)
    st.header('')
    # Correlation heatmap over all numeric columns of the (filtered) dataframe.
    fig, axes = plt.subplots(figsize=(12, 5))
    sns.heatmap(df_cars.corr(), annot=True, cmap="YlGnBu")
    sns.set(rc={'figure.facecolor': 'white'})
    st.write(fig)
    st.markdown("<body class='p4'>A la lecture de ce heatmap, 4 variables sont fortement corrélées (hp, weightlbs, cylinders, cubicinches). Nous allons nous intéresser à d'autres corrélations ici.</body>",
                unsafe_allow_html=True)
    st.title('')
    st.markdown("<body class='p3'>Quelques analyses de corrélation :</body>",
                unsafe_allow_html=True)
    st.title('')
    # Scatter + OLS trendline: acceleration (time to 60 mph) vs horsepower,
    # colored per continent with a fixed color map.
    fig7 = px.scatter(df_cars, x="time-to-60",
                      y="hp", trendline="ols", color="continent", color_discrete_map={'US.':'lightcyan',
                                                                                      'Japan.':'cyan',
                                                                                      'Europe.':'royalblue'})
    fig7.update_layout(title_text="<b>Corrélation entre l'accélération et la puissance",
                       title_x=0.5, title_font_family="Verdana")
    # Transparent plot/paper background so the chart blends into the page theme.
    fig7.update_layout({'plot_bgcolor': 'rgba(0,0,0,0)',
                        'paper_bgcolor': 'rgba(0,0,0,0)'})
    fig7.update_xaxes(showgrid=False, gridwidth=1,
                      gridcolor='black', linecolor='rgba(0,0,0,0)')
    fig7.update_yaxes(showgrid=False, gridwidth=1,
                      gridcolor='black', linecolor='rgba(0,0,0,0)')
    fig7.update_xaxes(title_text="<b>Temps (sec)")
    fig7.update_yaxes(title_text="<b>Puissance")
    st.write(fig7)
    st.markdown("<body class='p4'>Plus le modèle est puissant, plus la durée pour atteindre les 60 miles/heure est faible.</body>",
                unsafe_allow_html=True)
    st.title('')
    # Scatter + OLS trendline: fuel economy (mpg) vs model year.
    fig8 = px.scatter(df_cars, x="year",
                      y="mpg", trendline="ols", color="continent", color_discrete_map={'US.':'lightcyan',
                                                                                       'Japan.':'cyan',
                                                                                       'Europe.':'royalblue'})
    fig8.update_layout(title_text="<b>Corrélation entre la consommation et l'année de fabrication",
                       title_x=0.5, title_font_family="Verdana")
    fig8.update_layout({'plot_bgcolor': 'rgba(0,0,0,0)',
                        'paper_bgcolor': 'rgba(0,0,0,0)'})
    fig8.update_xaxes(showgrid=False, gridwidth=1,
                      gridcolor='black', linecolor='rgba(0,0,0,0)')
    fig8.update_yaxes(showgrid=False, gridwidth=1,
                      gridcolor='black', linecolor='rgba(0,0,0,0)')
    fig8.update_xaxes(title_text="<b>Années")
    fig8.update_yaxes(title_text="<b>Distance par gallon")
    st.write(fig8)
    st.markdown("<body class='p4'>La distance parcourue par gallon ne cesse d'augmenter au fil du temps soit une baisse de la consommation des véhicules.</body>",
                unsafe_allow_html=True)
    st.title('')
|
[
"noreply@github.com"
] |
noreply@github.com
|
bb7efc5aa68913486b3c49694c71001cadf1218b
|
968992f7cc055798334784ec9889b496df98a92b
|
/db_tools/import.py
|
d830d258f9ff1f82a21e5e238594bd920ad95a23
|
[] |
no_license
|
maplehua/cloudcomputing-xiaohua
|
38ec20ee8d7ebde95d302a5774febb2421cdef1d
|
c6fdc8896a7e73ea3777b52cf30462475ea5a513
|
refs/heads/master
| 2021-01-21T21:50:52.609815
| 2014-09-10T06:10:34
| 2014-09-10T06:10:34
| 59,453,305
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,461
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os, sys
import re
import argparse
from uuid import uuid1
from bs4 import BeautifulSoup
from pymongo import MongoClient
from pyes import ES
from config import *
es_conn = ES(server = ES_SERVER)
mongo_conn = MongoClient(host = MONGODB_HOST, port = MONGODB_PORT)[PAPER_DB]
def load_dir(path, file_type, parse_func, collection, index, doc_type):
    """Recursively import every file under *path* whose name ends in *file_type*.

    Each matching file is parsed with *parse_func* (which must return a
    ``(mongo_doc, es_doc)`` tuple), then stored in MongoDB *collection* and
    indexed in Elasticsearch under *index*/*doc_type*.
    """
    for root, dirs, files in os.walk(path):
        for f in files:
            # str.endswith is clearer than comparing against a negative slice.
            if f.endswith(file_type):
                full_path = os.path.join(root, f)  # hoisted: was computed twice
                print(full_path)
                (mongo_doc, es_doc) = parse_func(full_path)
                # Parenthesized single-argument prints behave identically on
                # Python 2 and 3 (rest of this file is still Python 2).
                print('mongo - %r' % (import_mongo(mongo_doc, collection)))
                print('elasticsearch - %r' % (import_es(es_doc, index, doc_type)))
def paper_html_to_doc(infile):
    """Parse one HTML paper (Dublin Core meta tags) into import documents.

    Returns a ``(mongo_doc, es_doc)`` tuple: the full record for MongoDB and
    the searchable subset (uuid/title/body) for Elasticsearch.
    """
    paper = BeautifulSoup(open(infile))
    uuid = unicode(uuid1())
    title = paper.find('meta', attrs={'name':'DC.Title'})['content'].strip().strip('\n')
    publication = '中国科学'
    year = paper.find('meta', attrs={'name':'DC.Date'})['content'].strip().strip('\n')
    body = paper.find('meta', attrs={'name':'DC.Description'})['content'].strip().strip('\n')
    author_list = paper.find('meta', attrs={'name':'citation_authors'})['content']
    # BUG FIX: the old pattern r",| *|;" never split on ';' (the zero-width
    # " *" alternative matched first and zero-length matches don't split),
    # and it produced empty tokens around adjacent separators.  Split on any
    # run of commas/semicolons/whitespace and drop empty entries.
    authors = [a.strip() for a in re.split(r'[,;\s]+', author_list) if a.strip()]
    mongo_doc = dict(uuid=uuid, title=title, authors=authors, publication=publication, year=year, body=body)
    es_doc = dict(uuid=uuid, title=title, body=body)
    return (mongo_doc, es_doc)
def paper_xml_to_doc(infile):
    """Convert one XML paper file into a ``(mongo_doc, es_doc)`` pair."""
    soup = BeautifulSoup(open(infile))
    clean = lambda text: text.strip().strip('\n')
    doc_uuid = unicode(uuid1())
    doc_title = clean(soup.title.get_text())
    doc_publication = clean(soup.conference.get_text())
    doc_year = clean(soup.year.get_text())
    doc_body = soup.body.get_text()
    # One entry per <author> element, whitespace-trimmed.
    doc_authors = [clean(tag.get_text()) for tag in soup.find_all('author')]
    mongo_doc = dict(uuid=doc_uuid, title=doc_title, authors=doc_authors,
                     publication=doc_publication, year=doc_year, body=doc_body)
    es_doc = dict(uuid=doc_uuid, title=doc_title, body=doc_body)
    return (mongo_doc, es_doc)
def import_mongo(doc, collection):
    """Insert *doc* into *collection* via the module-level MongoDB connection
    and return whatever ``insert`` returns."""
    return mongo_conn[collection].insert(doc)
def import_es(doc, index, doc_type):
    """Index *doc* in Elasticsearch under *index*/*doc_type* via the
    module-level pyes connection; returns the indexing response."""
    return es_conn.index(doc = doc, index = index, doc_type = doc_type)
if __name__ == '__main__':
    # CLI entry point: pick a parser and storage target per data type,
    # then bulk-import every matching file under the given directory.
    parser = argparse.ArgumentParser(description='Academi Data Import Utility')
    parser.add_argument('-t', dest='data_type', help='data type')
    parser.add_argument('path', help='Data path')
    args = parser.parse_args()
    if args.data_type == 'paper':
        print 'import paper', args.path
        load_dir(args.path, '.xml', paper_xml_to_doc, collection = PAPER_COLLECTION, index = PAPER_INDEX, doc_type = PAPER_TYPE)
    elif args.data_type == 'paper_en':
        print 'paper_en', args.path
        load_dir(args.path, '.xml', paper_xml_to_doc, collection = PAPER_EN_COLLECTION, index = PAPER_EN_INDEX, doc_type = PAPER_EN_TYPE)
    elif args.data_type == 'paper_cn':
        # Chinese papers arrive as HTML with Dublin Core meta tags.
        print 'paper_cn', args.path
        load_dir(args.path, '.html', paper_html_to_doc, collection = PAPER_COLLECTION, index = PAPER_INDEX, doc_type = PAPER_TYPE)
|
[
"ggshiney@gmail.com"
] |
ggshiney@gmail.com
|
a080485d86a70caa6afe4b4fd83a5a87c8f91514
|
6752106643c421b9b0f9aa73054fc446c0993772
|
/brain_pipeline.py
|
7e5193de8b25bca2fe2c73c9bfd7e5f515228692
|
[] |
no_license
|
kkfuwfny/BRATS
|
4ad26ab6d6b40b6f92eab93e02b3fa7a573174d6
|
e54c56445757d07ce7360d5dd20b9e0836189dbd
|
refs/heads/master
| 2021-06-18T19:54:58.586434
| 2017-07-06T01:55:35
| 2017-07-06T01:55:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,690
|
py
|
# -*- coding:utf -*-
import numpy as np
import subprocess
import random
import progressbar
from glob import glob
from skimage import io
import nipy as ni
np.random.seed(5) # for reproducibility 使每次产生的随机数不变
progress = progressbar.ProgressBar(widgets=[progressbar.Bar('*', '[', ']'), progressbar.Percentage(), ' '])
# A text progress bar is typically used to display the progress of a long running operation, providing a visual cue that processing is underway.
ORI_PATH = '/mnt/filsystem1/code/dsb2017/code/zang/'
class BrainPipeline(object):
    '''
    A class for processing brain scans for one patient.
    INPUT: (1) filepath 'path': path to directory of one patient. Contains following mha files:
    flair, t1, t1c, t2, ground truth (gt)
    (2) bool 'n4itk': True to use n4itk normed t1 scans (defaults to True)
    (3) bool 'n4itk_apply': True to apply and save n4itk filter to t1 and t1c scans for given patient.
    '''
    def __init__(self, path, n4itk = True, n4itk_apply = False):
        self.path = path
        self.n4itk = n4itk
        self.n4itk_apply = n4itk_apply
        # Fixed modality order used throughout the pipeline.
        self.modes = ['flair', 't1', 't1c', 't2', 'gt']
        # slices_by_mode: (5, 155, 240, 240); slices_by_slice: (155, 5, 240, 240)
        self.slices_by_mode, n = self.read_scans()
        self.slices_by_slice = n
        self.normed_slices = self.norm_slices()

    def read_scans(self):
        '''
        Goes into each modality in the patient directory and loads individual
        scans, then regroups them so the same slice of all 5 modalities sits
        together.
        Returns (slices_by_mode, slices_by_slice).
        '''
        print('Loading scans...')
        slices_by_mode = np.zeros((5, 155, 240, 240))
        slices_by_slice = np.zeros((155, 5, 240, 240))
        b = np.zeros((155, 240, 240))  # scratch volume for one modality
        flair = glob(self.path + '/*Flair*.nii.gz')
        t2 = glob(self.path + '/*_T2*.nii.gz')
        gt = glob(self.path + '/*OT.nii.gz')
        t1s = glob(self.path + '/*_T1_*.nii.gz')  # T1
        t1_n4 = glob(self.path + '/*_T1c_*.nii.gz')  # T1c
        t1 = [scan for scan in t1s if scan not in t1_n4]
        scans = [flair[0], t1s[0], t1_n4[0], t2[0], gt[0]]  # directories to each image (5 total)
        if self.n4itk_apply:
            print('-> Applyling bias correction...')
            for t1_path in t1:
                self.n4itk_norm(t1_path)  # normalize files
            scans = [flair[0], t1_n4[0], t1_n4[0], t2[0], gt[0]]
        elif self.n4itk:
            scans = [flair[0], t1_n4[0], t1_n4[0], t2[0], gt[0]]
        print("==================")
        for scan_idx in xrange(5):
            # BUG FIX: previously loaded flair[0] for EVERY modality, so all
            # five channels (including the ground truth) were the FLAIR volume.
            s = ni.load_image(scans[scan_idx])
            for i in range(155):
                b[i, :, :] = np.asarray(s[:, :, i])
            slices_by_mode[scan_idx] = b
        for mode_ix in xrange(slices_by_mode.shape[0]):  # modes 1 thru 5
            for slice_ix in xrange(slices_by_mode.shape[1]):  # slices 1 thru 155
                slices_by_slice[slice_ix][mode_ix] = slices_by_mode[mode_ix][slice_ix]  # reshape by slice
        return slices_by_mode, slices_by_slice

    def norm_slices(self):
        '''
        Normalizes each slice in self.slices_by_slice, excluding gt (the last
        channel is copied through unchanged).
        '''
        print('Normalizing slices...')
        normed_slices = np.zeros((155, 5, 240, 240))
        for slice_ix in xrange(155):
            normed_slices[slice_ix][-1] = self.slices_by_slice[slice_ix][-1]
            for mode_ix in xrange(4):
                normed_slices[slice_ix][mode_ix] = self._normalize(self.slices_by_slice[slice_ix][mode_ix])
        print('Done.')
        return normed_slices

    def _normalize(self, slice):
        '''
        INPUT: (1) a single slice of any given modality (excluding gt)
        OUTPUT: slice clipped to its [0.5, 99.5] percentile range, then
        zero-mean/unit-std normalized (returned unchanged if constant).
        '''
        # NOTE: parameter name 'slice' shadows the builtin; kept for interface
        # compatibility with existing callers.
        b, t = np.percentile(slice, (0.5, 99.5))
        slice = np.clip(slice, b, t)  # b: slice_min, t: slice_max
        if np.std(slice) == 0:
            return slice
        else:
            return (slice - np.mean(slice)) / np.std(slice)

    def save_patient(self, reg_norm_n4, patient_num):
        '''
        INPUT: (1) int 'patient_num': unique identifier for each patient
        (2) string 'reg_norm_n4': 'reg' for original images, 'norm' for
        normalized images, anything else for n4-normalized images
        OUTPUT: saves a (1200, 240) strip per slice (5 modalities stacked) to
        Norm_PNG/, Training_PNG/ or n4_PNG/ respectively.
        '''
        print('Saving scans for patient {}...'.format(patient_num))
        progress.currval = 0
        if reg_norm_n4 == 'norm':  # save normed slices
            for slice_ix in progress(xrange(155)):  # reshape to strip
                strip = self.normed_slices[slice_ix].reshape(1200, 240)
                if np.max(strip) != 0:  # set values < 1
                    strip /= np.max(strip)
                if np.min(strip) <= -1:  # set values > -1
                    strip /= abs(np.min(strip))
                # save as patient_slice.jpg
                io.imsave(ORI_PATH+'Norm_PNG/{}_{}.jpg'.format(patient_num, slice_ix), strip)
        elif reg_norm_n4 == 'reg':
            for slice_ix in progress(xrange(155)):
                strip = self.slices_by_slice[slice_ix].reshape(1200, 240)
                if np.max(strip) != 0:
                    strip /= np.max(strip)
                io.imsave(ORI_PATH+'Training_PNG/{}_{}.png'.format(patient_num, slice_ix), strip)
        else:
            for slice_ix in progress(xrange(155)):  # reshape to strip
                strip = self.normed_slices[slice_ix].reshape(1200, 240)
                if np.max(strip) != 0:  # set values < 1
                    strip /= np.max(strip)
                if np.min(strip) <= -1:  # set values > -1
                    strip /= abs(np.min(strip))
                # save as patient_slice.png
                io.imsave(ORI_PATH+'n4_PNG/{}_{}.png'.format(patient_num, slice_ix), strip)
        print('save')

    def n4itk_norm(self, path, n_dims=3, n_iters='[20,20,10,5]'):
        '''
        INPUT: (1) filepath 'path': path to mha T1 or T1c file
        OUTPUT: writes the n4itk-normalized image next to the original as
        orig_filename_n.nii via an external helper script.
        '''
        output_fn = path[:-4] + '_n.nii'
        # run n4_bias_correction.py path n_dim n_iters output_fn
        subprocess.call('python n4_bias_correction.py ' + path + ' ' + str(n_dims) + ' ' + n_iters + ' ' + output_fn, shell = True)
def save_patient_slices(patients, type):
    '''
    INPUT (1) list 'patients': paths to any directories of patients to save. for example- glob("Training/HGG/**")
    (2) string 'type': options = reg (non-normalized), norm (normalized, but no bias correction), n4 (bias corrected and normalized)
    saves strips of patient slices to approriate directory (Training_PNG/, Norm_PNG/ or n4_PNG/) as patient-num_slice-num
    '''
    # NOTE: parameter 'type' shadows the builtin; kept for interface compatibility.
    for patient_num, path in enumerate(patients): # path is one patient directory (original Chinese note said: "path is of list type")
        print path
        a = BrainPipeline(path)
        a.save_patient(type, patient_num)
def s3_dump(directory, bucket):
    '''
    dump files from a given directory to an s3 bucket
    INPUT (1) string 'directory': directory containing files to save
    (2) string 'bucket': name of s3 bucket to dump files
    '''
    # BUG FIX: subprocess.call was given one big string WITHOUT shell=True,
    # so the OS looked for a program literally named "aws s3 cp ...".
    # Pass an argument list instead (also avoids shell-quoting issues).
    subprocess.call(['aws', 's3', 'cp', directory, 's3://' + bucket, '--recursive'])
def save_labels(fns):
    '''
    INPUT list 'fns': filepaths to all labels
    Saves ground-truth (OT) slices as PNGs under ORI_PATH/Labels/.
    NOTE(review): only the first label volume is read and only xrange(1)
    slices are saved -- looks like leftover debugging; confirm before relying
    on this for a full label export.
    '''
    progress.currval = 0
    slices=np.zeros((240,240))
    label=glob(fns+'/*OT.nii.gz')
    print 'len of label:',len(label)
    print 'type of label:',type(label)
    s = ni.load_image(label[0])  # only the first patient's label volume is loaded
    print s.shape
    print "=========="
    label_idx=0
    for slice_idx in xrange(1):
        slices=np.asarray(s[:,:,slice_idx])
        print slices.shape
        io.imsave(ORI_PATH+'Labels/{}_{}L.png'.format(label_idx, slice_idx), slices)
if __name__ == '__main__':
    # Export labels, then slice strips in all three variants, then push to S3.
    save_labels(ORI_PATH+'brats_2013/')
    patients = glob(ORI_PATH+'brats_2013/**')
    print('patients:'),patients
    save_patient_slices(patients, 'reg')
    save_patient_slices(patients, 'norm')
    save_patient_slices(patients, 'n4')
    # NOTE(review): 'Graveyard/Training_PNG/' does not match the ORI_PATH
    # output directories written above -- confirm the intended source dir.
    s3_dump('Graveyard/Training_PNG/', 'orig-training-png')
|
[
"noreply@github.com"
] |
noreply@github.com
|
c1ea1c2df956749c6deeb18f05376f849453d2e6
|
3c3c274f266736c97dc14608511f039e65e31694
|
/chalicelib/auth.py
|
ad8d2d2a8ff3cc74580968dde491e1779e63a446
|
[] |
no_license
|
craymaru/chalice-todoapp-training
|
b2de9a7bff52ae3675a36ac44c7886a003199c7c
|
5a3229f3f4d185457812777432bd99adb9b7c56a
|
refs/heads/master
| 2023-01-11T18:03:50.712684
| 2020-11-18T04:29:34
| 2020-11-18T04:29:34
| 313,465,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,168
|
py
|
import hashlib
import hmac
import datetime
from uuid import uuid4
import jwt
from chalice import UnauthorizedError
# TODO: Figure out what we want to do with this.
# We can either move this out to env vars in config.json,
# use KMS to encrypt/decrypt this value, or store this in SSM.
# Until we figure it out I'll store it here.
_SECRET = b'\xf7\xb6k\xabP\xce\xc1\xaf\xad\x86\xcf\x84\x02\x80\xa0\xe0'
def get_jwt_token(username, password, record):
    """Verify *password* against the stored PBKDF2 record and mint a JWT.

    *record* supplies the hash name, salt, round count and expected digest.
    Raises UnauthorizedError when the password does not match.
    """
    derived = hashlib.pbkdf2_hmac(
        record['hash'],
        password.encode(),
        record['salt'].value,
        record['rounds']
    )
    # Constant-time comparison to avoid timing side channels.
    if not hmac.compare_digest(derived, record['hashed'].value):
        raise UnauthorizedError('Invalid password')
    issued_at = datetime.datetime.utcnow()
    claims = {
        'sub': username,
        'iat': issued_at,
        'nbf': issued_at,
        'jti': str(uuid4()),
        # NOTE: We can also add 'exp' if we want tokens to expire.
    }
    return jwt.encode(claims, _SECRET, algorithm='HS256')
def decode_jwt_token(token):
    """Verify *token* with the shared secret and return its claims dict."""
    return jwt.decode(token, _SECRET, algorithms=['HS256'])
|
[
"craymaru@gmail.com"
] |
craymaru@gmail.com
|
354008357caf51c4266ae2cbd0b6f2dd88dc8917
|
2f609e4c763973e33f430785898dc2e01570a918
|
/migrations/versions/c5722c470e44_.py
|
c8cf72d5d404b3255c72e4f6a91552acf5694df4
|
[
"MIT"
] |
permissive
|
TheoTT/s_shop_flask
|
aa2eb815743b02f5d33ef877e26d4c03f436a1f3
|
bbdb2c8e65a1da08f72a18a06f7bbf0a8d42d0e8
|
refs/heads/master
| 2022-01-26T04:34:03.687511
| 2020-04-18T02:37:47
| 2020-04-18T02:37:47
| 247,942,837
| 0
| 0
|
MIT
| 2021-03-29T22:35:30
| 2020-03-17T10:38:00
|
Python
|
UTF-8
|
Python
| false
| false
| 803
|
py
|
"""empty message
Revision ID: c5722c470e44
Revises: 793ea5ac3149
Create Date: 2020-03-22 22:01:24.511081
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c5722c470e44'
down_revision = '793ea5ac3149'
branch_labels = None
depends_on = None
def upgrade():
    """Add submenus.child_id_1 with a foreign key to menus.id."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('submenus', sa.Column('child_id_1', sa.Integer(), nullable=False))
    # Name None lets the dialect auto-name the constraint.
    op.create_foreign_key(None, 'submenus', 'menus', ['child_id_1'], ['id'])
    # ### end Alembic commands ###
def downgrade():
    """Drop submenus.child_id_1 and its foreign key."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): drop_constraint with name None raises at runtime on most
    # backends -- the auto-generated constraint name should be filled in.
    op.drop_constraint(None, 'submenus', type_='foreignkey')
    op.drop_column('submenus', 'child_id_1')
    # ### end Alembic commands ###
|
[
"tt137378245@outlook.com"
] |
tt137378245@outlook.com
|
65cbb2d8960663a033aa29f3118e65c6bdd82a98
|
be4b355c065e82086d1abb7bd860581ba0127aaa
|
/dataprocess.py
|
1337ea23089d798036381e3a0ea1852460656064
|
[] |
no_license
|
gannerr/sneakergen
|
de0df5b921856682b9061178b55bc454881ee220
|
5a0577b5958be7ef79e9a4eaf0cc20193e6a0fe8
|
refs/heads/master
| 2020-06-16T16:40:33.922010
| 2019-07-07T10:55:33
| 2019-07-07T10:55:33
| 195,639,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
def process_data():
    """Build a shuffled TensorFlow (1.x queue API) batch of shoe images.

    Reads every file in ./resized_black, decodes it as JPEG, applies light
    augmentation (random flip / brightness / contrast), resizes to
    HEIGHT x WIDTH and scales pixels to [0, 1].

    Returns:
        (images_batch, num_images): a [BATCH_SIZE, HEIGHT, WIDTH, CHANNEL]
        float32 batch tensor and the total number of source images.
    """
    current_dir = os.getcwd()
    shoe_dir = os.path.join(current_dir, 'resized_black')
    images = [os.path.join(shoe_dir, each) for each in os.listdir(shoe_dir)]
    all_images = tf.convert_to_tensor(images, dtype = tf.string)
    images_queue = tf.train.slice_input_producer(
        [all_images])
    content = tf.read_file(images_queue[0])
    image = tf.image.decode_jpeg(content, channels = CHANNEL)
    # Light augmentation: horizontal flip plus small photometric jitter.
    image = tf.image.random_flip_left_right(image)
    image = tf.image.random_brightness(image, max_delta = 0.1)
    image = tf.image.random_contrast(image, lower = 0.9, upper = 1.1)
    size = [HEIGHT, WIDTH]
    image = tf.image.resize_images(image, size)
    image.set_shape([HEIGHT, WIDTH, CHANNEL])
    image = tf.cast(image, tf.float32)
    image = image / 255.0
    # BUG FIX: the batch variable was misspelled 'iamges_batch', so the
    # 'return images_batch' below raised NameError at runtime.
    images_batch = tf.train.shuffle_batch(
        [image], batch_size = BATCH_SIZE,
        num_threads = 4, capacity = 200 + 3 * BATCH_SIZE,
        min_after_dequeue = 200)
    num_images = len(images)
    return images_batch, num_images
|
[
"noreply@github.com"
] |
noreply@github.com
|
4cd37584ef4a0d01cd88ff800395b7ab860f7b52
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/problems/0530.0_Minimum_Absolute_Difference_in_BST.py
|
93ad4e5a1a70ef0a7e5a3df60900e33c8cd38472
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792
| 2023-08-22T16:43:36
| 2023-08-22T16:43:36
| 153,090,613
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 763
|
py
|
'''
Runtime: 99 ms, faster than 5.10% of Python3 online submissions for Minimum Absolute Difference in BST.
Memory Usage: 18.5 MB, less than 69.94% of Python3 online submissions for Minimum Absolute Difference in BST.
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def getMinimumDifference(self, root: Optional[TreeNode]) -> int:
        """Smallest absolute difference between any two node values in a BST."""
        # Iterative in-order traversal: a BST yields its values in sorted
        # order, so the answer is the minimum gap between consecutive values.
        ordered = []
        stack, node = [], root
        while stack or node:
            while node:
                stack.append(node)
                node = node.left
            node = stack.pop()
            ordered.append(node.val)
            node = node.right
        return min(b - a for a, b in zip(ordered, ordered[1:]))
|
[
"laoxing201314@outlook.com"
] |
laoxing201314@outlook.com
|
1e849b8441b64d8dcd64fc090cecf42eca0c2f1a
|
8329504e8dcf39d72f5120804d1afbc47b7c8364
|
/djangoProject2/settings.py
|
54e474073684abe49acfbe0b6198733d625bb9be
|
[] |
no_license
|
jeonyongho0/djangoProject2
|
ae7bf5cc7e17d28a083f4f4797bcde172c93d7c9
|
222f7d89f43e8027d2d2e30e7891e2a1f6b92995
|
refs/heads/master
| 2023-04-06T13:19:21.368229
| 2021-04-21T23:49:13
| 2021-04-21T23:49:13
| 360,335,142
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,292
|
py
|
"""
Django settings for djangoProject2 project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): hard-coded secret committed to VCS -- move it to an
# environment variable before any non-local deployment.
SECRET_KEY = 'django-insecure-v9h8zo_4!8iy=43f75arig(1(ht(f8#f7v!8a0w&df44)ahc=*'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list is fine only while DEBUG is True (local development).
ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'djangoProject2.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates']
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'djangoProject2.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# SQLite file in the project root; suitable for development only.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"drager7798@naver.com"
] |
drager7798@naver.com
|
ef9a648c598dc220ff704edf4dde3de811641175
|
154f978202f5d06334bddaf82785975dddc04166
|
/ressApp/migrations/0008_auto_20180626_1357.py
|
2b9748422b7d1f6fb51b7bbd7c5018004fb969c8
|
[] |
no_license
|
Lippskinn/BAR4
|
409cebed8bec2e88f75a81ac1b5dadc1e92e0cb8
|
aef07438255a875b1e8bd231490cde27eb5273da
|
refs/heads/master
| 2020-04-02T07:02:28.715022
| 2018-10-22T16:43:00
| 2018-10-22T16:43:00
| 154,178,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
# Generated by Django 2.0.2 on 2018-06-26 13:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Swap Offer.offerImage for an ImageField 'image' and add Profile.image."""

    dependencies = [
        ('ressApp', '0007_auto_20180626_1334'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='offer',
            name='offerImage',
        ),
        migrations.AddField(
            model_name='offer',
            name='image',
            field=models.ImageField(default='offer_image/no_offer_image.jpg', upload_to='offer_image'),
        ),
        migrations.AddField(
            model_name='profile',
            name='image',
            field=models.ImageField(default='profile_image/no_profile_image.jpg', upload_to='profile_image'),
        ),
    ]
|
[
"noreply@github.com"
] |
noreply@github.com
|
32863802c37b18d84de632557d016bf82a7f9e33
|
c68c8b0bf9e1e367f1cd170d7271b55e61ff736b
|
/quickstart.py
|
e13a92e4080df07d8b17a8353638f8daff229a18
|
[] |
no_license
|
eesaaamer3/EmailAutomation
|
55fb9b2aeda1825285b1e0c77ee9a7e898a27fe5
|
dcada973c5539e59f01db319395599dfd3eeb3b8
|
refs/heads/master
| 2022-11-14T04:21:56.421601
| 2020-06-26T03:03:45
| 2020-06-26T03:03:45
| 254,958,000
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,805
|
py
|
"""
Eesa Aamer
Date Created: 11/04/20
Last Modified: 13/04/20
A python script that can read and send emails, while also being
able to download attachments directly onto the device.
"""
import ezgmail
import argparse
parser = argparse.ArgumentParser(description="Enter valid email information")
parser.add_argument('Email', type=str, help='Receiver email address')
parser.add_argument('SubjectLine', type=str, help='Subject Line of Email')
parser.add_argument('Body', type=str, help='Body of Email')
args = parser.parse_args()
class SendWithNoAttachment:
    """ Sends emails without attachments """
    def __init__(self, email, subjectLine, body):
        # email: receiver address; subjectLine/body: message content.
        self.email = email
        self.subjectLine = subjectLine
        self.body = body

    def sender(self):
        """Send the email through the authenticated ezgmail session."""
        ezgmail.send(self.email, self.subjectLine, self.body)
class SendWithAttachments(SendWithNoAttachment):
    """ Sends emails with variable amount of attachments"""
    def __init__(self, email, subjectLine, body, attachments):
        super().__init__(email, subjectLine, body) # Inherits from SendWithNoAttachment class
        # attachments: list of file paths to attach.
        self.attachments = attachments

    def senderWithAttach(self):
        """Send the email with its attachment list via ezgmail."""
        ezgmail.send(self.email, self.subjectLine, self.body, self.attachments) # Command to send email
class Reader:
    """ Reads most recent unread emails """
    def __init__(self):
        # Fetching happens at construction: print a count then a summary.
        unreadThreads = ezgmail.unread() # Collects unread email into list
        print("You have {} new emails".format(len(unreadThreads)))
        ezgmail.summary(unreadThreads) # Command that provides name, subject line, and body of unread emails
class Downloader:
    """ Downloads attachments from select emails """
    def __init__(self, subjectLine):
        # subjectLine: search string used to locate the email thread.
        self.subjectLine = subjectLine

    def mailFinder(self):
        """Return the list of email threads matching the subject line."""
        mail = ezgmail.search(self.subjectLine) # Collects emails that have a certain subject line
        return mail

    def downloadOneAttachment(self, files):
        """Prompt for a filename and download it from the first matched thread.

        NOTE(review): files[0] raises IndexError when the search matched
        nothing -- confirm whether a friendlier message is wanted.
        """
        filename = input("What is the name of the file?: ")
        files[0].messages[0].downloadAttachment(filename) # Command to download a specific attachment

    def downloadAllAttachments(self, files):
        """Download every attachment of the first matched thread's first message."""
        files[0].messages[0].downloadAllAttachments() # Command to download all attachments
class Introduction:
    """Initial user interface."""

    def __init__(self):
        pass

    def start(self):
        """Show the menu and return the user's raw choice string."""
        print("Welcome to the automated email system!")
        choice = input("[S]end without attachments, [W]ith attachments, [R]ead?, or [D]ownload?: ")
        return choice
if __name__ == "__main__":
    # Interactive dispatcher: the CLI args (Email/SubjectLine/Body) feed the
    # send paths; the read/download paths are fully prompt-driven.
    begin = Introduction() # Introduction object acts as starting screen
    initial = begin.start()
    if initial == "S": # User wants to send an email
        newSenderWithNone = SendWithNoAttachment(args.Email, args.SubjectLine, args.Body).sender()
    elif initial == "W": # User wants to send email with attachments
        print("For attachments, please list all attachments seperated with a space")
        attaches = input("Attachments?: ")
        new_list = [attach for attach in attaches.split(" ")]
        newSenderWithSome = SendWithAttachments(args.Email, args.SubjectLine, args.Body, new_list).senderWithAttach()
    elif initial == "R": # User wants to view recent unread emails
        newRead = Reader()
    elif initial == "D": # User wants to download attachments from file
        desiredEmail = input("What is the subject line?: ")
        newDownload = Downloader(desiredEmail)
        user_choice = input("[O]ne file or [A]ll?: ")
        if user_choice == "O":
            newDownload.downloadOneAttachment(newDownload.mailFinder())
        elif user_choice == "A":
            newDownload.downloadAllAttachments(newDownload.mailFinder())
|
[
"eesaaamer5@gmail.com"
] |
eesaaamer5@gmail.com
|
a9e2b226c09273c26c8859b32edb71bbdff7b623
|
f6f632d0d19fcced0a464d7c75d82ef2c3a9758a
|
/ABTS/physical_models/Mission.py
|
00d67e41e43cc63cd614c16fa4fcff9cc751a51a
|
[] |
no_license
|
gfalcon2/Aerobraking-Trajectory-Simulator
|
3f9446be40df41e94fe59ac60bd6d2edf2364232
|
2ad7314ae759283148bd259449a012a7797de6c2
|
refs/heads/master
| 2022-12-03T22:30:33.979187
| 2022-11-20T16:48:11
| 2022-11-20T16:48:11
| 239,838,808
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,633
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 30 16:23:54 2020
@author: Giusy Falcone (gfalcon2@illinois.edu)
@copyright University of illinois at Urbana Champaign
"""
def missiondef(mission):
    """Translate a mission-definition dict into an InitialParameters object.

    Parses mission['Purpose'] word by word for the flight phases
    (entry/descent/landing/aerobraking) and the target planet, then maps the
    remaining model-selection strings onto small integer codes.

    Args:
        mission: dict with keys 'Purpose', 'Gravity Model', 'Density Model',
            'Wind', 'Aerodynamic Model', 'Shape', 'Control', 'Firings',
            'Thermal Model' and 'Monte Carlo'.

    Returns:
        InitialParameters instance (also bound to the module-global ``ip``).
    """
    # Tokenize the purpose sentence on spaces and periods.  NOTE: a trailing
    # word not followed by ' ' or '.' is dropped, matching the original
    # tokenizer (purpose strings are expected to end with '.').
    words = ""
    sentence = mission['Purpose']
    wordsinpurpose = []
    for letter in sentence:
        if (letter == ' ') or (letter == '.'):
            wordsinpurpose.append(words)
            words = ""
        else:
            words += str(letter)

    # Flight-phase flags and planet code (0=Earth, 1=Mars, 2=Venus).
    [e, d, l, a] = [0, 0, 0, 0]
    # ROBUSTNESS FIX: default to Earth; previously 'p' was unbound (NameError)
    # when the purpose sentence named no planet.
    p = 0
    for words in wordsinpurpose:
        if (words == 'entry') or (words == 'Entry'):
            e = 1
        if (words == 'descent') or (words == 'Descent'):
            d = 1
        if (words == 'landing') or (words == 'Landing'):
            l = 1
        if (words == 'aerobraking') or (words == 'Aerobraking'):
            a = 1
        if (words == 'Earth'):
            p = 0
        elif (words == 'Mars'):
            p = 1
        elif (words == 'Venus'):
            p = 2

    class Mission:
        """Flight-phase flags plus the target planet code."""
        def __init__(self, e, d, l, a, p):
            self.e = e
            self.d = d
            self.l = l
            self.a = a
            self.planet = p
    M = Mission(e, d, l, a, p)

    # Gravity model: 0=constant, 1=inverse squared (default), 2=J2 effect.
    if (mission['Gravity Model'] == 'constant') or (mission['Gravity Model'] == 'Constant'):
        gm = 0
    elif (mission['Gravity Model'] == 'Inverse squared') or (mission['Gravity Model'] == 'inverse squared') or (mission['Gravity Model'] == 'Inverse Squared'):
        gm = 1
    elif (mission['Gravity Model'] == 'Inverse Squared and J2 effect') or (mission['Gravity Model'] == 'inverse squared and J2 effect') or (mission['Gravity Model'] == 'Inverse quared and J2 effect'):
        gm = 2
    else:
        gm = 1

    # Density model: 0=constant, 1=exponential (default), 2=no density, 3=MarsGram.
    if (mission['Density Model'] == 'constant') or (mission['Density Model'] == 'Constant'):
        dm = 0
    elif (mission['Density Model'] == 'exponential') or (mission['Density Model'] == 'Exponential'):
        dm = 1
    elif (mission['Density Model'] == 'No-Density') or (mission['Density Model'] == 'No-density'):
        dm = 2
    elif (mission['Density Model'] == 'MARSGram') or (mission['Density Model'] == 'MarsGram'):
        dm = 3
    else:
        dm = 1

    # Wind flag (integer-coded).
    wm = int(mission['Wind'])

    # Aerodynamic model: 0=constant Cd/Cl (default), 1=Mach-dependent
    # spacecraft, 2=non-ballistic blunted cone with axial coefficient.
    if (mission['Aerodynamic Model'] == 'Cd and Cl Constant') or (mission['Aerodynamic Model'] == 'Cd and Cl constant'):
        am = 0
    elif ((mission['Aerodynamic Model'] == 'Diffusive') or (mission['Aerodynamic Model'] == 'Mach-dependent')) and (mission['Shape'] == 'Spacecraft'):
        am = 1
    elif ((mission['Aerodynamic Model'] == 'No-Ballistic flight with axial coefficient') or (mission['Aerodynamic Model'] == 'No-ballistic flight with axial coefficient')) and (mission['Shape'] == 'Blunted Cone'):
        am = 2
    else:
        am = 0

    # Control model: pass through 0-3, anything else defaults to 0.
    cm = mission['Control'] if mission['Control'] in (0, 1, 2, 3) else 0

    # Thrust control: 0=no firings (default), 1=aerobraking maneuver,
    # 2=drag-passage firing.
    if mission['Firings'] == 'None':
        tc = 0
    elif mission['Firings'] == 'Aerobraking Maneuver':
        tc = 1
    elif mission['Firings'] == 'Drag Passage Firing':
        tc = 2
    else:
        tc = 0

    # Thermal model: 1=convective+radiative, 2=Maxwellian/Shaaf-and-Chambre.
    # ROBUSTNESS FIX: default to 1; previously 'tm' was unbound (NameError)
    # for any unrecognized string.  (A dead re-tokenization of the thermal
    # model string was removed here: its results were never used.)
    tm = 1
    if (mission['Thermal Model'] == 'convective and ratiative') or (mission['Thermal Model'] == 'Convective and Radiative'):
        tm = 1
    if (mission['Thermal Model'] == 'Maxwellian Heat Transfer') or (mission['Thermal Model'] == 'Shaaf and Chambre'):
        tm = 2

    # Monte Carlo run count/flag.
    mc = int(mission['Monte Carlo'])

    class InitialParameters:
        """Bundle of all integer-coded simulation settings."""
        def __init__(self, M, gm, dm, wm, am, tm, cm, tc, mc):
            self.M = M
            self.gm = gm
            self.dm = dm
            self.wm = wm
            self.am = am
            self.tm = tm
            self.cm = cm
            self.tc = tc
            self.mc = mc

    global ip
    ip = InitialParameters(M, gm, dm, wm, am, tm, cm, tc, mc)
    return ip
|
[
"noreply@github.com"
] |
noreply@github.com
|
1524daef4faaedc728bb00129ad31c0b83a880c5
|
0049857a3ce7d5137cfcc3c03869c39fd797f7ce
|
/Week_03/combinations.py
|
1375a75576d0c653ba20a13a590671328ac655bb
|
[] |
no_license
|
arthurt53/algorithm011-class02
|
98358d53b76f654edf5d7ec0b00519ac1ff76dea
|
3fd3ac0de550507b6a2ca16179094cef4517848c
|
refs/heads/master
| 2022-12-06T19:29:29.104296
| 2020-09-05T03:22:36
| 2020-09-05T03:22:36
| 274,076,429
| 0
| 0
| null | 2020-06-22T08:02:16
| 2020-06-22T08:02:15
| null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
class Solution:
    def combine(self, n: int, k: int) -> List[List[int]]:
        """Return all k-element combinations of 1..n in lexicographic order.

        Backtracking with pruning: a branch is abandoned as soon as the
        remaining numbers cannot complete a k-element combination, which
        skips fruitless recursion without changing the output or its order.
        """
        res = []
        def backtrack(start, remaining, chosen):
            if remaining == 0:
                res.append(chosen)
                return
            # Highest useful starting value is n - remaining + 1; beyond
            # that there are too few numbers left to finish the tuple.
            for j in range(start, n - remaining + 2):
                backtrack(j + 1, remaining - 1, chosen + [j])
        backtrack(1, k, [])
        return res
|
[
"noreply@github.com"
] |
noreply@github.com
|
f58428a22e613a723a819b35bd68032e651b1f57
|
6f37934f98be3df0de70de78e598ab4e83594568
|
/kermit-webui/kermit_new-webui/webui/agent/utils.py
|
ab5b37b279dbb93fbb16cc6deeca4acb048b664f
|
[] |
no_license
|
yevlempy/CT_Kermit
|
34c1087adc8db7b8292c8cd66bde30b73b929c1d
|
4cfea2f6527c7e3130ec0cffad1a1afd4cf189cd
|
refs/heads/master
| 2021-01-20T13:47:32.063550
| 2012-01-05T05:59:40
| 2012-01-05T05:59:40
| 3,051,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,074
|
py
|
from webui.agent.models import Agent, Action, ActionInput, ActionOutput
from webui.restserver.communication import callRestServer
import logging
from django.utils import simplejson as json
from webui.serverstatus.models import Server
logger = logging.getLogger(__name__)
def verify_agent_acl(user, agent_name):
    """Return True if *user* holds the 'use_agent' permission on the named agent.

    Returns False when the agent does not exist or the permission check fails.
    """
    try:
        agent = Agent.objects.get(name=agent_name)
        return user.has_perm('use_agent', agent)
    except Exception:
        # Narrowed from a bare except: a bare clause would also swallow
        # SystemExit/KeyboardInterrupt; any lookup/permission error means "denied".
        return False
def verify_action_acl(user, agent_name, action_name):
    """Return True if *user* holds 'use_action' on the agent's named action.

    Returns False when either the agent or the action is missing, or the
    permission check fails.
    """
    try:
        agent = Agent.objects.get(name=agent_name)
        action = Action.objects.get(name=action_name, agent=agent)
        return user.has_perm('use_action', action)
    except Exception:
        # Narrowed from a bare except: a bare clause would also swallow
        # SystemExit/KeyboardInterrupt; any lookup/permission error means "denied".
        return False
def update_info(user, agent, use_another_task):
    """Refresh an Agent's actions, inputs and outputs from its MCollective DDL.

    Queries one server hosting the agent through the REST bridge, then
    creates or updates Action / ActionInput / ActionOutput rows to match
    the reported 'actionhash'. Existing rows are updated in place; nothing
    is deleted.
    """
    logger.info('Calling Mcollective to get info for agent ' + agent.name)
    #Extract n servers containing the agent
    servers_list = Server.objects.filter(agents=agent, deleted=False)
    filters = None
    # Only the first matching server is queried; the loop exits immediately.
    for current in servers_list:
        filters = "identity_filter=%s" % current.hostname
        break;
    response, content = callRestServer(user, filters, "agentinfo", "desc", "agentname="+agent.name, True, use_another_task)
    if response['status'] == '200':
        json_content = json.loads(content)
        for msg in json_content:
            if msg['statusmsg'] == 'OK':
                #Verifying Action. If already present in DB just update it
                for action_name, action_content in msg['data']['actionhash'].items():
                    action_db = Action.objects.filter(name=action_name, agent=agent)
                    saved_action = None
                    if len(action_db) == 0:
                        logger.debug('Creating Action ' + action_name)
                        saved_action = Action.objects.create(name=action_name, description=action_name, agent=agent)
                    else:
                        saved_action = action_db[0]
                    #Verifying Action Inputs. If already present in DB just update it
                    for input_name, input_content in action_content['input'].items():
                        input_db = ActionInput.objects.filter(name=input_name, action=saved_action)
                        if len(input_db) == 0:
                            logger.debug('Creating Action Inputs')
                            validation = None
                            maxlenght = None
                            if 'validation' in input_content:
                                validation=input_content['validation']
                            if 'maxlength' in input_content:
                                maxlenght=input_content['maxlength']
                            #TODO: Maybe it's better to create a custom field to use it.
                            #For type='list' we put the accepted values in the validation field (not too wrong)
                            if input_content['type']=='list':
                                validation=input_content['list']
                            ActionInput.objects.create(action=saved_action, name=input_name, description=input_content['description'], type=input_content['type'], prompt=input_content['prompt'], optional=input_content['optional'], validation=validation, max_length=maxlenght )
                        else:
                            logger.debug('Input with name ' + input_name + " already present in DB. Updating...")
                            input_to_update = input_db[0]
                            input_to_update.description=input_content['description']
                            input_to_update.type=input_content['type']
                            input_to_update.prompt=input_content['prompt']
                            input_to_update.optional=input_content['optional']
                            if 'validation' in input_content:
                                input_to_update.validation=input_content['validation']
                            else:
                                input_to_update.validation=None
                            if 'maxlength' in input_content:
                                input_to_update.max_length=input_content['maxlength']
                            else:
                                input_to_update.max_length=None
                            #TODO: Maybe it's better to create a custom field to use it.
                            #For type='list' we put the accepted values in the validation field (not too wrong)
                            logger.info(input_content['type'])
                            if input_content['type']=="list":
                                input_to_update.validation=input_content['list']
                            input_to_update.save()
                    #Verifying Action Outputs. If already present in DB just update it
                    for output_name, output_content in action_content['output'].items():
                        output_db = ActionOutput.objects.filter(name=output_name, action=saved_action)
                        if len(output_db) == 0:
                            logger.debug('Creating Action Outputs')
                            ActionOutput.objects.create(action=saved_action, name=output_name, description=output_content['description'], display_as=output_content['display_as'])
                        else:
                            logger.debug('Output with name ' + output_name + " already present in DB. Updating...")
                            output_to_update = output_db[0]
                            output_to_update.description=output_content['description']
                            output_to_update.display_as=output_content['display_as']
                            output_to_update.save()
            else:
                logger.warn("Agent " + agent.name + " as no DDL configured!!")
|
[
"yevlempy@gmail.com"
] |
yevlempy@gmail.com
|
b936b8696aa84cd85d4cbf4b5a632e2f0c2c1956
|
8d8dd2a5c490e912f957f4afe9d6d02da673805c
|
/Python/PythonFundamentals/fundamentals/insert_sort.py
|
0a9342d252b3ace441765a7061c9de0b5ef6ec51
|
[] |
no_license
|
sunset375/CodingDojo-1
|
ea025ad90e36bfb6d6ef6bcfaa49e286823299cd
|
c5e483f5009942792c04eb2fa3fea83e5b91b5a2
|
refs/heads/master
| 2023-05-16T10:00:00.060738
| 2020-09-12T04:09:08
| 2020-09-12T04:09:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
def insert_sort(arr):
    """Sort *arr* in place with insertion sort and return it (ascending).

    The original implementation bubbled each new element leftwards with
    pairwise swaps and never stopped early; this version shifts larger
    elements right and exits the inner loop at the first element <= key,
    halving the writes and skipping already-sorted prefixes.
    """
    for i in range(1, len(arr)):
        key = arr[i]
        j = i - 1
        # Shift elements greater than key one slot right, then drop key in.
        while j >= 0 and arr[j] > key:
            arr[j + 1] = arr[j]
            j -= 1
        arr[j + 1] = key
    return arr
print(insert_sort([5,1,7,23,66,90,34]))
|
[
"javierjcjr@gmail.com"
] |
javierjcjr@gmail.com
|
ff30e8932a6292b69bb900155874ffcfa1e06431
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_136/2930.py
|
97be09473c78d8ee4bccfb81bd58eb99d9cd14ca
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
from __future__ import division
# Google Code Jam "Cookie Clicker Alpha" solution (Python 2: print
# statement, raw_input, eval-style input()).
T = input()
for i in range(T):
    # C = farm cost, F = extra rate per farm, X = cookie goal.
    C, F, X = [float(x) for x in raw_input().split()]
    cookiesRate = 2
    if C >= X : print "Case #%d: %.7f" % (i+1, X/cookiesRate)
    else:
        timeElapsed = 0
        # Keep buying farms while (buy + finish at the faster rate) beats
        # simply waiting for X at the current rate.
        while(C/cookiesRate + X/(cookiesRate+F) < X/cookiesRate):
            timeElapsed += C/cookiesRate
            cookiesRate += F
        timeElapsed += X/cookiesRate
        print "Case #%d: %.7f" % (i+1, timeElapsed)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
dd6b99605f2ad07b00b76fab12d2dfa0ec787223
|
0f949dc62b728b2cf6e0e172eb7c1cc31012244d
|
/script/tft_touch.py
|
5190129c1503eb022011cef240009dfb42cc3187
|
[
"MIT"
] |
permissive
|
jeguzzi/mt_screen
|
74c4314012ddb9471650d8b1f10c889265101f92
|
f06ea6404474e8a71a4d61ec381a6e99e03e0ebb
|
refs/heads/master
| 2020-03-19T09:39:44.114549
| 2018-06-06T09:46:05
| 2018-06-06T09:46:05
| 136,307,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,953
|
py
|
#!/usr/bin/env python
from __future__ import division
import threading
import evdev
import rospy
import wiringpi2 as wp
from evdev.ecodes import ABS_X, ABS_Y, BTN_TOUCH, EV_ABS, EV_KEY
from sensor_msgs.msg import Joy
from std_msgs.msg import Bool
# GPIO pins (wiringPi numbering) wired to the TFT's hardware keys.
KEYS = [1, 4, 5]
# wiringPi pin-mode constants: 0 = input, 1 = output.
IN = 0
OUT = 1
class TFTouch(object):
    """ROS node bridging a TFT touchscreen and its hardware keys.

    Publishes the touch position/press state as sensor_msgs/Joy on
    'tft/touch' and each key's state as std_msgs/Bool on 'tft/key_<n>'.
    Touch events are consumed from evdev on a daemon thread; the keys are
    polled on a rospy timer.
    """
    def __init__(self):
        rospy.init_node('tft')
        # When continuos is False, messages are published only on change.
        self.continuos = rospy.get_param('~continuos', True)
        rate = rospy.get_param('~rate', 10.0)
        if rate > 0:
            period = 1 / rate
        else:
            period = 0.1
        self.width = rospy.get_param('tft/width', 320)
        self.height = rospy.get_param('tft/height', 240)
        self.dev = evdev.InputDevice('/dev/input/ts_uinput')
        wp.wiringPiSetup()
        for key, pin in enumerate(KEYS):
            wp.pinMode(pin, IN)
        # One Bool publisher per physical key, topics numbered from 1.
        self.key_pub = {pin: rospy.Publisher('tft/key_{key}'.format(key=i + 1), Bool, queue_size=1)
                        for i, pin in enumerate(KEYS)}
        self.state = {pin: 0 for pin in KEYS}
        self.touch = {'x': None, 'y': None, 'down': 0}
        self.joy_pub = rospy.Publisher('tft/touch', Joy, queue_size=1)
        rospy.Timer(rospy.Duration(period), self.update_keys, oneshot=False)
        # The evdev read loop blocks, so it runs on its own daemon thread.
        self.dev_thread = threading.Thread(target=self.update_touch)
        self.dev_thread.daemon = True
        self.dev_thread.start()
        self.buttons = []
        self.axes = []
    def update_touch(self):
        # Consume evdev events forever, caching the latest clamped x/y and
        # press state for the timer callback to publish.
        for event in self.dev.read_loop():
            if event.type == EV_ABS:
                if event.code == ABS_X:
                    self.touch['x'] = max(min(event.value, self.width), 0)
                    continue
                if event.code == ABS_Y:
                    # Y is flipped relative to the raw event, then clamped.
                    self.touch['y'] = max(min((self.height - event.value), self.height), 0)
                    continue
            if event.type == EV_KEY and event.code == BTN_TOUCH:
                self.touch['down'] = event.value
                continue
    def update_keys(self, event):
        # Timer callback: poll key GPIOs and publish Joy plus per-key Bools.
        # 1 is up, 0 is down
        state = {pin: 1 - wp.digitalRead(pin) for pin in KEYS}
        if self.touch['down'] and self.touch['x'] is not None and self.touch['y'] is not None:
            # Normalise the touch position to [-1, 1] on both axes.
            axes = [2 * self.touch['x'] / self.width - 1, 2 * self.touch['y'] / self.height - 1]
        else:
            axes = [0, 0]
        buttons = [self.touch['down']] + [state[pin] for pin in KEYS]
        if self.continuos or buttons != self.buttons or axes != self.axes:
            msg = Joy(buttons=buttons, axes=axes)
            msg.header.stamp = rospy.Time.now()
            # msg.header.frame_id = 'tft'
            self.joy_pub.publish(msg)
        self.buttons = buttons
        self.axes = axes
        # Publish a key's state only when it changed since the last poll.
        for pin, value in state.items():
            if value != self.state.get(pin):
                self.key_pub[pin].publish(value)
        self.state = state
if __name__ == '__main__':
    # Construct the node, then hand control to the ROS event loop.
    t = TFTouch()
    rospy.spin()
|
[
"jerome@idsia.ch"
] |
jerome@idsia.ch
|
9f060361d6de995b683995985d8c9f9c463b1669
|
d54dfc623e1916c5cf08b738ff4a3d88a923fdb7
|
/NumberGuessingGame.py
|
0527a7d767b7c0c03f242a1d91d2b5670c745784
|
[] |
no_license
|
sandeep1108/Code
|
1d2004987d55058368149873944ebc631b43fe4e
|
1f97e2f6494a40bec14915a288cbfe8e23c7e6eb
|
refs/heads/master
| 2021-05-01T04:23:47.511245
| 2016-09-28T20:44:35
| 2016-09-28T20:44:35
| 69,502,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,662
|
py
|
"""
Author: Sandeep Das
This program asks the user to guess a random number between 1 and 100
"""
import random
import os.path
import json
from sys import argv
def main():
    """Entry point: greet the player and dispatch to checkUser (Python 2 I/O)."""
    print "Hello There! Welcome!"
    playerName = str (raw_input("Please enter your user id: "))
    print "How are you doing %s?" %playerName
    checkUser(playerName) # chk if the user already exists?
def checkUser(playerName):
    """Greet a returning or first-time player, then start a game round.

    NOTE(review): checkIfFileExist() only tests that Score.txt exists —
    not that *this* player has an entry. For a new player when the file
    already exists, readFile returns [] and max([]) below raises
    ValueError; confirm intended behaviour.
    """
    if not checkIfFileExist():
        print "Looks like this is your first attempt %s. Let's begin" %playerName
        game(playerName)
    else:
        playerScore = readFile(playerName) # Check if the Paleyer already exists in the file, check for key as playerName
        print "Welcome back %s. Looks like your last score was %d. Let's play again. " %(playerName, max(playerScore))
        game(playerName)
def readFile(playerName):
    """Load Score.txt and return the score list stored for *playerName*.

    Returns an empty list when the player has no (truthy) entry, matching
    the original truthiness-based lookup.
    """
    with open('Score.txt') as handle:
        score_table = json.load(handle)
    # A missing or falsy entry (e.g. an empty list) yields [].
    return score_table.get(playerName) or []
def game(playerName):
    """Run one guessing round and append the attempt count to Score.txt.

    Picks a random number in [1, 100] and loops until the player guesses
    it, counting wrong attempts (Python 2: print statements).
    """
    print "Guess a number between 1 and 100!"
    randomNumber = random.randint(1, 100)
    found = False
    attemptCounter = 0
    while not found:
        # NOTE(review): Python 2 input() eval()s the typed text;
        # int(raw_input(...)) would be safer. Left unchanged.
        userGuess = input("Your Guess: ")
        if userGuess == randomNumber:
            print "That's the number!"
            found = True
        elif userGuess < randomNumber:
            print "Thats's not it. Try biggger.."
            attemptCounter += 1
        else:
            print "Thats's not it. Try smaller!"
            attemptCounter += 1
    # attemptCounter counted only wrong guesses; +1 includes the final one.
    print "Congratulations, you found it in %s attempts."%str(attemptCounter+1)
    if not checkIfFileExist():
        playerScore = []
        playerScore.append(int(attemptCounter+1))
        writeScore(playerName, playerScore)
    else:
        playerScore =readFile(playerName)
        playerScore.append(int(attemptCounter+1))
        writeScore(playerName, playerScore)
    print "Have a goood day!"
def checkIfFileExist():
    """Return True when Score.txt exists in the working directory and is non-empty."""
    return os.path.isfile('Score.txt') and os.stat('Score.txt').st_size > 0
def writeScore(playerName, playerScore):
    """Persist *playerScore* under *playerName* in the Score.txt JSON dict.

    Creates the file when absent; otherwise merges into the existing
    dictionary and rewrites the whole file (Python 2: print statements).
    """
    if not checkIfFileExist():
        playerDict = {}
        playerDict[playerName] = playerScore
        with open ('Score.txt', 'w') as fileOpen:
            json.dump(playerDict, fileOpen)
        print "New Player %s data saved." %playerName
    else:
        with open ('Score.txt') as fileOpen:
            playerDict = json.load(fileOpen)
        playerDict[playerName] = playerScore
        with open ('Score.txt', 'w') as fileOpen:
            json.dump(playerDict, fileOpen)
        print "Player %s data saved." %playerName
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
a7b55848abbb88a94997e6304eb564af957d682f
|
e012ac032de8bf5bf880d4917fb6329f99f47d2b
|
/grdient_decent.py
|
13306f92fd7645e6b5595b881733dbdf2c385b6e
|
[] |
no_license
|
janakiraam/ML-ToyProbelm
|
d1d0b8ffe55fb68cea22ea2326be3aeb23e64423
|
f8775ebce8f6b464e023bda92890fc30bcf923e6
|
refs/heads/main
| 2023-03-15T11:06:32.252230
| 2021-03-13T17:19:08
| 2021-03-13T17:19:08
| 341,291,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
import numpy as np
def gradient_decent(x,y):
m_curr=0
b_curr=0
iteration=100
n = len(x)
learning_rate=0.001
for i in range(iteration):
y_predict=m_curr*x+b_curr
md=-(2/n)*sum(x*(y-y_predict))
bd=-(2/n)*sum(y-y_predict)
m_curr=m_curr - learning_rate*md
b_curr=b_curr - learning_rate*bd
print("m {}, b {} , iteration {}".format(m_curr,b_curr,i))
# Demo: fit a line to a small sample dataset.
x=np.array([1,2,3,4,5])
y=np.array([5,7,11,25,13])
gradient_decent(x,y)
|
[
"noreply@github.com"
] |
noreply@github.com
|
3791527cea4f9b19510cd2511f27d307b569de22
|
4d2de834ecea6ef444b1c45afb5a41e717900858
|
/app/app_todo/__init__.py
|
33a8204e60a9ea5ebfaf02b5c996d4aafaf808af
|
[] |
no_license
|
florije1988/flask_regular
|
19da04c59fbf600274d206750ccb8cf355db2d24
|
1219e4efbad76202d6dca7e4b2148344ea9edf8c
|
refs/heads/master
| 2020-12-24T13:21:29.840919
| 2014-12-16T00:58:15
| 2014-12-16T00:58:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'florije'
from flask import Blueprint
from app.custom_api import Api
# Blueprint for the todo sub-application, registered as 'app_task'.
app_todo = Blueprint('app_task', __name__)
# catch_all_404s makes unknown routes render as API-style error responses.
api_todo = Api(app_todo, catch_all_404s=True)
from . import views
# Route registration: /hello is handled by views.HelloHandler.
api_todo.add_resource(views.HelloHandler, '/hello')
|
[
"florije1988@gmail.com"
] |
florije1988@gmail.com
|
6b02940ed353c8a99eb9742927bfe2c36301da96
|
c388f70bec95795dd8fdea56dc8c9beea49aa564
|
/diststore/settings.py
|
9a2c84a0d1c9ad3f286bff6b4bb37109c619066d
|
[
"MIT"
] |
permissive
|
santeri/pydiststore
|
6d6a5c8d33ba2147da106b0877067c23f591a45d
|
227074a152e9e6a86abf22c3d4b2e8d43ec2f659
|
refs/heads/master
| 2021-01-22T02:53:32.767149
| 2012-10-04T23:54:28
| 2012-10-04T23:54:28
| 145,312
| 1
| 1
| null | 2012-10-04T23:54:29
| 2009-03-07T17:33:18
|
Python
|
UTF-8
|
Python
| false
| false
| 843
|
py
|
#!/usr/bin/env python
# encoding: utf-8
__all__ = ['http_port', 'multicast_addr', 'multicast_port', 'multicast_dst', 'multicast_timeout']
# Python 2 module name; on Python 3 this would be 'configparser'.
import ConfigParser
# Settings are parsed once at import time; later files in the list
# override earlier ones when they define the same option.
config = ConfigParser.ConfigParser()
config.read(['diststored.cfg', '/usr/local/etc/diststored.cfg', '/etc/diststored.cfg'])
_http_port = int(config.get("diststored", "http_port"))
_multicast_addr = config.get("diststored", "multicast_addr")
_multicast_port = int(config.get("diststored", "multicast_port"))
# (addr, port) tuple in the shape socket APIs expect for a destination.
_multicast_dst = (_multicast_addr, _multicast_port)
_multicast_timeout = float(config.get("diststored", "multicast_timeout"))
# Accessors below return the values parsed at import.
def http_port():
    return _http_port
def multicast_addr():
    return _multicast_addr
def multicast_port():
    return _multicast_port
def multicast_dst():
    return _multicast_dst
def multicast_timeout():
    return _multicast_timeout
|
[
"santeri@santeri.se"
] |
santeri@santeri.se
|
c633ac2470e05a99614be9f9f82a751daa8489db
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/bob/c4ccaa8b4474471f993db5910720bf59.py
|
53dba6ce82f915abfd9d8828c4b03607686fbbc1
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 1,305
|
py
|
import unicodedata
# Canned replies keyed by the detected statement category (see hey()).
STANDARD_RESPONSES = {
    'question': 'Sure.',
    'exclamation': 'Woah, chill out!',
    'empty': 'Fine. Be that way!',
    'other': 'Whatever.'
}
def hey(*statements):
    """Classify the first statement and return Bob's canned response.

    Categories, in precedence order: empty/whitespace, shouted (all cased
    characters uppercase), question (ends with '?'), otherwise 'Whatever.'.
    Only the first argument is ever examined — the loop returns on its
    first iteration.
    """
    for statement in statements:
        if not isinstance(statement, str):
            # Coerce non-string input to text. isinstance replaces the
            # exact type() comparison so str subclasses pass through, and
            # except Exception replaces a bare except that would also have
            # swallowed SystemExit/KeyboardInterrupt. The normalize branch
            # is a Python 2 unicode fallback, kept for compatibility.
            try:
                statement = str(statement)
            except Exception:
                statement = unicodedata.normalize('NFKD', statement).encode('ascii','ignore')
        if is_empty(statement):
            return STANDARD_RESPONSES['empty']
        punctuation = statement[len(statement) - 1]
        if is_exclamation(statement, punctuation):
            return STANDARD_RESPONSES['exclamation']
        elif is_question(statement, punctuation):
            return STANDARD_RESPONSES['question']
        else:
            return STANDARD_RESPONSES['other']
def is_empty(statement):
    """Return True for an empty or all-whitespace statement."""
    return not statement or statement.isspace()
def is_question(statement, punctuation):
    """Return True when the statement's final character is a question mark.

    Only *punctuation* (the pre-extracted last character) is inspected;
    *statement* is accepted for signature parity with is_exclamation.
    """
    return punctuation == '?'
def is_exclamation(statement, punctuation):
    """Return True when the statement is shouted (all cased chars uppercase).

    NOTE: the original nested branches return isupper() whether or not the
    last character is '!' — *punctuation* never changes the outcome — so
    this collapses to a single isupper() check with identical behaviour.
    """
    return statement.isupper()
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
e9a4edfe1026ffae7f0e4077a0753cd8224ef2a4
|
d18ed72d6f8d27dd8a13eab5c6366f9dca48aa6b
|
/espresso/jobmanager/jobmanager/temp/bin/packjobdir.py
|
697971511dcfa49909f268637571980c629e1286
|
[] |
no_license
|
danse-inelastic/AbInitio
|
6f1dcdd26a8163fa3026883fb3c40f63d1105b0c
|
401e8d5fa16b9d5ce42852b002bc2e4274afab84
|
refs/heads/master
| 2021-01-10T19:16:35.770411
| 2011-04-12T11:04:52
| 2011-04-12T11:04:52
| 34,972,670
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 719
|
py
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2009 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
def main():
    """Run the PackJobDir application with '../config' as its private depository."""
    from vnf.applications.PackJobDir import PackJobDir as base
    class App(base):
        def _getPrivateDepositoryLocations(self):
            # Override: read private depositories from the sibling config dir.
            return ['../config']
    app = App()
    return app.run()
# main
if __name__ == '__main__':
    # invoke the application shell
    main()
# version
__id__ = "$Id$"
# End of file
|
[
"dexity@gmail.com"
] |
dexity@gmail.com
|
cca76db479141c09148d375544d94ff41aad4e76
|
2b87db0ada3c2d016df891761855e9a9dc5b81fe
|
/DP/matrixMultiplication_tabulation.py
|
5dccacdc6120e0b39b9f754361beaf58b5d63d59
|
[] |
no_license
|
drdcs/Algorithms-and-System-Design
|
9b201ba47bda14ca8fcd9aeddcfee760b3194f2d
|
656fafbd758c30f5bd7a73a7d677562d5ae1f39f
|
refs/heads/main
| 2023-04-11T10:25:02.992297
| 2021-04-22T05:57:10
| 2021-04-22T05:57:10
| 329,364,127
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
"""
Input: p[] = {40, 20, 30, 10, 30}
Output: 26000
There are 4 matrices of dimensions 40x20, 20x30, 30x10 and 10x30.
Let the input 4 matrices be A, B, C and D. The minimum number of
multiplications are obtained by putting parenthesis in following way
(A(BC))D --> 20*30*10 + 40*20*10 + 40*10*30
Input: p[] = {10, 20, 30, 40, 30}
Output: 30000
There are 4 matrices of dimensions 10x20, 20x30, 30x40 and 40x30.
Let the input 4 matrices be A, B, C and D. The minimum number of
multiplications are obtained by putting parenthesis in following way
((AB)C)D --> 10*20*30 + 10*30*40 + 10*40*30
"""
|
[
"diptihdl@gmail.com"
] |
diptihdl@gmail.com
|
77f4c26f8dedce3ea8a7ddbfbd4dd528d51a45bd
|
65953555b287c0402ed79e2b08c0780df857810d
|
/tarea2/insertionsort2.py
|
1ed4925f3fb20f5224ceebb96b7bac0bec31a708
|
[] |
no_license
|
alfonsokim/algorithms
|
32f3cbc6a6360daced3dadd41c29218506c34e0d
|
17c02d077d788937547af7338942f9ba6e279104
|
refs/heads/master
| 2021-05-16T04:08:18.356718
| 2017-03-07T21:52:16
| 2017-03-07T21:52:16
| 23,545,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
def insertionsort(array):
    """Sort *array* in place (ascending) via insertion sort and return it."""
    for right in range(1, len(array)):
        value = array[right]
        hole = right
        # Slide the hole leftwards past every element larger than value.
        while hole > 0 and array[hole - 1] > value:
            array[hole] = array[hole - 1]
            hole -= 1
        array[hole] = value
    return array
if __name__ == '__main__':
    import random
    import time
    # Benchmark loop (Python 2: 'print >>' redirection and print statement;
    # time.clock was removed in Python 3.8).
    times = open('insertion2.txt', 'a')
    for n in range(500000, 10010000, 500000):
        array = [random.randint(0, 100 * n) for _ in range(n)]
        t1 = time.clock()
        insertionsort(array)
        # Append 'n=...,t=...' timing lines to insertion2.txt.
        print >> times, 'n=%i,t=%.4f' % (n, (time.clock() - t1))
        print n
    times.close()
|
[
"adkim.ext@gmv.com"
] |
adkim.ext@gmv.com
|
accd632927dfbee392f990536801441050bda91d
|
7ce125c50e942a55ff2554756314fa7558a5c2cb
|
/homepage/views/account.py
|
3c3745ff615b186010c7aaa1f8ba970bc62566c9
|
[] |
no_license
|
dbwyatt/ColonialHeritageFoundation
|
78dc520295d1df650dc7c3ea3770827aaf8ee3cb
|
77a1c3e70ccbe4403902e819950eb10e058f95ef
|
refs/heads/master
| 2021-01-19T08:37:20.394418
| 2015-04-16T00:22:26
| 2015-04-16T00:22:26
| 33,692,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,678
|
py
|
__author__ = 'Daniel'
from django.conf import settings
from django import forms
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.http import HttpRequest
from django_mako_plus.controller import view_function
import homepage.models as hmod
from django_mako_plus.controller.router import get_renderer
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.models import User, Group, Permission, ContentType
from django.contrib.auth.decorators import permission_required, login_required
from django.contrib.auth import authenticate, login
import datetime
from django.utils.timezone import localtime
templater = get_renderer('homepage')
@view_function
@login_required
def process_request(request):
    """Render the account page for the logged-in user.

    Looks the user up by the id stored in the session; redirects to the
    login page when that id no longer matches a User row.
    """
    users = {}
    # get user
    try:
        users['user'] = hmod.User.objects.get(id=request.session['user_id'])
    except hmod.User.DoesNotExist:
        return HttpResponseRedirect('/homepage/login/')
    # Pre-convert the join timestamp to local time for the template.
    users['user'].converted_time = localtime(users['user'].date_joined)
    return templater.render_to_response(request, 'account.html', users)
@view_function
@login_required
def changepassword(request):
    """Show and process the change-password form for the logged-in user.

    On a valid POST the new password is saved and the session auth hash is
    refreshed so the user stays logged in; HttpResponse(True) is the
    success marker returned to the client.
    """
    params = {}
    try:
        user = hmod.User.objects.get(id=request.session['user_id'])
    except hmod.User.DoesNotExist:
        return HttpResponseRedirect('/homepage/login/')
    form = ChangePassword(request, initial={
        'old_password': '',
        'password': '',
        'confirm': '',
    })
    if request.method == 'POST':
        form = ChangePassword(request, request.POST)
        if form.is_valid():
            user.set_password(form.cleaned_data['password'])
            user.save()
            # form.save()
            # Keep the session valid after the password hash changes.
            update_session_auth_hash(request, user)
            request.session['user_id'] = user.id
            return HttpResponse(True)
    params['form'] = form
    return templater.render_to_response(request, 'account_changepassword.html', params)
class ChangePassword(forms.Form):
    """Change-password form: verifies the old password and the confirmation.

    Requires the current request so clean() can authenticate the logged-in
    user against the submitted old password.
    """
    old_password = forms.CharField(label='Old Password', widget=forms.PasswordInput)
    password = forms.CharField(label='Password', widget=forms.PasswordInput)
    confirm = forms.CharField(label='Confirm Password', widget=forms.PasswordInput)
    def __init__(self, request, *args, **kwargs):
        assert isinstance(request, HttpRequest), 'Invalid request object'
        self.request = request
        super().__init__(*args, **kwargs)
    def clean(self):
        """Cross-field validation: old password correct, new passwords match."""
        old_password = self.cleaned_data.get('old_password')
        password = self.cleaned_data.get('password')
        confirm = self.cleaned_data.get('confirm')
        # NOTE(review): these debug prints leak passwords to stdout —
        # consider removing them.
        print(self.request.user.username)
        print(old_password)
        print(password)
        print(confirm)
        user = authenticate(username=self.request.user.username, password=old_password)
        if user is not None:
            # the password verified for the user
            if user.is_active:
                if not confirm:
                    # raise forms.ValidationError("You must confirm your password")
                    self.add_error('confirm', 'You must confirm your password')
                elif password != confirm:
                    # raise forms.ValidationError("Passwords do not match")
                    self.add_error('password', 'Passwords do not match')
                    self.add_error('confirm', 'Passwords do not match')
            else:
                self.add_error('old_password', 'The password is valid, but the account has been disabled!')
        else:
            # the authentication system was unable to verify the username and password
            self.add_error('old_password', 'Could not verify password')
        # return confirm
@view_function
def create(request):
    """Render and process the account-signup form.

    On a valid POST, creates the User and Address rows, assigns the 'User'
    group, logs the new user in and redirects to the account page.
    """
    params = {}
    form = UserCreateForm(initial={
        'first_name': '',
        'last_name': '',
        'username': '',
        'password': '',
        'confirm': '',
        'email': '',
        'phone': '',
        'address1': '',
        'address2': '',
        'city': '',
        'state': '',
        'zip': '',
        'securityQuestion': '',
        'securityAnswer': ''
    })
    if request.method == 'POST':
        form = UserCreateForm(request.POST)
        # Sentinel id: no existing user is excluded from the
        # username-uniqueness check in clean_username.
        form.userid = -1
        if form.is_valid():
            user = hmod.User()
            user.first_name = form.cleaned_data['first_name']
            user.last_name = form.cleaned_data['last_name']
            user.username = form.cleaned_data['username']
            user.set_password(form.cleaned_data['password'])
            user.email = form.cleaned_data['email']
            address = hmod.Address()
            address.address1 = form.cleaned_data['address1']
            address.address2 = form.cleaned_data['address2']
            address.city = form.cleaned_data['city']
            address.state = form.cleaned_data['state']
            address.zip = form.cleaned_data['zip']
            # Placeholder value for the Address model's email field.
            address.email = 'fill@fill.com'
            address.save()
            user.address = address
            user.phone = form.cleaned_data['phone']
            user.securityQuestion = form.cleaned_data['securityQuestion']
            user.securityAnswer = form.cleaned_data['securityAnswer']
            # NOTE(review): uses the first Photograph row as a default
            # avatar — assumes at least one Photograph exists; confirm.
            user.photograph = hmod.Photograph.objects.all()[0]
            group = Group.objects.get(name='User')
            user.group = group
            user.save()
            user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password'])
            login(request, user)
            request.session['user_id'] = user.id
            return HttpResponseRedirect('/homepage/account/')
    params['form'] = form
    return templater.render_to_response(request, 'account_create.html', params)
class UserCreateForm(forms.Form):
    """Signup form: identity, credentials, contact/address and security question.

    The view must set ``self.userid`` before validation (``-1`` for a new
    account) so clean_username can exclude the user's own row from the
    uniqueness check.
    """
    first_name = forms.CharField(label='First Name', required=True)
    last_name = forms.CharField(label='Last Name', required=True)
    username = forms.CharField(label='Username')
    password = forms.CharField(label='Password', widget=forms.PasswordInput)
    confirm = forms.CharField(label='Confirm Password', required=False, widget=forms.PasswordInput)
    email = forms.EmailField(label='Email', required=True)
    phone = forms.CharField(label='Phone')
    address1 = forms.CharField(label='Address 1')
    address2 = forms.CharField(label='Address 2', required=False)
    city = forms.CharField(label='City')
    # US state / territory abbreviations for the address dropdown.
    state = forms.ChoiceField(
        label='State',
        choices=[
            (x, y) for x, y in
            (
                ("AL", "Alabama"),
                ("AK", "Alaska"),
                ("AS", "American Samoa"),
                ("AZ", "Arizona"),
                ("AR", "Arkansas"),
                ("CA", "California"),
                ("CO", "Colorado"),
                ("CT", "Connecticut"),
                ("DE", "Delaware"),
                ("DC", "District Of Columbia"),
                ("FM", "Federated States Of Micronesia"),
                ("FL", "Florida"),
                ("GA", "Georgia"),
                ("GU", "Guam"),
                ("HI", "Hawaii"),
                ("ID", "Idaho"),
                ("IL", "Illinois"),
                ("IN", "Indiana"),
                ("IA", "Iowa"),
                ("KS", "Kansas"),
                ("KY", "Kentucky"),
                ("LA", "Louisiana"),
                ("ME", "Maine"),
                ("MH", "Marshall Islands"),
                ("MD", "Maryland"),
                ("MA", "Massachusetts"),
                ("MI", "Michigan"),
                ("MN", "Minnesota"),
                ("MS", "Mississippi"),
                ("MO", "Missouri"),
                ("MT", "Montana"),
                ("NE", "Nebraska"),
                ("NV", "Nevada"),
                ("NH", "New Hampshire"),
                ("NJ", "New Jersey"),
                ("NM", "New Mexico"),
                ("NY", "New York"),
                ("NC", "North Carolina"),
                ("ND", "North Dakota"),
                ("MP", "Northern Mariana Islands"),
                ("OH", "Ohio"),
                ("OK", "Oklahoma"),
                ("OR", "Oregon"),
                ("PW", "Palau"),
                ("PA", "Pennsylvania"),
                ("PR", "Puerto Rico"),
                ("RI", "Rhode Island"),
                ("SC", "South Carolina"),
                ("SD", "South Dakota"),
                ("TN", "Tennessee"),
                ("TX", "Texas"),
                ("UT", "Utah"),
                ("VT", "Vermont"),
                ("VI", "Virgin Islands"),
                ("VA", "Virginia"),
                ("WA", "Washington"),
                ("WV", "West Virginia"),
                ("WI", "Wisconsin"),
                ("WY", "Wyoming")
            )]
    )
    zip = forms.CharField(label='Zip')
    securityQuestion = forms.ChoiceField(label='Security Question', choices=[(x, x) for x in ["Where were you born?", "What is your mother's maiden name?", "What is the name of your first pet?", "In which city did you last live?"]])
    securityAnswer = forms.CharField(label='Security Answer')
    def clean_username(self):
        """Enforce minimum length and uniqueness (ignoring self.userid's row)."""
        if len(self.cleaned_data['username']) < 5:
            raise forms.ValidationError("Username needs to be at least 5 characters.")
        user_count = hmod.User.objects.filter(username=self.cleaned_data['username']).exclude(id=self.userid).count()
        if user_count >= 1:
            raise forms.ValidationError("This username is already taken.")
        return self.cleaned_data['username']
    def clean(self):
        """Cross-field validation: the password confirmation must be present and match."""
        password = self.cleaned_data.get('password')
        confirm = self.cleaned_data.get('confirm')
        if not confirm:
            # raise forms.ValidationError("You must confirm your password")
            self.add_error('confirm', 'You must confirm your password')
        elif password != confirm:
            # raise forms.ValidationError("Passwords do not match")
            self.add_error('password', 'Passwords do not match')
            self.add_error('confirm', 'Passwords do not match')
        # return confirm
@view_function
@login_required
def edit(request):
    """Render and process the profile-edit form for the user id in urlparams[0].

    On a valid POST, updates the User and its related Address rows;
    HttpResponse(True) is the success marker returned to the client.
    """
    params = {}
    try:
        user = hmod.User.objects.get(id=request.urlparams[0])
    except hmod.User.DoesNotExist:
        return HttpResponseRedirect('/homepage/users/')
    form = UserEditForm(initial={
        'first_name': user.first_name,
        'last_name': user.last_name,
        'username': user.username,
        'password': user.password,
        'email': user.email,
        'phone': user.phone,
        'address1': user.address.address1,
        'address2': user.address.address2,
        'city': user.address.city,
        'state': user.address.state,
        'zip': user.address.zip,
        'securityQuestion': user.securityQuestion,
        'securityAnswer': user.securityAnswer,
    })
    if request.method == 'POST':
        form = UserEditForm(request.POST)
        # Exclude this user's own row from the username-uniqueness check.
        form.userid = user.id
        if form.is_valid():
            user.first_name = form.cleaned_data['first_name']
            user.last_name = form.cleaned_data['last_name']
            user.username = form.cleaned_data['username']
            user.email = form.cleaned_data['email']
            user.phone = form.cleaned_data['phone']
            # try:
            #     address = hmod.Address.objects.get(id=user.address_id)
            # except hmod.User.DoesNotExist:
            #     return HttpResponseRedirect('/homepage/account/')
            address = user.address
            address.address1 = form.cleaned_data['address1']
            address.address2 = form.cleaned_data['address2']
            address.city = form.cleaned_data['city']
            address.state = form.cleaned_data['state']
            address.zip = form.cleaned_data['zip']
            address.save()
            user.securityQuestion = form.cleaned_data['securityQuestion']
            user.securityAnswer = form.cleaned_data['securityAnswer']
            user.save()
            return HttpResponse(True)
    params['form'] = form
    params['user'] = user
    return templater.render_to_response(request, 'account_edit.html', params)
class UserEditForm(forms.Form):
    """Account-edit form.

    The view must set ``self.userid`` before validation so that
    clean_username() can exclude the current user's own row from the
    duplicate-username check.
    """
    first_name = forms.CharField(label='First Name', required=True)
    last_name = forms.CharField(label='Last Name', required=True)
    username = forms.CharField(label='Username')
    email = forms.EmailField(label='Email', required=True)
    phone = forms.CharField(label='Phone Number')
    address1 = forms.CharField(label='Address 1')
    address2 = forms.CharField(label='Address 2', required=False)
    city = forms.CharField(label='City')
    state = forms.ChoiceField(
        label='State',
        # Plain (code, name) pairs; the original wrapped this literal in a
        # no-op "[(x, y) for x, y in (...)]" comprehension.
        choices=[
            ("AL", "Alabama"),
            ("AK", "Alaska"),
            ("AS", "American Samoa"),
            ("AZ", "Arizona"),
            ("AR", "Arkansas"),
            ("CA", "California"),
            ("CO", "Colorado"),
            ("CT", "Connecticut"),
            ("DE", "Delaware"),
            ("DC", "District Of Columbia"),
            ("FM", "Federated States Of Micronesia"),
            ("FL", "Florida"),
            ("GA", "Georgia"),
            ("GU", "Guam"),
            ("HI", "Hawaii"),
            ("ID", "Idaho"),
            ("IL", "Illinois"),
            ("IN", "Indiana"),
            ("IA", "Iowa"),
            ("KS", "Kansas"),
            ("KY", "Kentucky"),
            ("LA", "Louisiana"),
            ("ME", "Maine"),
            ("MH", "Marshall Islands"),
            ("MD", "Maryland"),
            ("MA", "Massachusetts"),
            ("MI", "Michigan"),
            ("MN", "Minnesota"),
            ("MS", "Mississippi"),
            ("MO", "Missouri"),
            ("MT", "Montana"),
            ("NE", "Nebraska"),
            ("NV", "Nevada"),
            ("NH", "New Hampshire"),
            ("NJ", "New Jersey"),
            ("NM", "New Mexico"),
            ("NY", "New York"),
            ("NC", "North Carolina"),
            ("ND", "North Dakota"),
            ("MP", "Northern Mariana Islands"),
            ("OH", "Ohio"),
            ("OK", "Oklahoma"),
            ("OR", "Oregon"),
            ("PW", "Palau"),
            ("PA", "Pennsylvania"),
            ("PR", "Puerto Rico"),
            ("RI", "Rhode Island"),
            ("SC", "South Carolina"),
            ("SD", "South Dakota"),
            ("TN", "Tennessee"),
            ("TX", "Texas"),
            ("UT", "Utah"),
            ("VT", "Vermont"),
            ("VI", "Virgin Islands"),
            ("VA", "Virginia"),
            ("WA", "Washington"),
            ("WV", "West Virginia"),
            ("WI", "Wisconsin"),
            ("WY", "Wyoming"),
        ],
    )
    zip = forms.CharField(label='Zip')
    securityQuestion = forms.ChoiceField(label='Security Question', choices=[(x, x) for x in ["Where were you born?", "What is your mother's maiden name?", "What is the name of your first pet?", "In which city did you last live?"]])
    securityAnswer = forms.CharField(label='Security Answer')

    def clean_username(self):
        """Require at least 5 characters and uniqueness (ignoring this user)."""
        if len(self.cleaned_data['username']) < 5:
            # Fixed grammar in the user-facing message ("character" -> "characters").
            raise forms.ValidationError("Username needs to be at least 5 characters.")
        user_count = hmod.User.objects.filter(username=self.cleaned_data['username']).exclude(id=self.userid).count()
        if user_count >= 1:
            raise forms.ValidationError("This username is already taken.")
        return self.cleaned_data['username']
@view_function
@login_required
def recent_activity(request):
    '''Show the logged-in user's recent activity (last login in local time).'''
    context = {}
    try:
        context['user'] = hmod.User.objects.get(id=request.session['user_id'])
    except hmod.User.DoesNotExist:
        return HttpResponseRedirect('/homepage/')
    # Convert the stored last-login timestamp into the active local timezone.
    context['user'].converted_time = localtime(context['user'].last_login)
    return templater.render_to_response(request, 'account_recent_activity.html', context)
|
[
"dbwyatt23@gmail.com"
] |
dbwyatt23@gmail.com
|
bca492a81fe23d315cbb69434d85d776cc8e630a
|
774413d52ecdfad882bc79f325f6d6b282b336cb
|
/src/face_api_client.py
|
c849e0456c44d3ae95e4d294a36909c9ee1ec024
|
[] |
no_license
|
omribeni/BestFaceImageRecognition
|
6cde0d7dce9c743001aa34121eead85517b93a4c
|
633ab4cf8200bff47a730982b8d41e816aae916f
|
refs/heads/master
| 2022-12-29T11:07:05.260349
| 2020-10-11T15:04:06
| 2020-10-11T15:04:06
| 289,653,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,458
|
py
|
import os
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
import Face
class FaceApiClient(object):
    """Lazily-initialized wrapper around the Azure Face API.

    The underlying FaceClient is only created on first use, so constructing
    this class never touches the network.
    """

    # Set the FACE_SUBSCRIPTION_KEY environment variable with your key as the value.
    KEY = os.environ.get('FACE_SUBSCRIPTION_KEY')
    # Set the FACE_ENDPOINT environment variable with the endpoint from your Face service in Azure.
    ENDPOINT = os.environ.get('FACE_ENDPOINT')

    def __init__(self):
        # The FaceClient is created on demand, not at construction time.
        self.__client = None

    def __initialize(self):
        if not self.__client:
            # Create an authenticated FaceClient.
            self.__client = FaceClient(self.ENDPOINT, CognitiveServicesCredentials(self.KEY))

    @property
    def client(self):
        # Lazy accessor: initializes the FaceClient on first access.
        if not self.__client:
            self.__initialize()
        return self.__client

    def detect_image_faces(self, image_path):
        """Detect faces in the image at image_path; None on failure.

        analyze_image() already returns None on failure, so the original
        "if not response: return None" branch was redundant.
        """
        return self.analyze_image(image_path)

    def recognize_similar_faces(self, face, all_faces):
        """Return faces similar to *face* among *all_faces* ([] when empty)."""
        face_ids = [f.face_id for f in all_faces]
        if not face_ids:
            return []
        # Bug fix: use the lazily-initializing `client` property instead of
        # touching self.__client directly, which was still None when this was
        # the first API call made on the instance.
        return self.client.face.find_similar(face.face_id, face_ids=face_ids)

    def analyze_image(self, image_path):
        """Run face detection on a local image file.

        Returns the service response, or None when the file cannot be read or
        the API call fails (deliberate best-effort contract).
        """
        # init on demand
        self.__initialize()
        try:
            # convert local image to stream
            with open(image_path, "rb") as image_bytes_stream:
                return self.__client.face.detect_with_stream(image_bytes_stream,
                                                             return_face_attributes=Face.FaceFields.all(),
                                                             detection_model="detection_01", return_face_landmarks=True)
        except Exception:
            # Swallow any failure and signal it with None, matching callers'
            # expectations (see detect_image_faces).
            return None

    def get_most_popular_face_images(self, faces_list):
        """Greedily group similar faces and return the largest group.

        NOTE(review): find_similar() returns SimilarFace objects while
        faces_list holds DetectedFace objects, so the "x not in cur_group"
        filter may only ever remove the seed face — confirm this is intended.
        """
        biggest_person_group = []
        while faces_list:
            face = faces_list[0]
            cur_group = [face]
            # add all similar faces of the current face
            cur_group.extend(self.recognize_similar_faces(face, faces_list[1:]))
            if cur_group and len(cur_group) > len(biggest_person_group):
                biggest_person_group = cur_group
            # remove all of the current group members from the faces list
            faces_list = [x for x in faces_list if x not in cur_group]
        return biggest_person_group

    @classmethod
    def get_best_resolution_face_image(cls, person_faces, all_faces, face_to_resolution_map):
        """Pick the detected face whose bounding box covers the largest
        fraction of its source image.

        :param person_faces: faces (with .face_id) belonging to one person
        :param all_faces: detected-face objects to look the ids up in
        :param face_to_resolution_map: face_id -> (width, height) of the image
        :return: the best detected face, or None when nothing matches
        """
        best_image = None
        best_resolution = 0
        for face in person_faces:
            # get the detectedFace object form the similarFace list by face_id
            detected_face = next((x for x in all_faces if x.face_id == face.face_id), None)
            if detected_face:
                cur_res = face_to_resolution_map[detected_face.face_id]
                picture_surface_area = cur_res[0] * cur_res[1]
                face_box_surface_area = detected_face.face_rectangle.height * detected_face.face_rectangle.width
                # Relative resolution: share of the picture occupied by the face box.
                current_resolution = face_box_surface_area / picture_surface_area
                if current_resolution > best_resolution:
                    best_resolution = current_resolution
                    best_image = detected_face
        return best_image
|
[
"omribenishai015@gmail.com"
] |
omribenishai015@gmail.com
|
4de44a5b5d1cf08e40c04309c6c96b326fff5031
|
c3179dc6c11770fe877d9b08bebd28448ee66ba8
|
/mtdnn/tasks/utils.py
|
0e80a8d2a9db50028038b3cf4486bcfa8fe6d561
|
[
"MIT"
] |
permissive
|
microsoft/MT-DNN
|
2a0f102916a1b092f25b4999834177bd38319c53
|
e5c3e07f3a8e55067433714ce261a6d28ba73d22
|
refs/heads/master
| 2023-06-29T23:57:42.108328
| 2020-07-02T02:22:06
| 2020-07-02T02:22:06
| 215,127,881
| 151
| 28
|
MIT
| 2023-06-12T21:28:37
| 2019-10-14T19:25:46
|
Python
|
UTF-8
|
Python
| false
| false
| 14,964
|
py
|
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import os
import pdb
from random import shuffle
from sys import path
from mtdnn.common.metrics import calc_metrics
from mtdnn.common.types import DataFormat
def process_data_and_dump_rows(
    rows: list,
    out_path: str,
    data_format: DataFormat,
    write_mode: str = "w",
    dump_rows: bool = False,
) -> list:
    """Serialize task rows into tab-separated lines.

    :param rows: list of row dicts (required keys depend on data_format)
    :param out_path: file to write when dump_rows is True
    :param data_format: layout of each row (premise-only, pair, multi-hypothesis)
    :param write_mode: mode passed to open() when dumping
    :param dump_rows: when True, also write the lines to out_path
    :return: list of serialized row strings
    :raises ValueError: when a field contains a tab (would corrupt the TSV)
        or data_format is unrecognized
    """

    def _ensure_no_tabs(row, cols):
        # A tab inside a field would silently shift every later column.
        # (This replaces pdb.set_trace() debug traps left in the original.)
        for col in cols:
            if "\t" in str(row[col]):
                raise ValueError(f"tab character in column {col!r} of row {row['uid']}")

    processed_rows = []
    for row in rows:
        if data_format in [DataFormat.PremiseOnly, DataFormat.Sequence]:
            _ensure_no_tabs(row, ["uid", "label", "premise"])
            data = f"{row['uid']}\t{row['label']}\t{row['premise']}\n"
        elif data_format == DataFormat.PremiseAndOneHypothesis:
            _ensure_no_tabs(row, ["uid", "label", "premise", "hypothesis"])
            data = (
                f"{row['uid']}\t{row['label']}\t{row['premise']}\t{row['hypothesis']}\n"
            )
        elif data_format == DataFormat.PremiseAndMultiHypothesis:
            _ensure_no_tabs(row, ["uid", "label", "premise"])
            hypothesis = row["hypothesis"]
            for one_hypo in hypothesis:
                if "\t" in str(one_hypo):
                    raise ValueError(f"tab character in hypothesis of row {row['uid']}")
            # Multiple hypotheses are themselves joined with tabs.
            hypothesis = "\t".join(hypothesis)
            data = f"{row['uid']}\t{row['ruid']}\t{row['label']}\t{row['premise']}\t{hypothesis}\n"
        else:
            raise ValueError(data_format)
        processed_rows.append(data)

    # Save data if dump_rows is true
    if dump_rows:
        with open(out_path, mode=write_mode, encoding="utf-8") as out_f:
            out_f.writelines(processed_rows)
    return processed_rows
def load_scitail(file_path, kwargs: dict = None):
    """Load SciTail rows from a tab-separated file.

    Returns dicts with keys uid/premise/hypothesis/label. Lines whose first
    column is "-" are skipped; uid is the running count of kept rows (str).
    """
    kwargs = kwargs or {}  # avoid a shared mutable default; unused, kept for a uniform loader signature
    rows = []
    cnt = 0
    with open(file_path, encoding="utf8") as f:
        for line in f:
            blocks = line.strip().split("\t")
            assert len(blocks) > 2
            if blocks[0] == "-":
                continue
            rows.append({
                "uid": str(cnt),
                "premise": blocks[0],
                "hypothesis": blocks[1],
                "label": blocks[2],
            })
            cnt += 1
    return rows
def load_snli(file_path, kwargs: dict = None):
    """Load SNLI rows from a TSV file.

    kwargs:
        header (bool): skip the first line when True (default True).
    Rows whose last column (the gold label) is "-" are skipped.
    """
    kwargs = kwargs or {}  # avoid a shared mutable default argument
    header = kwargs.get("header", True)
    rows = []
    with open(file_path, encoding="utf8") as f:
        for line in f:
            if header:
                header = False
                continue
            blocks = line.strip().split("\t")
            assert len(blocks) > 10
            if blocks[-1] == "-":
                continue
            # blocks[-1] is always a str here, so the old "lab is None"
            # debugger trap was dead code and has been removed.
            rows.append({
                "uid": blocks[0],
                "premise": blocks[7],
                "hypothesis": blocks[8],
                "label": blocks[-1],
            })
    return rows
def load_mnli(file_path, kwargs: dict = None):
    """Load MNLI rows from a TSV file.

    kwargs:
        header (bool): skip the first line when True (default True).
        is_train (bool): read the gold label from the last column when True;
            otherwise every row gets the placeholder label "contradiction".
    Rows whose last column is "-" (no gold label) are skipped.
    (The previously-read "multi_snli" kwarg was never used and was dropped.)
    """
    kwargs = kwargs or {}  # avoid a shared mutable default argument
    header = kwargs.get("header", True)
    is_train = kwargs.get("is_train", True)
    rows = []
    with open(file_path, encoding="utf8") as f:
        for line in f:
            if header:
                header = False
                continue
            blocks = line.strip().split("\t")
            assert len(blocks) > 9
            if blocks[-1] == "-":
                continue
            # blocks[-1] is always a str, so the old "lab is None" debugger
            # trap was dead code and has been removed.
            lab = blocks[-1] if is_train else "contradiction"
            rows.append({
                "uid": blocks[0],
                "premise": blocks[8],
                "hypothesis": blocks[9],
                "label": lab,
            })
    return rows
def load_mrpc(file_path, kwargs: dict = None):
    """Load MRPC rows from a TSV file.

    kwargs:
        header (bool): skip the first line when True (default True).
        is_train (bool): read the integer label from the first column when
            True; otherwise the label defaults to 0.
    """
    kwargs = kwargs or {}  # avoid a shared mutable default argument
    header = kwargs.get("header", True)
    is_train = kwargs.get("is_train", True)
    rows = []
    cnt = 0
    with open(file_path, encoding="utf8") as f:
        for line in f:
            if header:
                header = False
                continue
            blocks = line.strip().split("\t")
            assert len(blocks) > 4
            lab = int(blocks[0]) if is_train else 0
            rows.append({
                "uid": cnt,
                "premise": blocks[-2],
                "hypothesis": blocks[-1],
                "label": lab,
            })
            cnt += 1
    return rows
def load_qnli(file_path, kwargs: dict = None):
    """Load QNLI rows (classification) from a TSV file.

    kwargs:
        header (bool): skip the first line when True (default True).
        is_train (bool): read the label from the last column when True;
            otherwise every row gets the placeholder "not_entailment".
    """
    kwargs = kwargs or {}  # avoid a shared mutable default argument
    header = kwargs.get("header", True)
    is_train = kwargs.get("is_train", True)
    rows = []
    with open(file_path, encoding="utf8") as f:
        for line in f:
            if header:
                header = False
                continue
            blocks = line.strip().split("\t")
            assert len(blocks) > 2
            # blocks[-1] is always a str, so the old "lab is None" debugger
            # trap was dead code and has been removed.
            lab = blocks[-1] if is_train else "not_entailment"
            rows.append({
                "uid": blocks[0],
                "premise": blocks[1],
                "hypothesis": blocks[2],
                "label": lab,
            })
    return rows
def load_qqp(file_path, kwargs: dict = None):
    """Load QQP rows from a TSV file.

    kwargs:
        header (bool): skip the first line when True (default True).
        is_train (bool): training files carry an integer label in the last
            column (short/malformed lines are skipped); test files have
            exactly 3 columns and get label 0.
    """
    kwargs = kwargs or {}  # avoid a shared mutable default argument
    header = kwargs.get("header", True)
    is_train = kwargs.get("is_train", True)
    rows = []
    cnt = 0
    with open(file_path, encoding="utf8") as f:
        for line in f:
            if header:
                header = False
                continue
            blocks = line.strip().split("\t")
            if is_train and len(blocks) < 6:
                # Malformed training line: skip it (the old unused
                # "skipped" counter was removed).
                continue
            if not is_train:
                assert len(blocks) == 3
            lab = 0
            if is_train:
                lab = int(blocks[-1])
                sample = {
                    "uid": cnt,
                    "premise": blocks[-3],
                    "hypothesis": blocks[-2],
                    "label": lab,
                }
            else:
                sample = {
                    "uid": int(blocks[0]),
                    "premise": blocks[-2],
                    "hypothesis": blocks[-1],
                    "label": lab,
                }
            rows.append(sample)
            cnt += 1
    return rows
def load_rte(file_path, kwargs: dict = None):
    """Load RTE rows from a TSV file.

    kwargs:
        header (bool): skip the first line when True (default True).
        is_train (bool): training lines carry the label in the last column
            (short lines are skipped); test files have exactly 3 columns and
            get the placeholder label "not_entailment".
    """
    kwargs = kwargs or {}  # avoid a shared mutable default argument
    header = kwargs.get("header", True)
    is_train = kwargs.get("is_train", True)
    rows = []
    with open(file_path, encoding="utf8") as f:
        for line in f:
            if header:
                header = False
                continue
            blocks = line.strip().split("\t")
            if is_train and len(blocks) < 4:
                continue
            if not is_train:
                assert len(blocks) == 3
            if is_train:
                sample = {
                    "uid": int(blocks[0]),
                    "premise": blocks[-3],
                    "hypothesis": blocks[-2],
                    "label": blocks[-1],
                }
            else:
                sample = {
                    "uid": int(blocks[0]),
                    "premise": blocks[-2],
                    "hypothesis": blocks[-1],
                    "label": "not_entailment",
                }
            rows.append(sample)
    return rows
def load_wnli(file_path, kwargs: dict = None):
    """Load WNLI rows from a TSV file.

    kwargs:
        header (bool): skip the first line when True (default True).
        is_train (bool): training lines carry an integer label in the last
            column (short lines are skipped); test files have exactly 3
            columns and get label 0.
    """
    kwargs = kwargs or {}  # avoid a shared mutable default argument
    header = kwargs.get("header", True)
    is_train = kwargs.get("is_train", True)
    rows = []
    cnt = 0
    with open(file_path, encoding="utf8") as f:
        for line in f:
            if header:
                header = False
                continue
            blocks = line.strip().split("\t")
            if is_train and len(blocks) < 4:
                continue
            if not is_train:
                assert len(blocks) == 3
            if is_train:
                sample = {
                    "uid": cnt,
                    "premise": blocks[-3],
                    "hypothesis": blocks[-2],
                    "label": int(blocks[-1]),
                }
            else:
                sample = {
                    "uid": cnt,
                    "premise": blocks[-2],
                    "hypothesis": blocks[-1],
                    "label": 0,
                }
            rows.append(sample)
            cnt += 1
    return rows
def load_sst(file_path, kwargs: dict = None):
    """Load SST rows (single sentence + sentiment label) from a TSV file.

    kwargs:
        header (bool): skip the first line when True (default True).
        is_train (bool): training lines are "sentence<TAB>label" (short
            lines skipped); test lines are "uid<TAB>sentence" with label 0.
    """
    kwargs = kwargs or {}  # avoid a shared mutable default argument
    header = kwargs.get("header", True)
    is_train = kwargs.get("is_train", True)
    rows = []
    cnt = 0
    with open(file_path, encoding="utf8") as f:
        for line in f:
            if header:
                header = False
                continue
            blocks = line.strip().split("\t")
            if is_train and len(blocks) < 2:
                continue
            if is_train:
                sample = {"uid": cnt, "premise": blocks[0], "label": int(blocks[-1])}
            else:
                sample = {"uid": int(blocks[0]), "premise": blocks[1], "label": 0}
            cnt += 1
            rows.append(sample)
    return rows
def load_cola(file_path, kwargs: dict = None):
    """Load CoLA rows (sentence acceptability) from a TSV file.

    kwargs:
        header (bool): skip the first line when True (default True).
        is_train (bool): read the integer label from column 1 when True;
            otherwise label defaults to 0.
    """
    kwargs = kwargs or {}  # avoid a shared mutable default argument
    header = kwargs.get("header", True)
    is_train = kwargs.get("is_train", True)
    rows = []
    cnt = 0
    with open(file_path, encoding="utf8") as f:
        for line in f:
            if header:
                header = False
                continue
            blocks = line.strip().split("\t")
            if is_train and len(blocks) < 2:
                continue
            # The original built identical dict literals in both branches;
            # only the label differs, so the branches were merged.
            lab = int(blocks[1]) if is_train else 0
            rows.append({"uid": cnt, "premise": blocks[-1], "label": lab})
            cnt += 1
    return rows
def load_stsb(file_path, kwargs: dict = None):
    """Load STS-B rows from a TSV file; the label is the similarity score
    kept as a string ("0.0" placeholder outside training).

    kwargs:
        header (bool): skip the first line when True (default True).
        is_train (bool): read the score from the last column when True.
    """
    kwargs = kwargs or {}  # avoid a shared mutable default argument
    header = kwargs.get("header", True)
    is_train = kwargs.get("is_train", True)
    rows = []
    cnt = 0
    with open(file_path, encoding="utf8") as f:
        for line in f:
            if header:
                header = False
                continue
            blocks = line.strip().split("\t")
            assert len(blocks) > 8
            if is_train:
                sample = {
                    "uid": cnt,
                    "premise": blocks[-3],
                    "hypothesis": blocks[-2],
                    "label": blocks[-1],
                }
            else:
                sample = {
                    "uid": cnt,
                    "premise": blocks[-2],
                    "hypothesis": blocks[-1],
                    "label": "0.0",
                }
            rows.append(sample)
            cnt += 1
    return rows
def load_conll_ner(file_path, kwargs: dict = None):
    """Load CoNLL NER data: one "token ... tag" per line; blank lines and
    -DOCSTART markers separate sentences. The NER tag is the LAST column.
    """
    kwargs = kwargs or {}  # avoid a shared mutable default argument
    rows = []
    cnt = 0
    sentence = []
    label = []
    with open(file_path, encoding="utf8") as f:
        for line in f:
            line = line.strip()
            if len(line) == 0 or line.startswith("-DOCSTART") or line[0] == "\n":
                if len(sentence) > 0:
                    rows.append({"uid": cnt, "premise": sentence, "label": label})
                    sentence = []
                    label = []
                    cnt += 1
                continue
            splits = line.split(" ")
            sentence.append(splits[0])
            label.append(splits[-1])
    # Bug fix: when the file does not end with a blank line, the final
    # sentence was built but never appended to rows.
    if len(sentence) > 0:
        rows.append({"uid": cnt, "premise": sentence, "label": label})
    return rows
def load_conll_pos(file_path, kwargs: dict = None):
    """Load CoNLL POS data: one "token POS ..." per line; blank lines and
    -DOCSTART markers separate sentences. The POS tag is column 1.
    """
    kwargs = kwargs or {}  # avoid a shared mutable default argument
    rows = []
    cnt = 0
    sentence = []
    label = []
    with open(file_path, encoding="utf8") as f:
        for line in f:
            line = line.strip()
            if len(line) == 0 or line.startswith("-DOCSTART") or line[0] == "\n":
                if len(sentence) > 0:
                    rows.append({"uid": cnt, "premise": sentence, "label": label})
                    sentence = []
                    label = []
                    cnt += 1
                continue
            splits = line.split(" ")
            sentence.append(splits[0])
            label.append(splits[1])
    # Bug fix: when the file does not end with a blank line, the final
    # sentence was built but never appended to rows.
    if len(sentence) > 0:
        rows.append({"uid": cnt, "premise": sentence, "label": label})
    return rows
def load_conll_chunk(file_path, kwargs: dict = None):
    """Load CoNLL chunking data: one "token POS CHUNK" per line; blank lines
    and -DOCSTART markers separate sentences. The chunk tag is column 2.
    """
    kwargs = kwargs or {}  # avoid a shared mutable default argument
    rows = []
    cnt = 0
    sentence = []
    label = []
    with open(file_path, encoding="utf8") as f:
        for line in f:
            line = line.strip()
            if len(line) == 0 or line.startswith("-DOCSTART") or line[0] == "\n":
                if len(sentence) > 0:
                    rows.append({"uid": cnt, "premise": sentence, "label": label})
                    sentence = []
                    label = []
                    cnt += 1
                continue
            splits = line.split(" ")
            sentence.append(splits[0])
            label.append(splits[2])
    # Bug fix: when the file does not end with a blank line, the final
    # sentence was built but never appended to rows.
    if len(sentence) > 0:
        rows.append({"uid": cnt, "premise": sentence, "label": label})
    return rows
def submit(path, data, label_dict=None):
    """Write model predictions to *path* as a tab-separated submission file.

    The file starts with an "index\tprediction" header and rows are sorted
    by numeric uid. When label_dict is given, integer predictions are mapped
    through it before writing.
    """
    predictions, uids = data["predictions"], data["uids"]
    assert len(predictions) == len(uids)
    # Sort rows by numeric uid so the file is in index order.
    ordered = sorted(
        ((int(uid), predictions[idx]) for idx, uid in enumerate(uids)),
        key=lambda pair: pair[0],
    )
    with open(path, "w") as writer:
        writer.write("{}\n".format("index\tprediction"))
        for uid, pred in ordered:
            if label_dict is None:
                writer.write("{}\t{}\n".format(uid, pred))
            else:
                assert type(pred) is int
                writer.write("{}\t{}\n".format(uid, label_dict[pred]))
[
"noreply@github.com"
] |
noreply@github.com
|
488374d79aa245ef32483dbbbe87d6d7825895ad
|
4cc4f6f49bcd1031f017a144e18a7ea68ea7cfeb
|
/Lab09/.svn/text-base/generateReport.py.svn-base
|
e96764d152e3d9da24cd8b7572b3d2a740e12461
|
[] |
no_license
|
apoorvvw/Python
|
abeddddbe59dfe0d17865bfbf06232565dbff104
|
1b2661e5835f9e01ae10a91da21bfe768a858a13
|
refs/heads/master
| 2020-12-24T16:40:34.717506
| 2016-04-03T05:53:16
| 2016-04-03T05:53:16
| 40,106,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,185
|
#!/usr/local/bin/python3.4
__author__ = 'ee364e10'
import re
def moreReg(filename):
    """Parse raw grade lines from *filename* and write a letter-graded
    'finalGrades.xml' to the current directory.

    Input lines are expected to look like
    ``<id>Name:[CCC:MM,CCC:MM]`` where id is 3 word characters, CCC a
    course number and MM a score; separators may be "," or ";".

    NOTE(review): ``FileObject`` is opened but never closed — consider a
    ``with`` block. ``gg`` is unbound (NameError) when ``g[8]`` is neither
    "," nor ";", so the parser assumes a fixed field width. The generated
    ``<ECE...>`` elements are never closed, so the output XML is malformed.
    """
    dict=[]  # NOTE(review): unused, and shadows the builtin `dict`
    with open(filename,"r") as inputFile:
        FileObject = open('finalGrades.xml',"w")
        FileObject.write("<?xml version=\"1.0\"?>\n")
        FileObject.write("<students>\n")
        for line in inputFile:
            # Capture the 3-char id and the student name before ":[".
            f = re.search(r'<(?P<id>\w{3})>(?P<name>.*):\[',line)
            # AT this point I have the name and the id. Now generate the course grades and stuff
            if f:
                FileObject.write(" <student name=\""+f.group("name")+"\" id=\""+f.group("id")+"\">\n")
                print(f.group("id"))
                # Everything between the first ":" and the last "<" is the grade list.
                ff = re.search(r':(.*)<',line)
                g = ff.group(1)
                # Pick the separator by inspecting a fixed character position.
                if g[8] == ",":
                    gg = g.strip().split(",")
                elif g[8] == ";":
                    gg = g.strip().split(";")
                for i in gg:
                    i = i.strip()
                    j = i.strip("[").strip("]")
                    course = j.split(":")[0]
                    marks = j.split(":")[1]
                    # dict[course] = marks
                    print(int(marks))
                    # Map the numeric score to a letter grade.
                    letter = 'A'
                    if int(marks)>=90 and int(marks)<=100:
                        letter = 'A'
                    elif int(marks)>=80 and int(marks)<90:
                        letter = 'B'
                    elif int(marks)>=70 and int(marks)<80:
                        letter = 'C'
                    elif int(marks)>=60 and int(marks)<70:
                        letter = 'D'
                    elif int(marks)<60:
                        letter = 'F'
                    print(course+" : "+marks+" : "+letter)
                    # fg = re.search(r'<(?P<course>ECE\d{3})\sscore=\"(?P<marks>\d{2})\"\sgrade=\"(?P<grade>[A-Z]?)\"/>',line)
                    FileObject.write(" <ECE"+course+ " score=\""+marks+"\" grade=\""+letter+"\"" +">\n")
                FileObject.write(" </student>\n")
        FileObject.write("</students>\n")
if __name__ == "__main__":
    a= moreReg('rawGrades.xml')
    # f = re.search(r'<(?P<course>ECE\d{3})\sscore=\"(?P<marks>\d{2})\"\sgrade=\"(?P<grade>[A-Z]?)\"/>',line)
|
[
"awairaga@purdue.edu"
] |
awairaga@purdue.edu
|
|
3b3e969d18a5a1b06d75911178cd64cceed64785
|
e65fba18d73b9a0d2bb51b8903d9bfff4d621da6
|
/pytd6.py
|
068d407edadaf6b07cdbac1acfa3dca93671d843
|
[] |
no_license
|
stevendevine/pyTD6
|
23fb16c3afd00235759f6ecba6b7a13cf0ecef77
|
7c701db14fe9b286dc0fc29e8a1e580e249889c8
|
refs/heads/main
| 2023-07-15T22:16:20.458018
| 2021-08-24T21:47:17
| 2021-08-24T21:47:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,536
|
py
|
import keyboard, mouse, json, pygetwindow, pywinauto, time, ctypes, pyautogui, PIL, pytesseract
from PIL import ImageFilter
from PIL import ImageChops
from typing import Tuple, Union
from exceptions import *
# required for the mouse.move() to not be offset when display scaling is enabled.
user32 = ctypes.windll.user32
# Opt out of DPI virtualization so mouse coordinates map to physical pixels.
user32.SetProcessDPIAware()
# Point pytesseract at the installed Tesseract OCR executable (Windows path).
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract"
# load json file with monkey information in it.
with open("monkeys.json") as monkeys_json:
    monkeys = json.load(monkeys_json)
# load json file with hero information in it.
with open("heroes.json") as heroes_json:
    heroes = json.load(heroes_json)
# load json file with hotkey information in it.
with open("hotkeys.json") as hotkeys_json:
    hotkeys = json.load(hotkeys_json)
# used to focus btd6 window without IPython error (https://github.com/asweigart/PyGetWindow/issues/16s)
def focus_window(window_title=None):
    """Bring the first window whose title contains *window_title* to the front.

    Raises IndexError when no matching window exists. Uses pywinauto's
    set_focus() to avoid the pygetwindow activate() issue noted above.
    """
    window = pygetwindow.getWindowsWithTitle(window_title)[0]
    # Idiomatic truthiness test instead of "== False".
    if not window.isActive:
        pywinauto.application.Application().connect(
            handle=window._hWnd
        ).top_window().set_focus()
# used to round price to the nearest 5.
def price_round(x, base=5):
    """Round *x* to the nearest multiple of *base* (default 5)."""
    return round(x / base) * base
# these 3 functions are used to get health, cash, and the round respectively.
def get_health():
    """Read the current health value from the BTD6 HUD via OCR; 1 on failure."""
    # Bring the game window to the front before grabbing pixels.
    focus_window("BloonsTD6")
    # Screenshot the health counter region.
    raw = pyautogui.screenshot(region=[120, 20, 150, 40])
    # White-on-dark text OCRs poorly, so invert first.
    raw = ImageChops.invert(raw)
    # Threshold to a pure black/white binary image for maximum contrast.
    def threshold(pixel):
        return 255 if pixel > 10 else 0
    binary = raw.convert("L").point(threshold, mode="1")
    # OCR, restricted to digits only.
    text = pytesseract.image_to_string(
        binary, config="-c tessedit_char_whitelist=0123456789 --psm 6",
    )
    # Parse; fall back to 1 when OCR produced junk.
    try:
        return int(text)
    except ValueError:
        return 1
def get_cash():
    """Read the current cash amount from the BTD6 HUD via OCR; 0 on failure."""
    # Bring the game window to the front before grabbing pixels.
    focus_window("BloonsTD6")
    # Screenshot the cash counter region.
    raw = pyautogui.screenshot(region=[320, 20, 240, 40])
    # White-on-dark text OCRs poorly, so invert first.
    raw = ImageChops.invert(raw)
    # Threshold to a pure black/white binary image for maximum contrast.
    def threshold(pixel):
        return 255 if pixel > 10 else 0
    binary = raw.convert("L").point(threshold, mode="1")
    # OCR, restricted to "$" and digits.
    text = pytesseract.image_to_string(
        binary, config="-c tessedit_char_whitelist=$0123456789 --psm 6",
    )
    # Drop the leading "$" and parse; fall back to 0 when OCR produced junk.
    try:
        return int(text[1:])
    except ValueError:
        return 0
def get_round():
    """Read the round counter ("current/total") from the BTD6 HUD via OCR.

    Returns a list of ints (e.g. [40, 80]); [0] when OCR fails.
    """
    # Bring the game window to the front before grabbing pixels.
    focus_window("BloonsTD6")
    # Screenshot the round counter region.
    raw = pyautogui.screenshot(region=[1360, 30, 200, 40])
    # White-on-dark text OCRs poorly, so invert first.
    raw = ImageChops.invert(raw)
    # Threshold to a pure black/white binary image for maximum contrast.
    def threshold(pixel):
        return 255 if pixel > 10 else 0
    binary = raw.convert("L").point(threshold, mode="1")
    # OCR, restricted to "/" and digits.
    text = pytesseract.image_to_string(
        binary, config="-c tessedit_char_whitelist=/0123456789 --psm 6",
    )
    # Split on "/" and parse both numbers; fall back to [0] on junk.
    try:
        return list(map(int, text.split("/")))
    except ValueError:
        return [0]
class Monkey:
    """Drives a single in-game BTD6 monkey through simulated mouse/keyboard
    input: placement, upgrades, targeting changes and selling.

    All interaction happens by focusing the game window, moving the real
    cursor and sending the hotkeys loaded from hotkeys.json, so calls are
    timing-sensitive (`delay` paces each simulated input).
    """

    def __init__(self, monkey: str, delay: float = 0.1):
        # initialize monkey's attributes.
        self.name = monkey
        self.delay = delay  # seconds to sleep between simulated inputs
        self.upgrades = [0, 0, 0]  # current tier on each of the three upgrade paths
        self.targeting_options = ["First", "Last", "Close", "Strong"]
        self.targeting = "First"
        self.sold = False
        self.placed = False
        # update information about monkey
        # self.get_info()

    def place(self, coordinates: Tuple[int, int]):
        """Place the monkey at the given (x, y) screen coordinates using its
        hotkey. Raises MonkeyPlaced / CoordinateError on misuse."""
        # raise MonkeyPlaced if the monkey has already been placed.
        if self.placed:
            raise MonkeyPlaced
        # raise CoordinateError if invalid type or tuple length.
        if (type(coordinates) != list) and (type(coordinates) != tuple):
            raise CoordinateError
        if len(coordinates) != 2:
            raise CoordinateError
        # activate Bloons TD 6 window.
        focus_window("BloonsTD6")
        # move to the monkey's position
        # send the hotkey for the monkey
        # left click to place the monkey
        # time.sleep required for the monkey to be placed in time.
        mouse.move(coordinates[0], coordinates[1])
        time.sleep(self.delay)
        keyboard.send(hotkeys["Monkeys"][self.name])
        time.sleep(self.delay)
        mouse.click()
        time.sleep(self.delay)
        # record the coordinates of the monkey.
        self.coordinates = coordinates
        # record that the monkey has been placed.
        self.placed = True

    def select(self):
        """Click the monkey to open its in-game menu.
        Raises MonkeyNotPlaced / MonkeySold / CoordinateError on misuse."""
        # raise exceptions if the monkey hasn't been placed or has been already sold.
        if not self.placed:
            raise MonkeyNotPlaced
        if self.sold:
            raise MonkeySold
        # raise CoordinateError if invalid type or tuple length.
        if (type(self.coordinates) != list) and (type(self.coordinates) != tuple):
            raise CoordinateError
        if len(self.coordinates) != 2:
            raise CoordinateError
        mouse.move(self.coordinates[0], self.coordinates[1])
        time.sleep(self.delay)
        mouse.click()
        time.sleep(self.delay)

    def upgrade(self, upgrades: Tuple[int, int, int], skip_esc: bool = False):
        """Upgrade the monkey to the given [path0, path1, path2] tiers.

        Validates BTD6's upgrade rules (at least one path untouched, tiers
        within 0..5, at most one path at tier 3+) and sends the per-path
        upgrade hotkey once for each missing tier.
        skip_esc leaves the upgrade menu open when True.
        """
        # raise exceptions if the monkey hasn't been placed or has been already sold.
        if not self.placed:
            raise MonkeyNotPlaced
        if self.sold:
            raise MonkeySold
        # raise UpgradeError if invalid type or tuple length.
        if (type(upgrades) != list) and (type(upgrades) != tuple):
            raise UpgradeError
        if len(upgrades) != 3:
            raise UpgradeError
        # raise UpgradeError if all paths have tiers active.
        if upgrades.count(0) == 0:
            raise UpgradeError
        # raise UpgradeError there is a path above the 5th tier or below the base tier.
        if max(upgrades) > 5 or min(upgrades) < 0:
            raise UpgradeError
        # raise UpgradeError if there is more than one path at tier 3 or higher
        third_tier_upgrade_count = len([i for i in upgrades if i >= 3])
        if third_tier_upgrade_count > 1:
            raise UpgradeError
        # activate Bloons TD 6 window.
        focus_window("BloonsTD6")
        # move to the monkey's position
        # send the hotkey for (current upgrade - previous upgrade)
        # send escape to get out of upgrade menu
        self.select()
        for path in range(len(upgrades)):
            for tier in range(upgrades[path] - self.upgrades[path]):
                keyboard.send(hotkeys["Monkeys"]["Upgrades"][path])
                time.sleep(self.delay)
        if not skip_esc:
            keyboard.send("esc")
            time.sleep(self.delay)
        # record the upgrades of the monkey.
        self.upgrades = upgrades
        # update information about monkey
        # self.get_info(self.name)

    def target(self, targeting: str):
        """Change the monkey's targeting mode (First/Last/Close/Strong) by
        cycling the targeting hotkeys the required number of steps."""
        # raise TargetingError if targeting not in targeting_options.
        if targeting not in self.targeting_options:
            raise TargetingError
        # find difference between indexes of new targeting and old targeting
        targeting_index_old = self.targeting_options.index(self.targeting)
        targeting_index = self.targeting_options.index(targeting)
        targeting_index_change = targeting_index - targeting_index_old
        self.select()
        # if new targeting index is lower than old one, use reverse targeting hotkey
        if targeting_index_change <= 0:
            for i in range(abs(targeting_index_change)):
                keyboard.send(hotkeys["Monkeys"]["Change Targeting"][0])
                time.sleep(self.delay)
        # if new targeting index is higher than old one, use normal targeting hotkey
        else:
            for i in range(targeting_index_change):
                keyboard.send(hotkeys["Monkeys"]["Change Targeting"][1])
                time.sleep(self.delay)
        # send escape to get out of upgrade menu
        keyboard.send("esc")
        time.sleep(self.delay)
        # record the targetting of the monkey.
        self.targeting = targeting

    def sell(self):
        """Sell the monkey via the sell hotkey and mark it as sold."""
        # raise exceptions if the monkey hasn't been placed or has been already sold.
        if not self.placed:
            raise MonkeyNotPlaced
        if self.sold:
            raise MonkeySold
        # move to the monkey's position
        # sell monkey
        self.select()
        keyboard.send(hotkeys["Gameplay"]["Sell"])
        time.sleep(self.delay)
        # record that the monkey has been sold.
        self.sold = True

    def get_info(self, upgrades: Tuple[int, int, int] = None):
        """Populate description/price attributes for the given upgrade tuple
        from monkeys.json, deriving per-difficulty prices from the medium
        price (easy 0.85x, hard 1.08x, impoppable 1.2x, rounded to 5)."""
        # if no upgrade path is passed, use the one provided when the monkey was generated.
        if upgrades == None:
            upgrades = self.upgrades
        # raise UpgradeError if invalid type or tuple length.
        if (type(upgrades) != list) and (type(upgrades) != tuple):
            raise UpgradeError
        if len(upgrades) != 3:
            raise UpgradeError
        # raise UpgradeError if all paths have tiers active.
        if upgrades.count(0) == 0:
            raise UpgradeError
        # raise UpgradeError there is a path above the 5th tier or below the base tier.
        if max(upgrades) > 5 or min(upgrades) < 0:
            raise UpgradeError
        # raise UpgradeError if there is more than one path at tier 3 or higher
        third_tier_upgrade_count = len([i for i in upgrades if i >= 3])
        if third_tier_upgrade_count > 1:
            raise UpgradeError
        # get main path from the 3, represented by highest tier.
        self.main_tier = max(upgrades)
        self.main_path = upgrades.index(self.main_tier)
        # set basic monkey data
        self.monkey_description = monkeys[self.name]["description"]
        # calculate monkey prices for different difficulties.
        self.monkey_price_medium = monkeys[self.name]["price"]
        self.monkey_price_easy = price_round(0.85 * self.monkey_price_medium)
        self.monkey_price_hard = price_round(1.08 * self.monkey_price_medium)
        self.monkey_price_impoppable = price_round(1.2 * self.monkey_price_medium)
        # reset upgrade info every time this method is called.
        self.upgrade_name = None
        self.upgrade_description = None
        self.upgrade_price_medium = 0
        self.upgrade_price_easy = 0
        self.upgrade_price_hard = 0
        self.upgrade_price_impoppable = 0
        # only run this if the monkey has been upgraded.
        if upgrades != [0, 0, 0]:
            # get basic upgrade data from monkeys.json
            self.upgrade_name = monkeys[self.name]["upgrades"][self.main_path][
                self.main_tier - 1
            ]["name"]
            self.upgrade_description = monkeys[self.name]["upgrades"][self.main_path][
                self.main_tier - 1
            ]["description"]
            # calculate upgrade prices for different difficulties.
            self.upgrade_price_medium = monkeys[self.name]["upgrades"][self.main_path][
                self.main_tier - 1
            ]["price"]
            self.upgrade_price_easy = price_round(0.85 * self.upgrade_price_medium)
            self.upgrade_price_hard = price_round(1.08 * self.upgrade_price_medium)
            self.upgrade_price_impoppable = price_round(1.2 * self.upgrade_price_medium)
        # calculate total prices for different difficulties.
        self.total_price_medium = self.monkey_price_medium
        for path in range(len(upgrades)):
            for tier in range(upgrades[path]):
                self.total_price_medium += monkeys[self.name]["upgrades"][path][tier][
                    "price"
                ]
        self.total_price_easy = price_round(0.85 * self.total_price_medium)
        self.total_price_hard = price_round(1.08 * self.total_price_medium)
        self.total_price_impoppable = price_round(1.2 * self.total_price_medium)
class Hero:
    """Automates a Bloons TD 6 hero via simulated keyboard and mouse input.

    Mirrors the Monkey class interface (place/select/upgrade/target/sell)
    but tracks a single linear level (1-20) rather than three upgrade paths.
    """

    def __init__(self, hero: str, delay: float = 0.1):
        # Hero name; also used as a lookup key by get_info().
        self.name = hero
        # Seconds slept between each simulated input event.
        self.delay = delay
        # 0 means "not placed yet"; place() sets the level to 1.
        self.level = 0
        self.targeting = "First"
        # Order matters: the targeting hotkeys step through this list.
        self.targeting_options = ["First", "Last", "Close", "Strong"]
        self.sold = False
        self.placed = False

    def place(self, coordinates: Tuple[int, int]):
        """Place the hero at the given (x, y) screen coordinates.

        Raises MonkeyPlaced if already placed and CoordinateError if
        coordinates is not a 2-element list/tuple.
        """
        # raise MonkeyPlaced if the hero has already been placed.
        if self.placed:
            raise MonkeyPlaced
        # raise CoordinateError if invalid type or tuple length.
        if (type(coordinates) != list) and (type(coordinates) != tuple):
            raise CoordinateError
        if len(coordinates) != 2:
            raise CoordinateError
        # activate Bloons TD 6 window.
        focus_window("BloonsTD6")
        # move to the hero's position, send the hero hotkey, then left
        # click to place; time.sleep is required so the game keeps up.
        mouse.move(coordinates[0], coordinates[1])
        time.sleep(self.delay)
        keyboard.send(hotkeys["Monkeys"]["Heroes"])
        time.sleep(self.delay)
        mouse.click()
        time.sleep(self.delay)
        # record the coordinates of the hero.
        self.coordinates = coordinates
        # record that the hero has been placed; heroes start at level 1.
        self.placed = True
        self.level = 1

    def select(self, coordinates: Tuple[int, int] = None):
        """Click the hero (or the given coordinates) to open its menu.

        Raises MonkeyNotPlaced / MonkeySold when not placed or already sold,
        CoordinateError for an invalid coordinates argument.
        """
        # raise exceptions if the hero hasn't been placed or has been already sold.
        if not self.placed:
            raise MonkeyNotPlaced
        if self.sold:
            raise MonkeySold
        # if no coordinates are passed, use the ones provided when the hero was placed.
        if coordinates == None:
            coordinates = self.coordinates
        # raise CoordinateError if invalid type or tuple length.
        if (type(coordinates) != list) and (type(coordinates) != tuple):
            raise CoordinateError
        if len(coordinates) != 2:
            raise CoordinateError
        mouse.move(coordinates[0], coordinates[1])
        time.sleep(self.delay)
        mouse.click()
        time.sleep(self.delay)

    def set_level(self, level: int = 1):
        """Record the hero's level without sending any input."""
        self.level = level

    def upgrade(self, level: int = 1, skip_esc: bool = False):
        """Upgrade the hero to `level` by pressing the upgrade hotkey once
        per missing level.

        skip_esc=True leaves the upgrade menu open afterwards.
        Raises MonkeyNotPlaced / MonkeySold / UpgradeError (non-int level,
        level outside 1-20, or level below the current one).
        """
        # raise exceptions if the hero hasn't been placed or has been already sold.
        if not self.placed:
            raise MonkeyNotPlaced
        if self.sold:
            raise MonkeySold
        # raise UpgradeError if invalid type.
        if type(level) != int:
            raise UpgradeError
        # raise UpgradeError if outside 1-20 or below the current level.
        if level > 20 or level < 1 or level < self.level:
            raise UpgradeError
        # select the hero, send the first upgrade hotkey once per level
        # gained, then esc to leave the upgrade menu.
        self.select()
        for l in range(level - self.level):
            keyboard.send(hotkeys["Monkeys"]["Upgrades"][0])
            time.sleep(self.delay)
        if not skip_esc:
            keyboard.send("esc")
            time.sleep(self.delay)
        # record the level of the hero.
        self.set_level(level)
        # update information about hero
        # self.get_info(self.name)

    def target(self, targeting: str = None):
        """Change the hero's targeting mode by cycling the targeting hotkeys.

        Raises TargetingError for a mode not in targeting_options. Assumes
        self.targeting accurately reflects the current in-game state.
        """
        # if no targeting is passed, use the one provided when the hero was generated.
        if targeting == None:
            targeting = self.targeting
        # raise TargetingError if targeting not in targeting_options.
        if targeting not in self.targeting_options:
            raise TargetingError
        # find difference between indexes of new targeting and old targeting
        self.targeting_index_old = self.targeting_options.index(self.targeting)
        self.targeting_index = self.targeting_options.index(targeting)
        self.targeting_index_change = self.targeting_index - self.targeting_index_old
        self.select()
        # if new targeting index is lower than old one, use reverse targeting hotkey
        if self.targeting_index_change <= 0:
            for i in range(abs(self.targeting_index_change)):
                keyboard.send(hotkeys["Monkeys"]["Change Targeting"][0])
                time.sleep(self.delay)
        # if new targeting index is higher than old one, use normal targeting hotkey
        else:
            for i in range(self.targeting_index_change):
                keyboard.send(hotkeys["Monkeys"]["Change Targeting"][1])
                time.sleep(self.delay)
        # send escape to get out of upgrade menu
        keyboard.send("esc")
        time.sleep(self.delay)
        # record the targeting of the hero.
        self.targeting = targeting

    def sell(self):
        """Sell the hero. Raises MonkeyNotPlaced / MonkeySold."""
        # raise exceptions if the hero hasn't been placed or has been already sold.
        if not self.placed:
            raise MonkeyNotPlaced
        if self.sold:
            raise MonkeySold
        # select the hero, then press the sell hotkey.
        self.select()
        keyboard.send(hotkeys["Gameplay"]["Sell"])
        time.sleep(self.delay)
        # record that the hero has been sold.
        self.sold = True

    def get_info(self, name: str = None, upgrades: Tuple[int, int, int] = None):
        """Populate price/description attributes from the monkeys data table.

        NOTE(review): this method appears to be copied verbatim from
        Monkey.get_info — it reads self.upgrades (never set on Hero) and
        indexes monkeys[name] with a hero name, so calling it will likely
        raise AttributeError/KeyError. Confirm intended behavior.
        """
        # if no upgrade path is passed, use the one provided when the monkey was generated.
        if upgrades == None:
            upgrades = self.upgrades
        # if no monkey name is passed, use the one provided when the monkey was generated.
        if name == None:
            name = self.name
        # raise UpgradeError if invalid type or tuple length.
        if (type(upgrades) != list) and (type(upgrades) != tuple):
            raise UpgradeError
        if len(upgrades) != 3:
            raise UpgradeError
        # raise UpgradeError if all paths have tiers active.
        if upgrades.count(0) == 0:
            raise UpgradeError
        # raise UpgradeError there is a path above the 5th tier or below the base tier.
        if max(upgrades) > 5 or min(upgrades) < 0:
            raise UpgradeError
        # raise UpgradeError if there is more than one path at tier 3 or higher
        third_tier_upgrade_count = len([i for i in upgrades if i >= 3])
        if third_tier_upgrade_count > 1:
            raise UpgradeError
        # get main path from the 3, represented by highest tier.
        self.main_tier = max(upgrades)
        self.main_path = upgrades.index(self.main_tier)
        # set basic monkey data
        self.name = name
        self.monkey_description = monkeys[name]["description"]
        # calculate monkey prices for different difficulties.
        self.monkey_price_medium = monkeys[name]["price"]
        self.monkey_price_easy = price_round(0.85 * self.monkey_price_medium)
        self.monkey_price_hard = price_round(1.08 * self.monkey_price_medium)
        self.monkey_price_impoppable = price_round(1.2 * self.monkey_price_medium)
        # reset upgrade info every time this method is called.
        self.upgrade_name = None
        self.upgrade_description = None
        self.upgrade_price_medium = 0
        self.upgrade_price_easy = 0
        self.upgrade_price_hard = 0
        self.upgrade_price_impoppable = 0
        # only run this if the monkey has been upgraded.
        if upgrades != [0, 0, 0]:
            # get basic upgrade data from monkeys.json
            self.upgrade_name = monkeys[name]["upgrades"][self.main_path][
                self.main_tier - 1
            ]["name"]
            self.upgrade_description = monkeys[name]["upgrades"][self.main_path][
                self.main_tier - 1
            ]["description"]
            # calculate upgrade prices for different difficulties.
            self.upgrade_price_medium = monkeys[name]["upgrades"][self.main_path][
                self.main_tier - 1
            ]["price"]
            self.upgrade_price_easy = price_round(0.85 * self.upgrade_price_medium)
            self.upgrade_price_hard = price_round(1.08 * self.upgrade_price_medium)
            self.upgrade_price_impoppable = price_round(1.2 * self.upgrade_price_medium)
        # calculate total prices for different difficulties.
        self.total_price_medium = self.monkey_price_medium
        for path in range(len(upgrades)):
            for tier in range(upgrades[path]):
                self.total_price_medium += monkeys[name]["upgrades"][path][tier][
                    "price"
                ]
        self.total_price_easy = price_round(0.85 * self.total_price_medium)
        self.total_price_hard = price_round(1.08 * self.total_price_medium)
        self.total_price_impoppable = price_round(1.2 * self.total_price_medium)
class Ability:
    """Represents an activated ability of a placed Monkey or Hero.

    Looks up the ability's metadata (name, cooldown, activation type) from
    the monkeys/heroes data tables and can trigger it via its assigned
    activated-ability hotkey slot.
    """

    def __init__(
        self,
        monkey: Union[Monkey, Hero],
        hotkey_index: int,
        ability_name: str = None,
        upgrades: Union[Tuple[int, int, int], int] = None,
    ):
        # initialize ability's attributes.
        self.monkey = monkey
        self.name = monkey.name
        # 1-based slot in the activated-abilities hotkey list.
        self.hotkey_index = hotkey_index
        self.ability_name = ability_name
        if type(monkey) == Monkey:
            # if no upgrade path is passed, use the one provided when the monkey was generated.
            # NOTE(review): an explicitly passed `upgrades` value is never
            # stored or used below — confirm whether it should override
            # monkey.upgrades.
            if upgrades == None:
                self.upgrades = monkey.upgrades
            # raise AbilityError if the monkey's upgrade doesn't have an ability.
            if (
                "abilities"
                not in monkeys[self.name]["upgrades"][monkey.main_path][
                    monkey.main_tier - 1
                ].keys()
            ):
                raise AbilityError
            # set list of monkey's abilities in ability_list
            self.ability_list = monkeys[self.name]["upgrades"][monkey.main_path][
                monkey.main_tier - 1
            ]["abilities"]
            # if ability_name isn't passed, default to the first ability that the monkey has.
            # if it is, find the matching entry in ability_list.
            # NOTE(review): if ability_name matches nothing, self.ability_dict
            # is never set and get_info() below raises AttributeError —
            # confirm desired behavior.
            if ability_name == None:
                self.ability_dict = self.ability_list[0]
            else:
                for ability_dict in self.ability_list:
                    if ability_dict["name"] == ability_name:
                        self.ability_dict = ability_dict
            # update information about ability
            self.get_info()
        elif type(monkey) == Hero:
            # if no level is passed, use the hero's current level.
            if upgrades == None:
                self.level = monkey.level
            # raise AbilityError if the hero's level doesn't grant an ability.
            if "abilities" not in heroes[self.name]["levels"][self.level - 1]:
                # debug print kept from the original implementation.
                print(heroes[self.name]["levels"][self.level - 1])
                raise AbilityError
            # set list of hero's abilities in ability_list
            self.ability_list = heroes[self.name]["levels"][self.level - 1]["abilities"]
            # if ability_name isn't passed, default to the first ability.
            if ability_name == None:
                self.ability_dict = self.ability_list[0]
            else:
                for ability_dict in self.ability_list:
                    if ability_dict["name"] == ability_name:
                        self.ability_dict = ability_dict
            # update information about ability
            self.get_info()

    def activate(
        self,
        hotkey_index=None,
        coordinates_1: Tuple[int, int] = None,
        coordinates_2: Tuple[int, int] = None,
    ):
        """Trigger the ability with its hotkey.

        Depending on the ability's activation type, zero, one or two mouse
        clicks follow the hotkey press:
          type 0 - hotkey only (e.g. Super Monkey Fan Club)
          type 1 - hotkey then one click at coordinates_1 (e.g. Overclock)
          type 2 - hotkey then a click at coordinates_1 and another at
                   coordinates_2 (e.g. Chinook Reposition)
        """
        # if no hotkey_index is passed, use the one provided when the ability was generated.
        if hotkey_index == None:
            hotkey_index = self.hotkey_index
        # type 0 - just activate ability
        if self.ability_dict["type"] == 0:
            keyboard.send(hotkeys["Gameplay"]["Activated Abilities"][hotkey_index - 1])
            time.sleep(self.monkey.delay)
        # type 1 - activate ability then click once.
        elif self.ability_dict["type"] == 1:
            keyboard.send(hotkeys["Gameplay"]["Activated Abilities"][hotkey_index - 1])
            time.sleep(self.monkey.delay)
            mouse.move(coordinates_1[0], coordinates_1[1])
            time.sleep(self.monkey.delay)
            mouse.click()
            time.sleep(self.monkey.delay)
        # type 2 - activate ability then click twice.
        elif self.ability_dict["type"] == 2:
            keyboard.send(hotkeys["Gameplay"]["Activated Abilities"][hotkey_index - 1])
            time.sleep(self.monkey.delay)
            # Bug fix: the first click previously mixed coordinates_1[0]
            # with coordinates_2[1]; both components of the first click
            # must come from coordinates_1.
            mouse.move(coordinates_1[0], coordinates_1[1])
            time.sleep(self.monkey.delay)
            mouse.click()
            time.sleep(self.monkey.delay)
            mouse.move(coordinates_2[0], coordinates_2[1])
            time.sleep(self.monkey.delay)
            mouse.click()
            time.sleep(self.monkey.delay)

    def get_info(self, ability_dict=None):
        """Copy name, cooldown and activation type out of ability_dict
        (defaults to the dict chosen at construction) onto attributes."""
        # if ability_dict isn't provided, use the one provided when the ability was generated.
        if ability_dict == None:
            ability_dict = self.ability_dict
        # turn ability dictionary values into attributes.
        self.ability_name = ability_dict["name"]
        self.ability_cooldown = ability_dict["cooldown"]
        self.ability_type = ability_dict["type"]
def play():
    """Press the play / fast-forward hotkey to start or speed up the round."""
    keyboard.send(hotkeys["Gameplay"]["Play/Fast Forward"])
    time.sleep(0.1)
def confirm():
    """Confirm a dialog: press Enter to accept, then Esc to close any
    remaining menu."""
    keyboard.send("enter")
    time.sleep(0.1)
    keyboard.send("esc")
    time.sleep(0.1)
|
[
"vidh.bhatt@gmail.com"
] |
vidh.bhatt@gmail.com
|
4c269658b728a3ab97fc12a26d4e6a217994a423
|
fb0906f77a44e2e2875ee5dbc16a14f5d70df50e
|
/fotok/config.py
|
edebde423a287e62642cffceca7e034c08c99d55
|
[] |
no_license
|
gsdu8g9/fotok
|
3f30f14a5a195531cbf1ee7ecaa5d87d702a701d
|
bdb9b823c000e9bb9cb465f4e0401f1c093b63f0
|
refs/heads/master
| 2021-01-22T20:44:44.406304
| 2016-02-16T20:20:54
| 2016-02-16T20:20:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
"""
This file stores configuration for both the web app and feed server.
Should be changed on deployment.
"""
import os
basedir = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
SECRET_KEY = 'abcdimverysilly'
MAX_CONTENT_LENGTH = 1024*1024
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'fotok.db')
CACHE_KIND = 'redis'
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
TOKEN_TTL = 1800
RECENT_PHOTOS = 10
RECENT_COMMENTS = 3
FEEDSERVER_HOST = '127.0.0.1'
FEEDSERVER_PORT = 8000
if FEEDSERVER_PORT == 80:
FEEDSERVER_URL = 'http://{}/'.format(FEEDSERVER_HOST)
else:
FEEDSERVER_URL = 'http://{}:{}/'.format(FEEDSERVER_HOST, FEEDSERVER_PORT)
MAX_WIDTH = 1920
MIN_WIDTH = 100
MAX_HEIGHT = 1080
MIN_HEIGHT = 100
|
[
"maksbotan@gentoo.org"
] |
maksbotan@gentoo.org
|
c95bfa05173b3c57990cc2d53cf42b70060aa4c2
|
934c3ed7a95d6509d63c099df9481aa35c2a7c56
|
/app/view_models/__init__.py
|
79902edd521f6ccbc41b47d3c82e0bf184b70ae1
|
[] |
no_license
|
meto001/label
|
b072c9ab0e581799a3eb6a7b4175afae9d04ca10
|
8f2af923379eea9bb89350b2f6b794b4964a1301
|
refs/heads/master
| 2021-06-10T13:11:26.307612
| 2020-08-25T08:41:23
| 2020-08-25T08:41:23
| 181,861,296
| 3
| 0
| null | 2021-03-25T22:56:53
| 2019-04-17T09:37:58
|
Python
|
UTF-8
|
Python
| false
| false
| 71
|
py
|
# _*_ coding:utf-8 _*_
# Package marker for app.view_models; holds author metadata only.
__author__ = 'meto'
__date__ = '2019/4/11 14:42'
|
[
"756246975@qq.com"
] |
756246975@qq.com
|
9836c4db6976992908c3e2fdd5c42aee5b2c2e44
|
66d352e30036b0917e22b2ccde6e0bbc05f9758c
|
/TelluricSpectra/TellRemoval_interptest.py
|
54d1373f0ce141d99b8b9bb15b17c2674b949ca8
|
[] |
no_license
|
jason-neal/Phd-codes
|
8354563b1d2b0fcce39d72adbfd82b65557399b4
|
c947ffa56228746e2e5cdb3ab99e174f6c8e9776
|
refs/heads/master
| 2023-08-30T23:11:55.394560
| 2022-04-24T09:25:28
| 2022-04-24T09:25:28
| 42,106,284
| 0
| 1
| null | 2023-08-16T02:22:59
| 2015-09-08T10:40:26
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,575
|
py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
""" Codes for Telluric contamination removal
Interpolates telluric spectra to the observed spectra.
Divides spectra telluric spectra
can plot result
"""
import argparse
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from astropy.io import fits
from scipy import interpolate
from scipy.interpolate import interp1d
import GaussianFitting as gf
import Obtain_Telluric as obt
def divide_spectra(spec_a, spec_b):
    """Divide spec_a by spec_b element-wise.

    Assumes both spectra have already been interpolated onto the same
    wavelength grid (equal length); raises AssertionError otherwise.

    Fix: the original had two consecutive string literals, so the second
    ("Divide two spectra") was a dead expression statement rather than part
    of the docstring — merged into a single docstring.
    """
    assert(len(spec_a) == len(spec_b)), "Not the same length"
    divide = spec_a / spec_b
    return divide
def match_wl(wl, spec, ref_wl):
    """Interpolate (wl, spec) onto the reference wavelength grid ref_wl.

    Experimental: compares numpy's piecewise-linear interpolation against
    several scipy interp1d kinds and times each one.

    Returns (newspec1, newspec2): the numpy linear result and the scipy
    result — each interp1d call below overwrites newspec2, so the final
    'cubic' result is what gets returned.
    """
    newspec1 = np.interp(ref_wl, wl, spec)  # 1-d piecewise linear interpolation
    test_plot_interpolation(wl, spec,ref_wl,newspec1)
    print("newspec1")
    # cubic spline with scipy
    #linear_interp = interp1d(wl, spec)
    #linear_interp = interp1d(wl, spec, kind='cubic')
    # Time each scipy interpolation kind in turn.
    starttime = time.time()
    newspec2 = interpolate.interp1d(wl, spec, kind='linear')(ref_wl)
    print("linear intergration time =", time.time()-starttime)
    starttime = time.time()
    newspec2 = interpolate.interp1d(wl, spec, kind='slinear')(ref_wl)
    print("slinear intergration time =", time.time()-starttime)
    starttime = time.time()
    newspec2 = interpolate.interp1d(wl, spec, kind='quadratic')(ref_wl)
    print("quadratic intergration time =", time.time()-starttime)
    starttime = time.time()
    newspec2 = interpolate.interp1d(wl, spec, kind='cubic')(ref_wl)
    print("cubic intergration time =", time.time()-starttime)
    #newspec2 = interp1d(wl, spec, kind='cubic')(ref_wl)
    print("newspec2")
    #ewspec2 = sp.interpolate.interp1d(wl, spec, kind='cubic')(ref_wl)
    return newspec1, newspec2  # both returned so the interpolations can be compared
def plot_spectra(wl, spec, colspec="k.-", label=None, title="Spectrum"):
    """Plot a spectrum on the current matplotlib axes (non-blocking).

    Parameters
    ----------
    wl, spec : wavelength and intensity arrays.
    colspec : matplotlib format string for line/marker style.
    label : legend label for this spectrum.
    title : axes title.
    """
    plt.plot(wl, spec, colspec, label=label)
    plt.title(title)
    plt.legend()
    # block=False so the caller keeps running with the window open.
    plt.show(block=False)
    return None
def test_plot_interpolation(x1, y1, x2, y2, methodname=None):
    """Overplot original points and interpolated points for visual comparison.

    Parameters
    ----------
    x1, y1 : original sampling and values.
    x2, y2 : new sampling and interpolated values.
    methodname : optional label naming the interpolation method, shown in
        the plot title.
    """
    plt.plot(x1, y1, label="original values")
    plt.plot(x2, y2, label="new points")
    # Bug fix: plt.title's second positional argument is a fontdict, so the
    # original call plt.title("testing Interpolation: ", methodname) passed
    # the method name where a font dictionary is expected; build the title
    # string explicitly instead.
    plt.title("testing Interpolation: {}".format(methodname))
    plt.legend()
    plt.xlabel("Wavelength (nm)")
    plt.ylabel("Norm Intensity")
    plt.show()
    return None
def telluric_correct(wl_obs, spec_obs, wl_tell, spec_tell):
    """Remove telluric contamination from an observed spectrum.

    Steps:
      1. Interpolate the telluric spectrum onto the observed wavelength
         grid via match_wl().
      2. Divide the observed spectrum by the interpolated telluric model.

    Returns the corrected spectrum array.
    """
    print("Before match_wl")
    # match_wl returns two candidate interpolations; the second (scipy)
    # one is used for the division below.
    interp_numpy, interp_scipy = match_wl(wl_tell, spec_tell, wl_obs)
    print("After match_wl")
    print("Before divide_spectra")
    corrected_spec = divide_spectra(spec_obs, interp_scipy)
    print("After divide_spectra")
    return corrected_spec
def _parser():
"""Take care of all the argparse stuff.
:returns: the args
"""
parser = argparse.ArgumentParser(description='Telluric Removal')
parser.add_argument('fname', help='Input fits file')
parser.add_argument('-o', '--output', default=False,
help='Ouput Filename',)
args = parser.parse_args()
return args
def main(fname, output=False):
    """Load an observed spectrum and its matching TAPAS telluric model,
    plot both, run the telluric correction, and plot the result.

    Parameters
    ----------
    fname : path to the input fits file; must contain "Wavelength" and
        "Extracted_DRACS" columns and a "DATE-OBS" header card.
    output : NOTE(review): accepted from the CLI but never used in this
        function — presumably an intended output filename; confirm.
    """
    homedir = os.getcwd()  # NOTE(review): unused in this function.
    data = fits.getdata(fname)
    wl = data["Wavelength"]
    I = data["Extracted_DRACS"]
    hdr = fits.getheader(fname)
    # DATE-OBS is split into date and time at the "T" separator.
    datetime = hdr["DATE-OBS"]
    obsdate, obstime = datetime.split("T")
    obstime, __ = obstime.split(".")  # drop fractional seconds
    # hard-coded location of the TAPAS telluric models.
    tellpath = "/home/jneal/Phd/data/Tapas/"
    tellname = obt.get_telluric_name(tellpath, obsdate, obstime)
    print("tell name", tellname)
    tell_data = obt.load_telluric(tellpath, tellname[0])
    # pad the observed wavelength range slightly before slicing the model.
    wl_lower = np.min(wl/1.0001)
    wl_upper = np.max(wl*1.0001)
    tell_data = gf.slice_spectra(tell_data[0], tell_data[1], wl_lower, wl_upper)
    #tell_data =
    print("After slice spectra")
    # plot the observed spectrum against the telluric model.
    plt.figure()
    plt.plot(wl, I, label="Spectra")
    plt.plot(tell_data[0], tell_data[1], label="Telluric lines")
    plt.show()
    # Loaded in the data
    # Now perform the telluric removal
    I_corr = telluric_correct(wl, I, tell_data[0], tell_data[1])
    print("After telluric_correct")
    # plot the corrected spectrum against the telluric model.
    plt.figure()
    plt.plot(wl, I_corr, label="Corrected Spectra")
    plt.plot(tell_data[0], tell_data[1], label="Telluric lines")
    plt.show()
if __name__ == "__main__":
args = vars(_parser())
fname = args.pop('fname')
opts = {k: args[k] for k in args}
main(fname, **opts)
""" Some test code for testing functions """
sze = 20
x2 = range(sze)
y2 = np.random.randn(len(x2)) + np.ones_like(x2)
y2 = 0.5 * np.ones_like(x2)
x1 = np.linspace(1, sze-1.5, 9)
y1 = np.random.randn(len(x1)) + np.ones_like(x1)
y1 = np.ones_like(x1)
print(x1)
print(x2)
#print(y1)
#print(y2)
y1_cor = telluric_correct(x1, y1, x2, y2)
print(x1)
print(y1)
print(y1_cor)
|
[
"jason.neal@astro.up.pt"
] |
jason.neal@astro.up.pt
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.