Dataset columns and value statistics:

| column | dtype | values |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 2–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–69 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–118 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | lengths 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | lengths 2–10.3M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 0–212 |

Sample records follow; each record lists its metadata fields, then the file content, then its authors and author_id.
path: /bboard/urls.py | repo_name: kotbegemot1/billboard-m | branch_name: refs/heads/main | detected_licenses: [] | license_type: no_license
blob_id: 85677bd4300ff04247348fd1fbac5edc67089b73 | directory_id: 0e584ab671bfe87eee671119b9d3d3f42111a650 | content_id: 12474158e94731d873a76de0a07e5a663d850b99
snapshot_id: 218019e3bdd0f821a125e20729be118398dfe644 | revision_id: 65d20d123e09fcbf9484f5f37ff943517e36508f
visit_date: 2023-07-19T03:31:44.481340 | revision_date: 2021-09-29T11:28:15 | committer_date: 2021-09-29T11:28:15
github_id: 402,050,084 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,247 | extension: py
content:
"""bboard URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.views import serve
from django.views.decorators.cache import never_cache
urlpatterns = [
path('admin/', admin.site.urls),
path('captcha/', include('captcha.urls')),
path('api/', include('api.urls')),
path('', include('main.urls', namespace='')),
]
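# In development (DEBUG=True), serve static files uncached and media uploads directly from Django.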
if settings.DEBUG:
urlpatterns.append(path('static/<path:path>', never_cache(serve)))
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
authors: ["kk1slorod@gmail.com"] | author_id: kk1slorod@gmail.com

path: /progress_proposal/nb_tweet.py | repo_name: grant-park/nlp_project | branch_name: refs/heads/master | detected_licenses: [] | license_type: no_license
blob_id: 9aee4b79f4e49359063df643cc7d1abf5693d4e2 | directory_id: e28d3a25cb399aa407769abe820ce8236c13fcce | content_id: 25c00ed8cb1d1446f6bea02de5bc8846344ec294
snapshot_id: 42d515f0bdc94f6b4f21ee50c70b5ff7d8ed21a6 | revision_id: 01662cf1340c82cc8092f3296a90b1581f909422
visit_date: 2021-06-09T17:36:10.301519 | revision_date: 2016-12-22T23:21:37 | committer_date: 2016-12-22T23:21:37
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,665 | extension: py
content:
from __future__ import division
from collections import defaultdict
import math
import os
PATH_TO_DATA = 'twitter_dataset'
TRAIN_DIR, TRAIN_FILE = os.path.join(PATH_TO_DATA, 'train'), "trainingdata-all-annotations.txt"
TEST_DIR, TEST_FILE = os.path.join(PATH_TO_DATA, 'test'), "testdata-taskA-all-annotations.txt"
class NB_Baseline:
def __init__(self):
self.targets = ['Atheism', 'Legalization of Abortion', 'Feminist Movement', 'Climate Change is a Real Concern', 'Hillary Clinton']
self.vocab = set()
self.doc_count_dict = { "Atheism": 0.0, "Legalization of Abortion": 0.0, "Feminist Movement": 0.0, "Climate Change is a Real Concern": 0.0, "Hillary Clinton": 0.0 }
self.token_count_dict = { "Atheism": 0.0, "Legalization of Abortion": 0.0, "Feminist Movement": 0.0, "Climate Change is a Real Concern": 0.0, "Hillary Clinton": 0.0 }
self.doc_token_count_dict = { "Atheism": defaultdict(float), "Legalization of Abortion": defaultdict(float), "Feminist Movement": defaultdict(float), "Climate Change is a Real Concern": defaultdict(float), "Hillary Clinton": defaultdict(float) }
self.total_doc_count = 0
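    # train() fills the per-target document counts, token counts, and vocabulary from a tab-separated annotation file.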
def train(self, dir, filename):
with open(os.path.join(dir, filename),'r') as doc:
iterdoc = iter(doc)
attr = next(iterdoc).split() # differentiate first line
for index,line in enumerate(iterdoc):
entry = line.split("\t")
target = entry[1]
                tweet_content = [x.lower() for x in entry[2].split()]  # keep a list so len() and the loop below both see the tokens
self.total_doc_count += 1
self.doc_count_dict[target] += 1
self.token_count_dict[target] += len(tweet_content)
for each in tweet_content:
if each not in self.doc_token_count_dict[target]:
self.doc_token_count_dict[target][each] = 0
self.doc_token_count_dict[target][each] += 1
self.vocab.add(each)
def p_word_given_label_and_psuedocount(self, word, label, alpha):
return (self.doc_token_count_dict[label][word] + alpha)/(self.token_count_dict[label] + (len(self.vocab)*alpha))
def log_posterior(self, bag, label, alpha):
return math.log(self.doc_count_dict[label]/self.total_doc_count) + sum(map(lambda x: math.log(self.p_word_given_label_and_psuedocount(x,label,alpha)), bag))
def classify(self, bag, alpha):
return max(map(lambda x: (x, self.log_posterior(bag,x,alpha)), self.targets), key = lambda x: x[1])[0]
def eval(self, alpha):
accuracy = 0
total = 0
with open(os.path.join(TEST_DIR, TEST_FILE),'r') as doc:
iterdoc = iter(doc)
attr = next(iterdoc).split() # differentiate first line
for index,line in enumerate(iterdoc):
entry = line.split("\t")
                if entry[1] == self.classify(entry[2].lower().split(), alpha):  # tokenize the tweet, matching how training tokenizes
accuracy += 1
total += 1
return accuracy/total
def plot_psuedocount_vs_accuracy(psuedocounts, accuracies):
import matplotlib.pyplot as plt
plt.plot(psuedocounts, accuracies)
plt.xlabel('Psuedocount Parameter')
plt.ylabel('Accuracy (%)')
plt.title('Psuedocount Parameter vs. Accuracy Experiment')
plt.show()
if __name__ == '__main__':
nb = NB_Baseline()
nb.train(TRAIN_DIR,TRAIN_FILE)
test = "i love hillary clinton"
    print(nb.classify(test.split(), 36))
print(nb.eval(36))
# Plot
psuedocounts = range(1,50)
    accuracies = [nb.eval(x) for x in psuedocounts]
plot_psuedocount_vs_accuracy(psuedocounts, accuracies)
authors: ["gpark18@amherst.edu"] | author_id: gpark18@amherst.edu

path: /python_socket_demo/block/c.py | repo_name: cdhello/git_code | branch_name: refs/heads/master | detected_licenses: [] | license_type: no_license
blob_id: 964b6f7b7cee9313ed96171733560c201741a868 | directory_id: ff57cd713e5ed1cbca6c621e505f29eae42aa48f | content_id: 04895609b6aeac5dd1103e311f40109865fae974
snapshot_id: f918c62d49b7a81e4761f128c34fb26576c1d31f | revision_id: 51220a15702e94b7f5cbb7f6178aca3cd80c18a4
visit_date: 2021-03-30T20:52:01.413224 | revision_date: 2020-05-16T16:47:38 | committer_date: 2020-05-16T16:47:38
github_id: 124,522,021 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,725 | extension: py
content:
import sys
import socket
import time
import logging
BUF_SIZE = 256
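# default file to send when no path is passed on the command line (sys.argv[1] overrides it in run())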
srcfilename = "a.pptx"
def run():
print "hi, i m client";
global srcfilename
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM);
ser_add = "10.140.162.26";
ser_port = 1567;
try:
result = s.connect((ser_add, ser_port));
except:
print "connect failed, return"
return
else:
print "connect ok"
if len(sys.argv)<2:
pass
else:
srcfilename = sys.argv[1]
ltime = time.localtime(time.time())
print "time: %d:%d:%d"%(ltime.tm_hour,ltime.tm_min,ltime.tm_sec)
try:
srcfile = open(srcfilename, "rb")
except:
print "There is no file named '%s'. return"%(srcfilename)
s.close();
return
else:
print "File '%s' is opend"%srcfilename
outputfile = open("output.txt","wb")
timestart = time.time()
bytesCount = 0;
bytesCount2 = 0
while(1):
data2send=srcfile.read(BUF_SIZE)
if (0 == len(data2send)):
s.shutdown(socket.SHUT_WR); #inform server
break;
bytesCount2+=len(data2send)
#logging.info("read %u bytes"%(len(data2send)))
#s.send(data2send); # return the length of the sent data.
s.sendall(data2send); # try to send all of the data, return none, else raise a exception
recv_data = s.recv(BUF_SIZE);
logging.info("read %u, write %u "%(len(data2send), len(recv_data)))
outputfile.write(recv_data);
bytesCount += len(recv_data);
try:
recv_data = s.recv(BUF_SIZE);
except:
print "recv failed."
while(1):
if (0 == len(recv_data)):
break;
logging.info("(last data) write %u "%( len(recv_data)))
outputfile.write(recv_data);
bytesCount += len(recv_data);
recv_data = s.recv(BUF_SIZE);
timeend = time.time()
print "seconds: %lf, bytes: %u, rate: %lf(KBs)"%(timeend - timestart, bytesCount, (bytesCount>>10)/(timeend - timestart));
print "2 byes %u"%bytesCount2
ltime = time.localtime(time.time())
print "time :%d:%d:%d"%(ltime.tm_hour,ltime.tm_min,ltime.tm_sec)
outputfile.close()
srcfile.close()
s.close();
print "client bye bye"
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%d %b %Y %H:%M:%S',
filename='client.log',
filemode='w')
run()
authors: ["cdh130128@126.com"] | author_id: cdh130128@126.com

path: /src/ssllabs/migrations/0001_initial.py | repo_name: kidmose/tlsscout | branch_name: refs/heads/master | detected_licenses: ["BSD-3-Clause"] | license_type: permissive
blob_id: 1f69d2212665b7c450ddadc6b800bd1963b6a8a7 | directory_id: 06190b30798084ff91261c554accf5ff47f7fabf | content_id: 040a4ea0cb8e763ad9b4d14cabcfcb71c46349f0
snapshot_id: a51624377523200334730c565b964188ddc622d3 | revision_id: ebb6bf296f16db1b247952d584df671b98945243
visit_date: 2021-01-22T19:53:51.672518 | revision_date: 2016-04-15T07:29:22 | committer_date: 2016-04-15T07:29:22
github_id: 56,300,248 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2016-04-15T07:27:44 | gha_created_at: 2016-04-15T07:27:44 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,683 | extension: py
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import uuidfield.fields
class Migration(migrations.Migration):
dependencies = [
('sitecheck', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ApiClientState',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sleep_until', models.DateTimeField(null=True)),
('max_concurrent_assessments', models.IntegerField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RequestLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('datetime', models.DateTimeField(auto_now_add=True)),
('uuid', uuidfield.fields.UUIDField(max_length=32)),
('request_url', models.CharField(max_length=1000)),
('request_headers', models.TextField()),
('request_body', models.TextField(null=True)),
('response_code', models.IntegerField(null=True)),
('response_headers', models.TextField(null=True)),
('response_body', models.TextField(null=True)),
('sitecheck', models.ForeignKey(related_name='requestlogs', to='sitecheck.SiteCheck', null=True)),
],
options={
'ordering': ['-datetime'],
},
bases=(models.Model,),
),
]
authors: ["thomas@gibfest.dk"] | author_id: thomas@gibfest.dk

path: /unet/config.py | repo_name: shuaigezhu/star_galaxy_classification | branch_name: refs/heads/master | detected_licenses: [] | license_type: no_license
blob_id: 05572bbf0ccce85e4248ee4ba97f4167ca183086 | directory_id: d0a2771cb86fffbc213597125ed140a37e00f2e9 | content_id: 3876571f6790b968ef2c9d9619f640e4a8ceb705
snapshot_id: eff7a135419036f8a28a6f784049f44972c2426a | revision_id: a319beca9f8d2a62dd0714d78f70f788603219d6
visit_date: 2020-12-12T17:40:42.575255 | revision_date: 2020-03-30T15:40:29 | committer_date: 2020-03-30T15:40:29
github_id: 234,187,615 | star_events_count: 0 | fork_events_count: 2 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,704 | extension: py
content:
import argparse
# ----------------------------------------
# Global variables within this script
arg_lists = []
parser = argparse.ArgumentParser()
def str2bool(v):
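    # argparse-friendly boolean: only "true" and "1" (case-insensitive) map to True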
return v.lower() in ("true", "1")
def add_argument_group(name):
arg = parser.add_argument_group(name)
arg_lists.append(arg)
return arg
# ----------------------------------------
# Arguments for the main program
main_arg = add_argument_group("Main")
main_arg.add_argument("--mode", type=str,
default="train",
choices=["train", "test"],
help="Run mode")
# ----------------------------------------
# Arguments for training
train_arg = add_argument_group("Training")
train_arg.add_argument("--data_dir", type=str,
default="/Users/kwang/Downloads/cifar-10-batches-py",
help="Directory with CIFAR10 data")
train_arg.add_argument("--learning_rate", type=float,
default=1e-4,
help="Learning rate (gradient step size)")
train_arg.add_argument("--batch_size", type=int,
default=100,
help="Size of each training batch")
train_arg.add_argument("--num_epoch", type=int,
default=100,
help="Number of epochs to train")
train_arg.add_argument("--val_intv", type=int,
default=1000,
help="Validation interval")
train_arg.add_argument("--rep_intv", type=int,
default=1000,
help="Report interval")
train_arg.add_argument("--log_dir", type=str,
default="./logs",
help="Directory to save logs and current model")
train_arg.add_argument("--save_dir", type=str,
default="./save",
help="Directory to save the best model")
train_arg.add_argument("--resume", type=str2bool,
default=True,
help="Whether to resume training from existing checkpoint")
# ----------------------------------------
# Arguments for model
model_arg = add_argument_group("Model")
model_arg.add_argument("--feature_type", type=str,
default="hog",
choices=["hog", "h_histogram", "rgb"],
help="Type of feature to be used")
model_arg.add_argument("--loss_type", type=str,
default="cross_entropy",
choices=["cross_entropy", "svm"],
help="Type of data loss to be used")
model_arg.add_argument("--normalize", type=str2bool,
default=True,
help="Whether to normalize with mean/std or not")
model_arg.add_argument("--l2_reg", type=float,
default=1e-4,
help="L2 Regularization strength")
model_arg.add_argument("--num_unit", type=int,
default=64,
help="Number of neurons in the hidden layer")
model_arg.add_argument("--num_hidden", type=int,
default=3,
help="Number of hidden layers")
model_arg.add_argument("--num_class", type=int,
default=10,
help="Number of classes in the dataset")
model_arg.add_argument("--activ_type", type=str,
default="relu",
choices=["relu", "tanh"],
help="Activation type")
def get_config():
config, unparsed = parser.parse_known_args()
return config, unparsed
def print_usage():
parser.print_usage()
authors: ["noreply@github.com"] | author_id: shuaigezhu.noreply@github.com

path: /EE/Caaa(openMV-arduino communication)/drive_newest.py | repo_name: tanMcCree/EE | branch_name: refs/heads/main | detected_licenses: [] | license_type: no_license
blob_id: 4f00554ddd6bda5f64f1638ae2fe1ee98b3cd7ad | directory_id: 5331ae322ed9c1ff76e83840a0f9a392f7b97c41 | content_id: 6b31e0ea64b32fbfbdf3245a9d63c3844353a01b
snapshot_id: 4560d5c352c045e28e22c6334c2bed907ae905a2 | revision_id: 11d2e8293e5c9709d57fbdfd4fe5c2941ee4b1b4
visit_date: 2023-06-14T20:37:31.520964 | revision_date: 2021-06-27T02:58:49 | committer_date: 2021-06-27T02:58:49
github_id: 380,638,027 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,800 | extension: py
content:
# UART Control
import time
from pyb import UART
uart = UART(1, 115200)
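# Commands are written to the Arduino one character at a time (sending_data); replies are read back byte by byte (recive_data).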
#uart.init(115200, bits=8, parity=None, stop=1) # init with given parameters
def sending_data(data):
#data = "SY 90 + "
s = list(data)
for i in range(len(s)):
uart.write(s[i])
time.sleep(1)
def recive_data():
    while uart.any() == 0:
        pass
while((uart.any())!=0):
tmp_data = uart.readchar()
#print(tmp_data)
return tmp_data
"""
def s_and_r_data(int value):
while(True):
sending_data()
recive_data()
break
"""
def GetAngle():
sending_data("Y")
time.sleep(20)
yaw = recive_data()
time.sleep(20)
sending_data("P")
time.sleep(20)
pitch = recive_data()
time.sleep(20)
return yaw,pitch
def Motor(v1, v2, v3, v4):
sending_data("M "+str(v1)+" "+str(v2)+" "+str(v3)+" "+str(v4)+" ")
time.sleep(20)
#def Motor_L(v1, v2):
# sending_data("ML "+str(v1)+" "+str(v2)+" ")
# time.sleep(20)
def Servo_Y(a):
if(a>=0):
sending_data("SY "+str(a)+" "+"+"+" ")
else:
a=-a;
sending_data("SY "+str(a)+" "+"-"+" ")
time.sleep(20)
def Servo_P(a):
if(a>=0):
sending_data("SP "+str(a)+" "+"+"+" ")
else:
a=-a;
sending_data("SP "+str(a)+" "+"-"+" ")
time.sleep(20)
def Fire1():
sending_data("F")
time.sleep(500)
time.sleep(20)
def Fire2():
sending_data("H")
time.sleep(20)
def GetDistance():
sending_data("D")
time.sleep(20)
d = uart.readchar()
time.sleep(20)
return d
def Show1():
sending_data("A")
time.sleep(100)
def Show2():
sending_data("B")
time.sleep(100)
authors: ["noreply@github.com"] | author_id: tanMcCree.noreply@github.com

path: /python-pass.py | repo_name: shobhit-bhatt-14/Getting-Started-with-Python | branch_name: refs/heads/main | detected_licenses: [] | license_type: no_license
blob_id: 85b05ddcac79aafc6d9a2166090d92c50d8ce798 | directory_id: cc22be434dd05f7d5637f3beffe8681da88afc45 | content_id: 42ab0bbddc83a51704f606c0889cd7e12df7e936
snapshot_id: f877fb0fb23ea5e80ff13e1129359793ec997550 | revision_id: a0aada154ae777bb140ed984f0e40cf483b2d8b1
visit_date: 2023-06-04T15:49:34.851813 | revision_date: 2021-06-22T19:54:17 | committer_date: 2021-06-22T19:54:17
github_id: 374,080,922 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 88 | extension: py
content:
# initialize a blank loop func or condition
for i in range(1, 5):
pass
print('End')
authors: ["shobhit.bhatt.14@gmail.com"] | author_id: shobhit.bhatt.14@gmail.com

path: /SuPyMode/superset.py | repo_name: MartinPdeS/SuPyMode | branch_name: refs/heads/master | detected_licenses: ["MIT"] | license_type: permissive
blob_id: 15cb6f4a4b2a69e6eebe258bef70f4ee918bcdde | directory_id: 38ada05c7ce06c78f3196e26d94bd7943b7866fc | content_id: 9f482342485331a82ed53c4c087737cd5df94e0b
snapshot_id: 467ac526bea47513680a50d217d5ba893618dc70 | revision_id: f6dd3f1e390728c189ce053ee164ca3ebc776e44
visit_date: 2023-08-17T12:18:22.709146 | revision_date: 2023-08-14T15:36:39 | committer_date: 2023-08-14T15:36:39
github_id: 366,930,899 | star_events_count: 4 | fork_events_count: 2 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 45,834 | extension: py
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Built-in imports
import pickle
import numpy
import logging
import matplotlib.pyplot as plt
from dataclasses import dataclass
from pathlib import Path
from itertools import combinations, product
# Third-party imports
from scipy.interpolate import interp1d
from scipy.integrate import solve_ivp
import pyvista
# Local imports
from SuPyMode.supermode import SuperMode
from SuPyMode.slice_structure import SliceStructure
from SuPyMode.tools import plot_style
from SuPyMode.tools.utils import test_valid_input, get_intersection
from SuPyMode.profiles import AlphaProfile
from SuPyMode.tools import directories
from MPSPlots.Render2D import Scene2D, SceneList, Axis, Multipage, Line, ColorBar, Mesh, Scatter
from MPSPlots import colormaps
@dataclass
class SuperSet(object):
"""
    Solver with which the computed SuperSet modes are associated.
    .. note::
        This class represents the set of supermodes of the fiber-optic structure, hence the name.
        It has no link to the C++ code; it is pure Python.
        The items of this class are the supermodes generated from within the SuPySolver.
"""
parent_solver: object
wavelength: float
def __post_init__(self):
self.wavenumber = 2 * numpy.pi / self.wavelength
self._transmission_matrix = None
self.supermodes = []
self._itr_to_slice = interp1d(self.itr_list, numpy.arange(self.itr_list.size))
self._slice_to_itr = interp1d(numpy.arange(self.itr_list.size), self.itr_list)
def __getitem__(self, idx: int):
return self.supermodes[idx]
def __setitem__(self, idx: int, value):
self.supermodes[idx] = value
@property
def geometry(self):
"""
Return geometry of the coupler structure
"""
return self.parent_solver.geometry
@property
def itr_list(self):
"""
Return list of itr value that are used to compute the supermodes
"""
return self.parent_solver.itr_list
@property
def coordinate_system(self):
"""
Return axes object of the geometry
"""
return self.parent_solver.geometry.coordinate_system
@property
def fundamental_supermodes(self):
return self.get_fundamental_supermodes(tolerance=1e-3)
@property
def non_fundamental_supermodes(self):
return self.get_non_fundamental_supermodes(tolerance=1e-3)
@property
def transmission_matrix(self) -> numpy.ndarray:
"""
        Return the supermode transfer matrix
"""
if self._transmission_matrix is None:
self.compute_transmission_matrix()
return self._transmission_matrix
def itr_to_slice(self, itr_list: list[float]) -> list[int]:
"""
        Return the slice numbers associated to the given itr values.
        :param itr_list: Inverse taper ratio values at which to evaluate the slices.
        :type itr_list: list[float]
        :returns: List of slice numbers.
        :rtype: list[int]
"""
itr_list = numpy.asarray(itr_list)
return numpy.floor(self._itr_to_slice(itr_list)).astype(int)
def slice_to_itr(self, slice_list: list[int]) -> list[float]:
"""
        Return the itr values associated to the given slice numbers.
        :param slice_list: Slice numbers at which to evaluate the itr.
        :type slice_list: list[int]
        :returns: List of itr values.
:rtype: list[float]
"""
slice_list = numpy.asarray(slice_list) % self.itr_list.size
return self._slice_to_itr(slice_list)
def get_fundamental_supermodes(self, *, tolerance: float = 0.1) -> list[SuperMode]:
"""
Returns list of modes that do not spatially overlap and that have the highest
propagation constant values.
:param tolerance: The tolerance to which consider the spatial overlap
:type tolerance: float
:returns: List of the fundamental modes.
:rtype: list
"""
self.sorting_modes_beta()
fundamental_supermodes = [self.supermodes[0]]
def coupling(mode_0: SuperMode, mode_1: SuperMode):
field_0 = numpy.abs(mode_0.field[0])
field_1 = numpy.abs(mode_1.field[0])
return numpy.sum(field_0 * field_1)
for mode_0 in self.supermodes:
couplings = [
coupling(mode_0, mode_1) for mode_1 in fundamental_supermodes
]
couplings = numpy.asarray(couplings)
if numpy.any(couplings > tolerance):
continue
fundamental_supermodes.append(mode_0)
return fundamental_supermodes
def get_non_fundamental_supermodes(self, *, tolerance: float = 0.1) -> list[SuperMode]:
"""
        Returns the list of modes that do not spatially overlap with the fundamental modes.
        Those modes are usually higher-order or cladding supermodes.
:param tolerance: The tolerance to which consider the spatial overlap
:type tolerance: float
:returns: List of the non-fundamental modes.
:rtype: list
"""
non_fundamental_supermodes = self.supermodes
for supermodes in self.get_fundamental_supermodes(tolerance=tolerance):
non_fundamental_supermodes.remove(supermodes)
return non_fundamental_supermodes
def get_mode_solver_classification(self) -> list[list[SuperMode]]:
"""
Returns a list containing the modes ordered per solver number.
:returns: The mode solver classification.
:rtype: list[list[SuperMode]]
"""
solver_numbers = [mode.solver_number for mode in self]
number_of_solvers = len(set(solver_numbers))
mode_solver_array = [
[] for i in range(number_of_solvers)
]
for mode in self:
mode_solver_array[mode.solver_number].append(mode)
return mode_solver_array
def label_supermodes(self, *label_list) -> None:
for n, label in enumerate(label_list):
self[n].label = label
setattr(self, label, self[n])
def reset_labels(self) -> None:
        for n, super_mode in enumerate(self):  # enumerate is needed to get both the index and the mode
super_mode.label = f'mode_{n}'
def swap_supermode_order(self, idx0: int, idx1: int) -> "SuperSet":
"""
Swap two supermodes.
        It doesn't change any of their characteristics, it only changes the
        order in which they will appear, notably in the plots.
:param idx0: Index of the first mode to swap
:type idx0: int
:param idx1: Index of the second mode to swap
:type idx1: int
"""
self.supermodes[idx0], self.supermodes[idx1] = self.supermodes[idx1], self.supermodes[idx0]
return self
def get_slice_structure(self, *, itr: int, add_symmetries: bool = True) -> SliceStructure:
x, y = self.supermodes[0].get_axis_vector()
output_slice = SliceStructure(
parent_superset=self,
itr=itr,
supermodes=self.supermodes,
add_symmetries=add_symmetries
)
return output_slice
def compute_transmission_matrix(self) -> None:
"""
Calculates the transmission matrix with only the propagation constant included.
:returns: The transmission matrix.
:rtype: numpy.ndarray
"""
shape = [
len(self.supermodes),
len(self.supermodes),
len(self.itr_list)
]
self._transmission_matrix = numpy.zeros(shape)
for mode in self.supermodes:
self._transmission_matrix[mode.mode_number, mode.mode_number, :] = mode.beta._data * 2.0 * numpy.pi
def add_coupling_to_t_matrix(self, *, t_matrix: numpy.ndarray, adiabatic_factor: numpy.ndarray) -> numpy.ndarray:
"""
Add the coupling coefficients to the transmission matrix.
:param t_matrix: The t matrix to which add the coupling values
:type t_matrix: numpy.ndarray
:param adiabatic_factor: The adiabatic factor, if None, it is set to one meaning normalized coupling [z-independent]
:type adiabatic_factor: numpy.ndarray
:returns: The transmission matrix.
:rtype: numpy.ndarray
"""
if adiabatic_factor is None:
adiabatic_factor = 1
size = t_matrix.shape[-1]
t_matrix = t_matrix.astype(complex)
for mode_0, mode_1 in combinations(self.supermodes, 2):
coupling = mode_0.normalized_coupling.get_values(mode_1)[:size] * adiabatic_factor
t_matrix[mode_0.mode_number, mode_1.mode_number, :] = - coupling
t_matrix[mode_1.mode_number, mode_0.mode_number, :] = + coupling
return t_matrix
def compute_coupling_factor(self, *, coupler_length: float) -> numpy.ndarray:
r"""
Compute the coupling factor defined as:
.. math::
f_c = \frac{1}{\rho} \frac{d \rho}{d z}
:param coupler_length: The length of the coupler
:type coupler_length: float
:returns: The amplitudes as a function of the distance in the coupler
:rtype: numpy.ndarray
"""
dx = coupler_length / (self.itr_list.size)
ditr = numpy.gradient(numpy.log(self.itr_list), axis=0)
return ditr / dx
def get_transmision_matrix_from_profile(self, *, profile: AlphaProfile, coupling: str = 'normalized') -> tuple:
"""
        Gets the transmission matrix from the profile.
:param profile: The z-profile of the coupler
:type profile: object
:param coupling: Defines how the coupling is added to the transmission matrix, either 'none', 'normalized', 'unnormalized'
:type coupling: str
"""
final_slice = self.itr_to_slice(itr_list=profile.smallest_itr)
sub_t_matrix = self.transmission_matrix[..., :final_slice]
sub_itr_vector = self.itr_list[: final_slice]
match coupling.lower():
case 'none':
sub_t_matrix = self.add_coupling_to_t_matrix(
t_matrix=sub_t_matrix,
adiabatic_factor=0
)
case 'normalized':
sub_t_matrix = self.add_coupling_to_t_matrix(
t_matrix=sub_t_matrix,
adiabatic_factor=profile.evaluate_adiabatic_factor(itr=sub_itr_vector)
)
case 'unnormalized':
sub_t_matrix = self.add_coupling_to_t_matrix(
t_matrix=sub_t_matrix,
adiabatic_factor=1
)
sub_distance = profile.evaluate_distance_vs_itr(sub_itr_vector)
return sub_distance, sub_itr_vector, sub_t_matrix
def propagate(self, *,
profile: AlphaProfile,
initial_amplitude: list,
max_step: float = None,
n_step: int = None,
coupling: str = 'normalized',
method: str = 'RK45',
**kwargs) -> numpy.ndarray:
"""
Returns the amplitudes value of the supermodes in the coupler.
:param initial_amplitude: The initial amplitude
:type initial_amplitude: list
:param profile: The z-profile of the coupler
:type profile: object
:param max_step: The maximum stride to use in the solver
:type max_step: float
:param coupling: Defines how the coupling is added to the transmission matrix, either 'none', 'normalized', 'unnormalized'
:type coupling: str
:param kwargs: The keywords arguments to be passed to the solver
:type kwargs: dictionary
:returns: The amplitudes as a function of the distance in the coupler
:rtype: numpy.ndarray
"""
initial_amplitude = numpy.asarray(initial_amplitude).astype(complex)
if max_step is None:
max_step = self.parent_solver.wavelength / 200
if n_step is not None:
t_eval = numpy.linspace(0, profile.length, n_step)
sub_distance, sub_itr_vector, sub_t_matrix = self.get_transmision_matrix_from_profile(
profile=profile,
coupling=coupling,
)
z_to_itr = interp1d(
profile.distance,
profile.itr_list,
bounds_error=False,
fill_value='extrapolate',
axis=-1
)
itr_to_t_matrix = interp1d(
sub_itr_vector,
sub_t_matrix,
bounds_error=False,
fill_value='extrapolate',
axis=-1
)
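        # Coupled-mode equation dy/dz = 1j * T(itr(z)) * y, integrated below with scipy's solve_ivp.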
def model(z, y):
itr = z_to_itr(z)
return 1j * itr_to_t_matrix(itr).dot(y)
sol = solve_ivp(
model,
y0=initial_amplitude,
t_span=[0, profile.length],
vectorized=True,
# max_step=max_step,
method=method,
# t_eval=t_eval,
**kwargs
)
norm = (numpy.abs(sol.y)**2).sum(axis=0)
if not numpy.all(numpy.isclose(norm, 1.0, atol=1e-1)):
            logging.warning(f'Power conservation is not achieved [tol: 1e-1]. You should consider reducing the max step size [{max_step}]')
return sol.t, sol.y, z_to_itr(sol.t)
def interpret_initial_input(self, initial_amplitude):
if isinstance(initial_amplitude, SuperMode):
return initial_amplitude.amplitudes
else:
return numpy.asarray(initial_amplitude).astype(complex)
def plot_propagation(self, *,
profile: AlphaProfile,
initial_amplitude,
max_step: float = None,
coupling: str = 'normalized',
method: str = 'RK45',
sub_sampling: int = 5,
save_directory: str = 'new_figure.gif',
**kwargs) -> tuple:
initial_amplitude = self.interpret_initial_input(
initial_amplitude=initial_amplitude
)
z, amplitudes, itr_list = self.propagate(
initial_amplitude=initial_amplitude,
profile=profile,
coupling=coupling,
max_step=max_step,
method=method
)
slc = slice(None, None, sub_sampling)
figure, ax = plt.subplots(1, 1, figsize=(16, 6))
for n, mode in enumerate(self.supermodes):
ax.plot(z[slc], abs(amplitudes[n][slc])**2, label=mode.stylized_label)
ax1 = ax.twinx()
ax1.plot(z, itr_list, 'k--', label='coupler profile')
ax1.legend()
ax.legend()
ax.set_xlabel('Propagation distance z')
ax.set_ylabel('Mode power distribution')
ax1.set_ylabel('Inverse taper ratio')
plt.tight_layout()
plt.savefig(f'{save_directory}')
plt.show()
return z, amplitudes, itr_list
def generate_propagation_gif(self, *,
profile: AlphaProfile,
initial_amplitude,
max_step: float = None,
coupling: str = 'normalized',
method: str = 'RK45',
sub_sampling: int = 5,
mutliplicative_factor: float = 1,
save_directory: str = 'new_figure.gif',
delta_azimuth: float = 0,
**kwargs) -> tuple:
"""
Generates a gif video of the mode propagation.
:param initial_amplitude: The initial amplitude
:type initial_amplitude: list
:param coupler_length: The length of the coupler
:type coupler_length: float
:param max_step: The maximum stride to use in the solver
:type max_step: float
:param sub_sampling: Propagation undersampling factor for the video production
:type sub_sampling: int
:param kwargs: The keywords arguments
:type kwargs: dictionary
"""
initial_amplitude = self.interpret_initial_input(
initial_amplitude=initial_amplitude
)
z_list, amplitudes_list, itr_list = self.propagate(
initial_amplitude=initial_amplitude,
profile=profile,
coupling=coupling,
max_step=max_step,
method=method
)
self.generate_propagation_gif_from_values(
amplitudes_list=amplitudes_list,
itr_list=itr_list,
z_list=z_list,
mutliplicative_factor=mutliplicative_factor,
save_directory=save_directory,
delta_azimuth=delta_azimuth,
sub_sampling=sub_sampling
)
return z_list, amplitudes_list, itr_list
def generate_propagation_gif_from_values(self, *,
amplitudes_list: numpy.ndarray,
itr_list: numpy.ndarray,
z_list: numpy.ndarray,
sub_sampling: int = 10000,
mutliplicative_factor: float = -100,
delta_azimuth: float = 0,
save_directory: str = 'new_figure.gif',
colormap: str = 'bwr',
**kwargs) -> None:
"""
Generates a gif video of the mode propagation.
:param initial_amplitude: The initial amplitude
:type initial_amplitude: list
:param coupler_length: The length of the coupler
:type coupler_length: float
:param max_step: The maximum stride to use in the solver
:type max_step: float
:param sub_sampling: Propagation undersampling factor for the video production
:type sub_sampling: int
:param kwargs: The keywords arguments
:type kwargs: dictionary
"""
amplitudes_list = amplitudes_list[:, ::sub_sampling]
itr_list = itr_list[::sub_sampling]
z_list = z_list[::sub_sampling]
structure = self.get_slice_structure(itr=1.0, add_symmetries=True)
total_field = structure.get_field_combination(amplitudes_list[:, 0], Linf_normalization=True) * mutliplicative_factor
x, y = numpy.mgrid[0: total_field.shape[0], 0: total_field.shape[1]]
grid = pyvista.StructuredGrid(x, y, total_field)
plotter = pyvista.Plotter(notebook=False, off_screen=True)
plotter.open_gif(save_directory, fps=20)
plotter.view_isometric()
# plotter.set_background('black', top='white')
plotter.add_mesh(
grid,
scalars=total_field,
style='surface',
show_edges=True,
edge_color='k',
colormap=colormap,
show_scalar_bar=False,
clim=[-100, 100]
)
pts = grid.points.copy()
azimuth = 0
for z, amplitudes, itr in zip(z_list, amplitudes_list.T, itr_list):
print(f'itr: {itr}')
plotter.camera.elevation = -20
plotter.camera.azimuth = azimuth
azimuth += delta_azimuth
structure = self.get_slice_structure(itr=itr, add_symmetries=True)
total_field = structure.get_field_combination(amplitudes, Linf_normalization=True) * mutliplicative_factor
pts[:, -1] = total_field.T.ravel()
plotter.update_coordinates(pts, render=True)
plotter.update_scalars(total_field.T.ravel(), render=False)
plotter.add_title(f'ITR: {itr: .3f}\t z: {z: .3e}', font='courier', color='w', font_size=20)
plotter.write_frame()
plotter.close()
def _sorting_modes_(self, *ordering_list) -> None:
"""
Generic mode sorting method
:param ordering_parameters: The ordering list to sort the supermodes
:type ordering_parameters: list
"""
order = numpy.lexsort(ordering_list)
supermodes = [self.supermodes[idx] for idx in order]
for n, supermode in enumerate(supermodes):
supermode.mode_number = n
return supermodes
def sorting_modes_beta(self) -> None:
"""
Re-order modes to sort them in descending value of propagation constant.
"""
return self._sorting_modes_([-mode.beta[-1] for mode in self.supermodes])
def sorting_modes(self, *, sorting_method: str = "beta", keep_only: int = None) -> None:
"""
Re-order modes according to a sorting method, either "beta" or "symmetry+beta".
        The final mode selection will also be filtered to keep only a certain number of modes.
"""
assert sorting_method.lower() in ["beta", "symmetry+beta"], \
f"Unrecognized sortingmethod: {sorting_method}, accepted values are ['beta', 'symmetry+beta']"
match sorting_method.lower():
case "beta":
supermodes = self.sorting_modes_beta()
case "symmetry+beta":
supermodes = self.sorting_modes_solver_beta()
self.all_supermodes = supermodes
self.supermodes = supermodes[:keep_only]
def sorting_modes_solver_beta(self):
"""
        Re-order modes to sort them according to two parameters:
ascending cpp_solver number and descending value of propagation constant.
"""
return self._sorting_modes_(
[-mode.beta[-1] for mode in self.supermodes],
[mode.solver_number for mode in self.supermodes],
)
def _interpret_itr_slice_list_(self, *, slice_list: list, itr_list: list):
slice_list = [*slice_list, *self.itr_to_slice(itr_list=itr_list)]
if len(slice_list) == 0:
slice_list = [0, -1]
itr_list = self.slice_to_itr(slice_list)
itr_list = numpy.sort(itr_list)[::-1]
return self.itr_to_slice(itr_list=itr_list), itr_list
@staticmethod
def single_plot(plot_function):
def wrapper(self, *args, **kwargs):
figure = SceneList(unit_size=(16, 6), ax_orientation='vertical')
figure.append_ax()
plot_function(self, ax=figure[0], *args, **kwargs)
return figure
return wrapper
@single_plot
def plot_index(self, ax: Axis, show_crossings: bool = False, mode_of_interest: list[SuperMode] = 'all') -> SceneList:
"""
Plot effective index for each mode as a function of itr
:returns: figure instance, to plot the show() method.
:rtype: SceneList
"""
ax.set_style(**plot_style.index)
mode_of_interest = self.interpret_mode_of_interest(mode_of_interest)
for mode in mode_of_interest:
y = mode.index.get_values()
artist = Line(
x=self.itr_list,
y=y,
label=f'{mode.stylized_label}'
)
ax.add_artist(artist)
if show_crossings:
crossings = self.get_index_crossing(mode_of_interest=mode_of_interest)
for crossing in crossings.values():
artist = Scatter(
x=crossing['itr'],
y=crossing['index'],
marker='o',
color='black',
marker_size=5,
label='mode crossing'
)
ax.add_artist(artist)
@single_plot
def plot_beta(self, ax: Axis, show_crossings: bool = False, mode_of_interest: list[SuperMode] = 'all') -> SceneList:
"""
Plot propagation constant for each mode as a function of itr
:returns: figure instance, to plot the show() method.
:rtype: SceneList
"""
ax.set_style(**plot_style.beta)
mode_of_interest = self.interpret_mode_of_interest(mode_of_interest)
for mode in mode_of_interest:
y = mode.beta.get_values()
artist = Line(
x=self.itr_list,
y=y,
label=f'{mode.stylized_label}'
)
ax.add_artist(artist)
if show_crossings:
            crossings = self.get_beta_crossing(mode_of_interest=mode_of_interest)
for crossing in crossings.values():
artist = Scatter(
x=crossing['itr'],
y=crossing['beta'],
marker='o',
color='black',
marker_size=5
)
ax.add_artist(artist)
@single_plot
def plot_eigen_value(self, ax: Axis, mode_of_interest: list[SuperMode] = 'all') -> SceneList:
"""
Plot propagation constant for each mode as a function of itr
:returns: figure instance, to plot the show() method.
:rtype: SceneList
"""
ax.set_style(**plot_style.eigen_value)
mode_of_interest = self.interpret_mode_of_interest(mode_of_interest)
for mode in mode_of_interest:
y = mode.eigen_value.get_values()
artist = Line(x=self.itr_list, y=y, label=f'{mode.stylized_label}')
ax.add_artist(artist)
@single_plot
def plot_normalized_coupling(self,
ax: Axis,
mode_of_interest: list = 'all',
mode_selection='pairs') -> SceneList:
"""
Plot coupling value for each mode as a function of itr
:param mode_of_interest: List of the mode that are to be considered in the adiabatic criterion plotting.
:type mode_of_interest: list
:returns: figure instance, to plot the show() method.
:rtype: SceneList
"""
ax.set_style(**plot_style.normalized_coupling)
combination = self.interpret_combinations(
mode_of_interest=mode_of_interest,
mode_selection=mode_selection
)
for mode_0, mode_1 in combination:
if mode_0.is_computation_compatible(mode_1):
y = numpy.abs(mode_0.normalized_coupling.get_values(other_supermode=mode_1))
artist = Line(x=self.itr_list, y=y, label=f'{mode_0.stylized_label} - {mode_1.stylized_label}')
ax.add_artist(artist)
@single_plot
def plot_overlap(self,
ax: Axis,
mode_of_interest: list = 'all',
mode_selection='pairs') -> SceneList:
"""
Plot overlap value for each mode as a function of itr
:param mode_of_interest: List of the mode that are to be considered in the adiabatic criterion plotting.
:type mode_of_interest: list
:returns: figure instance, to plot the show() method.
:rtype: SceneList
"""
ax.set_style(**plot_style.overlap)
combination = self.interpret_combinations(
mode_of_interest=mode_of_interest,
mode_selection=mode_selection
)
for mode_0, mode_1 in combination:
if mode_0.is_computation_compatible(mode_1):
y = mode_0.overlap.get_values(other_supermode=mode_1)
artist = Line(x=self.itr_list, y=y, label=f'{mode_0.stylized_label} - {mode_1.stylized_label}')
ax.add_artist(artist)
@single_plot
def plot_beating_length(self,
ax: Axis,
mode_of_interest: list = 'all',
mode_selection='pairs',
add_profile: list[AlphaProfile] = [],
core_radius: float = None) -> SceneList:
"""
Plot coupling value for each mode as a function of itr
:param mode_of_interest: List of the mode that are to be considered in the adiabatic criterion plotting.
:type mode_of_interest: list
:returns: figure instance, to plot the show() method.
:rtype: SceneList
"""
ax.set_style(**plot_style.beating_length)
combination = self.interpret_combinations(
mode_of_interest=mode_of_interest,
mode_selection=mode_selection
)
for mode_0, mode_1 in combination:
if mode_0.is_computation_compatible(mode_1):
y = mode_0.beating_length.get_values(other_supermode=mode_1)
artist = Line(x=self.itr_list, y=y, label=f'{mode_0.stylized_label} - {mode_1.stylized_label}')
ax.add_artist(artist)
for profile in numpy.atleast_1d(add_profile):
profile._render_taper_length_scale_vs_itr_on_ax_(ax=ax, core_radius=core_radius)
@single_plot
def plot_adiabatic(self,
ax: Axis,
mode_of_interest: list[SuperMode] = 'all',
mode_selection: str = 'pairs',
add_profile: list[AlphaProfile] = []) -> SceneList:
"""
Plot adiabatic criterion for each mode as a function of itr
:param pair_of_interest: List of the mode that are to be considered in the adiabatic criterion plotting.
:type pair_of_interest: list
:param mode_selection: The type of combination to be plotted, either 'specific/all/pairs'
:type mode_selection: str
:returns: figure instance, to plot the show() method.
:rtype: SceneList
"""
ax.set_style(**plot_style.adiabatic)
combination = self.interpret_combinations(
mode_of_interest=mode_of_interest,
mode_selection=mode_selection
)
for mode_0, mode_1 in combination:
if mode_0.is_computation_compatible(mode_1):
y = mode_0.adiabatic.get_values(other_supermode=mode_1)
artist = Line(x=self.itr_list, y=y, label=f'{mode_0.stylized_label} - {mode_1.stylized_label}')
ax.add_artist(artist)
for profile in numpy.atleast_1d(add_profile):
profile._render_adiabatic_factor_vs_itr_on_ax_(ax)
@single_plot
def plot_normalized_adiabatic(self,
ax: Axis,
mode_of_interest: list = 'all',
mode_selection: str = 'pairs') -> SceneList:
"""
Plot adiabatic criterion for each mode as a function of itr
:param pair_of_interest: List of the mode that are to be considered in the adiabatic criterion plotting.
:type pair_of_interest: list
:param mode_selection: The type of combination to be plotted, either 'specific/all/pairs'
:type mode_selection: str
:returns: figure instance, to plot the show() method.
:rtype: SceneList
"""
ax.set_style(**plot_style.adiabatic)
combination = self.interpret_combinations(
mode_of_interest=mode_of_interest,
mode_selection=mode_selection
)
for mode_0, mode_1 in combination:
if mode_0.is_computation_compatible(mode_1):
n0 = mode_0.index._data
n1 = mode_1.index._data
beating_length = mode_0.wavelength / abs(n0 - n1)
y = mode_0.adiabatic.get_values(other_supermode=mode_1) * beating_length
artist = Line(x=self.itr_list, y=y, label=f'{mode_0.stylized_label} - {mode_1.stylized_label}')
ax.add_artist(artist)
def interpret_combinations(self, mode_of_interest: list, mode_selection: str):
mode_of_interest = self.interpret_mode_of_interest(mode_of_interest)
combination = self.interpret_mode_selection(
mode_of_interest=mode_of_interest,
mode_selection=mode_selection
)
return combination
def is_compute_compatible(self, pair_of_mode: tuple) -> bool:
"""
Determines whether the specified pair of mode is compatible for computation.
:param pair_of_mode: The pair of mode
:type pair_of_mode: tuple
:returns: True if the specified pair of mode is compute compatible, False otherwise.
:rtype: bool
"""
mode_0, mode_1 = pair_of_mode
return mode_0.is_computation_compatible(mode_1)
def remove_duplicate_combination(self, supermodes_list: list) -> list[SuperMode]:
"""
Removes a duplicate combination in the mode combination list irrespectively of the order.
:param supermodes_list: The supermodes list
:type supermodes_list: list
:returns: The reduced supermode list
:rtype: list
"""
output_list = []
for mode0, mode1 in supermodes_list:
if (mode0, mode1) not in output_list and (mode1, mode0) not in output_list:
output_list.append((mode0, mode1))
return output_list
def interpret_mode_selection(self, mode_of_interest: list, mode_selection: str):
"""
Interpret user input for mode selection and return the combination of mode to consider.
:param mode_of_interest: The mode of interest
:type mode_of_interest: list
:param mode_selection: The mode selection
:type mode_selection: str
"""
test_valid_input(
variable_name='mode_selection',
user_input=mode_selection,
valid_inputs=['pairs', 'specific']
)
match mode_selection:
case 'pairs':
mode_combinations = product(mode_of_interest, mode_of_interest)
case 'specific':
mode_combinations = product(mode_of_interest, self.supermodes)
mode_combinations = filter(self.is_compute_compatible, mode_combinations)
mode_combinations = self.remove_duplicate_combination(mode_combinations)
return set(mode_combinations)
def interpret_mode_of_interest(self, mode_of_interest: list) -> list:
if isinstance(mode_of_interest, SuperMode):
return [mode_of_interest]
if isinstance(mode_of_interest, list):
return mode_of_interest
test_valid_input(
variable_name='mode_of_interest',
user_input=mode_of_interest,
valid_inputs=['all', 'fundamental', 'non-fundamental']
)
match mode_of_interest:
case 'fundamental':
mode_of_interest = self.fundamental_supermodes
case 'non-fundamental':
mode_of_interest = self.non_fundamental_supermodes
case 'all':
mode_of_interest = self.supermodes
return mode_of_interest
def plot_field(self, mode_of_interest: list = 'all',
itr_list: list[float] = [],
slice_list: list[int] = [],
show_mode_label: bool = True,
show_itr: bool = True,
show_slice: bool = True) -> SceneList:
"""
Plot each of the mode field for different itr value or slice number.
:param itr_list: List of itr value to evaluate the mode field
:type itr_list: list
        :param slice_list: List of integers representing the slices where the mode field is evaluated
:type slice_list: list
:returns: The figure
:rtype: SceneList
"""
figure = Scene2D(unit_size=(3, 3))
slice_list, itr_list = self._interpret_itr_slice_list_(slice_list=slice_list, itr_list=itr_list)
mode_of_interest = self.interpret_mode_of_interest(mode_of_interest=mode_of_interest)
for m, mode in enumerate(mode_of_interest):
for n, (itr, slice_number) in enumerate(zip(itr_list, slice_list)):
title = self.get_plot_mode_field_title(
supermode=mode,
itr=itr,
slice_number=slice_number,
show_mode_label=show_mode_label,
show_itr=show_itr,
show_slice=show_slice
)
ax = Axis(
row=n,
col=m,
title=title
)
ax.colorbar = ColorBar(symmetric=True, position='right')
field = mode.field.get_field_mesh(slice_number=slice_number, add_symmetries=True)
x, y = mode.field.get_scaled_axis(slice_number=slice_number)
artist = Mesh(x=x, y=y, scalar=field, colormap=colormaps.blue_black_red)
ax.add_artist(artist)
ax.set_style(**plot_style.field)
figure.add_axes(ax)
return figure
def get_plot_mode_field_title(self, supermode: SuperMode, itr: float, slice_number: int, show_mode_label: bool, show_itr: bool, show_slice: bool) -> str:
"""
        Gets the title of the subplots output by plot_field.
:param supermode: The supermode corresponding to the specific subplot.
:type supermode: SuperMode
:param itr: The itr value
:type itr: float
:param slice_number: The slice number
:type slice_number: int
:param show_mode_label: If True the mode label will be shown.
:type show_mode_label: bool
:param show_itr: If True the title contains the itr value.
:type show_itr: bool
:param show_slice: If True the title contains the slice number of the evaluated ITR
:type show_slice: bool
:returns: The plot mode field title.
:rtype: str
"""
title = ''
if show_mode_label:
title += f'{supermode.stylized_label}'
if show_itr or show_slice:
title += '\n'
if show_slice:
title += f'slice: {slice_number}'
if show_itr:
title += f' itr: {itr:.3f}'
return title
def plot(self, plot_type: str, **kwargs) -> SceneList:
"""
Generic plot function.
Args:
type: Plot type ['index', 'beta', 'adiabatic', 'normalized-adiabatic', 'overlap', 'coupling', 'field', 'beating-length']
"""
test_valid_input(
variable_name='plot_type',
user_input=plot_type,
            valid_inputs=['index', 'beta', 'eigen-value', 'adiabatic', 'overlap', 'normalized-adiabatic', 'normalized-coupling', 'field', 'beating-length']
)
match plot_type.lower():
case 'index':
return self.plot_index(**kwargs)
case 'beta':
return self.plot_beta(**kwargs)
case 'eigen-value':
return self.plot_eigen_value(**kwargs)
case 'normalized-coupling':
return self.plot_normalized_coupling(**kwargs)
case 'overlap':
return self.plot_overlap(**kwargs)
case 'adiabatic':
return self.plot_adiabatic(**kwargs)
case 'field':
return self.plot_field(**kwargs)
case 'beating-length':
return self.plot_beating_length(**kwargs)
case 'normalized-adiabatic':
return self.plot_normalized_adiabatic(**kwargs)
def generate_pdf_report(self,
filename: str = "report",
directory: str = '.',
itr_list: list[float] = [],
slice_list: list[int] = [],
dpi: int = 200,
mode_of_interest: list = 'all',
mode_selection: str = 'specific') -> None:
"""
Generate a full report of the coupler properties as a .pdf file
        :param filename: Name of the report file to be output.
:type filename: str
:param itr_list: List of itr value to evaluate the mode field.
:type itr_list: Array
:param slice_list: List of slice value to evaluate the mode field.
:type slice_list: Array
:param dpi: Pixel density for the image included in the report.
:type dpi: int
:param mode_of_interest: List of the mode that are to be considered in the adiabatic criterion plotting.
:type mode_of_interest: list
        :returns: None
:rtype: None
"""
if directory == 'auto':
directory = directories.reports_path
filename = Path(directory).joinpath(filename).with_suffix('.pdf')
logging.info(f"Saving report pdf into: {filename}")
figures = []
figures.append(self.geometry.plot()._render_())
figures.append(self.plot_field(itr_list=itr_list, slice_list=slice_list)._render_())
figures.append(self.plot_index()._render_())
figures.append(self.plot_beta()._render_())
figures.append(self.plot_normalized_coupling(mode_of_interest=mode_of_interest, mode_selection=mode_selection)._render_())
figures.append(self.plot_adiabatic(mode_of_interest=mode_of_interest, mode_selection=mode_selection)._render_())
Multipage(filename, figs=figures, dpi=dpi)
for figure in figures:
figure.close()
def save_instance(self, filename: str, directory: str = '.'):
"""
Saves the superset instance as a serialized pickle file.
        :param filename: The filename
        :type filename: str
        :param directory: The directory where to save the file; 'auto' means the superset_instance folder
        :type directory: str
"""
if directory == 'auto':
directory = directories.instance_directory
filename = Path(directory).joinpath(filename).with_suffix('.pickle')
logging.info(f"Saving pickled superset into: {filename}")
with open(filename, 'wb') as output_file:
pickle.dump(self, output_file, pickle.HIGHEST_PROTOCOL)
def get_beta_crossing(self, mode_of_interest: list) -> dict:
"""
        Returns a dictionary of the beta mode-crossings, meaning points where the modes' propagation
        constants cross.
        :returns: The mode crossing dictionary.
:rtype: dict
"""
output_dictionnary = {}
combination = self.interpret_combinations(
mode_of_interest=mode_of_interest,
mode_selection='pairs'
)
n_crossing = 0
for mode_0, mode_1 in combination:
itr, beta = get_intersection(
x=self.itr_list,
y0=mode_0.beta.get_values(),
y1=mode_1.beta.get_values(),
average=True
)
if len(beta) != 0:
output_dictionnary[n_crossing] = {
'mode0': mode_0,
'mode1': mode_1,
'itr': itr,
'beta': beta
}
n_crossing += 1
return output_dictionnary
def get_index_crossing(self, mode_of_interest: list) -> dict:
"""
        Returns a dictionary of the effective-index mode-crossings, meaning points where the modes' effective
        indices cross.
        :returns: The mode crossing dictionary.
:rtype: dict
"""
output_dictionnary = {}
combination = self.interpret_combinations(
mode_of_interest=mode_of_interest,
mode_selection='pairs'
)
n_crossing = 0
for mode_0, mode_1 in combination:
itr, index = get_intersection(
x=self.itr_list,
y0=mode_0.index.get_values(),
y1=mode_1.index.get_values(),
average=True
)
if len(index) != 0:
output_dictionnary[n_crossing] = {
'mode0': mode_0,
'mode1': mode_1,
'itr': itr,
'index': index
}
n_crossing += 1
return output_dictionnary
# def __getattribute__(self, key: str):
# print(key, key.__class__)
# for supermode in super().__getattribute__('supermodes'):
# if supermode.label == key:
# return supermode
# return super().__getattribute__(key)
# -
authors: ["Martin.Poinsinet.de.Sivry@gmail.com"] | author_id: Martin.Poinsinet.de.Sivry@gmail.com

path: /view.py | repo_name: fakhouri-junior/ImageSearch | branch_name: refs/heads/master | detected_licenses: [] | license_type: no_license
blob_id: 1b500ae5665ebddb4b225dc5d03bf80bd29675de | directory_id: 55f2ab76334f13b80632e55010f0e846369d86f8 | content_id: 3568ad8fe3d26dfb58768e0746e2d0275ef3f63a
snapshot_id: 7ad7b8024fed7df079a50a31ea86347d214b0803 | revision_id: 681da9ace0cbbfec699934a8d9fd8b10b4672cb9
visit_date: 2021-09-09T04:04:05.092097 | revision_date: 2018-03-13T18:13:35 | committer_date: 2018-03-13T18:13:35
github_id: 125,086,380 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,077 | extension: py
content:
from flask import Flask, render_template, request, redirect,jsonify, url_for, flash
import urllib
import random, string
from flask_mail import Mail, Message
from model import Prediction, FinalResult
from core_classification import *
import time
import numpy as np
from multiprocessing.pool import ThreadPool
app = Flask(__name__)
"""
WORK WITH THE NUMPY ARRAY IN A THREAD, TRY TO MAKE IT READY FOR WORK BEFORE NEEDING IT
"""
pool = ThreadPool(processes=5)
async_result = pool.apply_async(load_array,())
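# classifyImage() later blocks on async_result.get(), so the feature array is loaded in the background while the app starts.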
@app.route('/')
@app.route('/index')
def homePage():
return render_template("index.html")
@app.route('/api/v1/<path:url_path>')
def myAPI(url_path):
# 2- download the image
path_of_image = downloadImage(url_path)
if path_of_image.startswith("Non"):
return render_template('error.html')
# 3- get the path of the image .... DONE IN STEP #2
# 4- form the proper main method for core_classification
result = mainWeb(path_of_image)
# 5- display the output
# 5'- slice the result into useful parts
parts = result.split('\n')
# 6- extract info from the parts to form the FinalResult object and jsonify it
predictions = []
for part in parts:
part = part.strip()
if part.startswith("Prediction") or part == "":
continue
myParts = part.split(' ')
print myParts
percentage = myParts[0]#percentage
value = myParts[2]#value prediction
prediction = Prediction(percentage,value)
predictions.append(prediction)
finalResult = FinalResult(url_path,predictions)
return jsonify(Result=finalResult.serialize)
@app.route('/classify', methods=['POST'])
def classifyImage():
if request.method == 'POST':
# 1- get the url
if request.form['url']:
url_path = request.form['url']
# 2- download the image
path_of_image = downloadImage(url_path)
if path_of_image.startswith("Non"):
return render_template('error.html')
# 3- get the path of the image .... DONE IN STEP #2
# 4- form the proper main method for core_classification
result = mainWeb(path_of_image)
# 5- display the output
# 5'- slice the result into useful parts
# list of percentages
predictions = []
parts = result.split('\n')
for part in parts:
part = part.strip()
if part.startswith("Prediction") or part == "":
continue
myParts = part.split(' ')
percentage = myParts[0] # percentage
value = myParts[2] # value prediction
prediction = Prediction(percentage, value)
predictions.append(prediction)
path_parts = path_of_image.split('/')
betterPath = path_parts[5]+"/"+path_parts[6]+"/"+path_parts[7]
"""
RETRIEVAL CODE GOES HERE
"""
script_start_time = time.time()
features_array = async_result.get()
print 'async_result took %f ' % (time.time() - script_start_time,)
list_of_paths = findSimilar([path_of_image], features_array)
print list_of_paths
# parse the location from disk to a location in server
server_similar_images_parsed = parseImageSimilarPath(list_of_paths)
# pass list_of_paths which represents the similar images to the template
#return render_template('result.html', result=predictions, path_of_image=betterPath)
return render_template('result.html', result=predictions, path_of_image=betterPath, similar=server_similar_images_parsed)
else:
return "REJECTED"
def downloadImage(url):
# sanity checks
# 1- check for http and .jpg or .png
if url.startswith("http") and (url.endswith(".jpg") or url.endswith(".png")):
resource = urllib.urlopen(url)
        jebrish = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in xrange(20))
        # keep the real extension so the returned path matches the file that was actually written
        extension = ".png" if url.endswith(".png") else ".jpg"
        output = open("/home/salim/PycharmProjects/ImageSearch/static/images/" + jebrish + extension, "wb")
        output.write(resource.read())
        output.close()
        # return the path of the saved image
        return "/home/salim/PycharmProjects/ImageSearch/static/images/" + jebrish + extension
else:
return "NonValid"
def parseImageSimilarPath(list_of_images):
new_list = []
for image_path in list_of_images:
parts_slash = image_path.split('/')
# '/home/salim/Downloads/test/tshirt1.jpg'
name_of_image = parts_slash[5] # what we need
new_list.append("/static/images_similar/"+name_of_image)
return new_list
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host = '0.0.0.0', port = 5000)
authors: ["noreply@github.com"] | author_id: fakhouri-junior.noreply@github.com

path: /numerical_analysis_backup/large-scale-multiobj2/core-arch2-guard0-beta100-hebbe/pareto6.py | repo_name: LiYan1988/kthOld_OFC | branch_name: refs/heads/master | detected_licenses: [] | license_type: no_license
blob_id: 10b6910d59ae321ecacd90b79a73471e32d301e7 | directory_id: 2b167e29ba07e9f577c20c54cb943861d0ccfa69 | content_id: 4ad77b5ac1b8c19557dfa96eb5e314381e537b2a
snapshot_id: 17aeeed21e195d1a9a3262ec2e67d6b1d3f9ff0f | revision_id: b1237577ea68ad735a65981bf29584ebd889132b
visit_date: 2021-01-11T17:27:25.574431 | revision_date: 2017-01-23T05:32:35 | committer_date: 2017-01-23T05:32:35
github_id: 79,773,237 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,395 | extension: py
content:
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 4 15:15:10 2016
@author: li
optimize both throughput and connections
"""
#import sys
#sys.path.insert(0, '/home/li/Dropbox/KTH/numerical_analysis/ILPs')
import csv
from gurobipy import *
import numpy as np
from arch2_decomposition_new import Arch2_decompose
np.random.seed(2010)
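# problem size and traffic-matrix instance index used to build the input file name below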
num_cores=10
num_slots=320
i = 6
time_limit_routing = 3600
time_limit_sa = 10800
filename = 'traffic_matrix_pod250_load50_'+str(i)+'.csv'
tm = []
with open(filename) as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
row = [float(u) for u in row]
tm.append(row)
tm = np.array(tm)
#%% arch2
corev = np.array([1, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
#corev = np.array([1, 2])
connection_ub = []
throughput_ub = []
obj_ub = []
connection_lb = []
throughput_lb = []
obj_lb = []
connection_he = []
throughput_he = []
obj_he = []
for c in corev:
m = Arch2_decompose(tm, num_slots=num_slots, num_cores=c,
alpha=1,beta=100, num_guard_slot=0)
m.create_model_routing(mipfocus=1,timelimit=7200,mipgap=0.01, method=2)
connection_ub.append(m.connection_ub_)
throughput_ub.append(m.throughput_ub_)
obj_ub.append(m.obj_ub_)
# m.create_model_sa(mipfocus=1,timelimit=10800,mipgap=0.01, method=2,
# SubMIPNodes=2000, heuristics=0.8)
# connection_lb.append(m.connection_lb_)
# throughput_lb.append(m.throughput_lb_)
# obj_lb.append(m.obj_lb_)
# m.write_result_csv('cnklist_lb_%d_%d.csv'%(i,c), m.cnklist_lb)
connection_lb.append(0)
throughput_lb.append(0)
obj_lb.append(0)
m.heuristic()
connection_he.append(m.obj_heuristic_connection_)
throughput_he.append(m.obj_heuristic_throughput_)
obj_he.append(m.obj_heuristic_)
m.write_result_csv('cnklist_heuristic_i%d_c%d.csv'%(i,c), m.cnklist_heuristic_)
result = np.array([corev,
connection_ub,throughput_ub,obj_ub,
connection_lb,throughput_lb,obj_lb,
connection_he,throughput_he,obj_he]).T
file_name = "result_pareto_arch2_old_pod100_i{}.csv".format(i)
with open(file_name, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(['#cores', 'connection_ub', 'throughput_ub',
'obj_ub', 'connection_lb', 'throughput_lb', 'obj_lb',
'connection_he', 'throughput_he', 'obj_he'])
writer.writerows(result)
|
[
"li.yan.ly414@gmail.com"
] |
li.yan.ly414@gmail.com
|
af56eb2199b94de3f5eae3ad06f929398286bfb7
|
7e7f08d947c1162fc5978914c0b7333502c783b9
|
/python_tutorial/polls/migrations/0001_initial.py
|
95d7307aa65960d3c1fd15cb41bd3e3b8b2d9fdd
|
[] |
no_license
|
shundesu/study_python
|
a052618137262e177fb7d6c8dc6928637090c12a
|
bcdf52c2200c6c3cf63bde5a0cacd7ee2db621e9
|
refs/heads/master
| 2020-04-24T05:09:56.050008
| 2019-02-20T18:29:31
| 2019-02-20T18:29:31
| 171,727,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,165
|
py
|
# Generated by Django 2.1.5 on 2019-01-07 15:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
),
]
|
[
"my.instant.ramen568@gmail.com"
] |
my.instant.ramen568@gmail.com
|
6c2fd404feebc2e0e08b6077c268bd624a020c61
|
62251173a123ef987a4ba8b34a52987d1001d8a7
|
/app/routes/items.py
|
064bb65ef41c27f6625f4d1d744e154c86cacefc
|
[] |
no_license
|
tianhuizhou/FlaskRestful-p2
|
e4fea0645b92060a9eb6bbb19c89b77e5096e546
|
50af3c7cc6e61c1704ca63dda69098cb788fb682
|
refs/heads/master
| 2023-05-04T17:43:23.227784
| 2021-05-20T09:53:48
| 2021-05-20T09:53:48
| 369,152,076
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,643
|
py
|
from flask import Blueprint, jsonify, request
from app.database.item import Item
from flask_jwt import jwt_required
module = Blueprint('items', __name__, url_prefix='/api/items')
@module.route('', methods=['GET'])
@jwt_required()
def index():
return {'items': list(map(lambda x: x.json(), Item.query.all()))}
@module.route('/<item_name>',methods=['GET'])
def show(item_name):
item = Item.find_by_name(item_name)
if item:
return item.json()
return jsonify({'message': 'Item not found'}), 404
@module.route('',methods=['POST'])
@jwt_required()
def create():
request_data = request.get_json()
try:
if Item.find_by_name(request_data['name']):
return {'message': "An item with name '{}' already exists.".format(name)}, 400
except:
return {"message": "the field of name is required"}
try:
new_item = Item(**request_data)
new_item.save_to_db()
return new_item.json(), 201
except:
return {"message": "An error occurred inserting the item."}, 500
@module.route('/<item_id>',methods=['DELETE'])
@jwt_required()
def delete(item_id):
item = Item.find_by_id(item_id)
if item:
item.delete_from_db()
return jsonify({'message': 'Item has been deleted'})
return jsonify({'message': 'Item not found.'}), 404
@module.route('/<item_name>',methods=['PUT'])
@jwt_required()
def update(item_name):
request_data = request.get_json()
item = Item.find_by_name(item_name)
if item:
item.price = request_data['price']
else:
item = Item(**request_data)
item.save_to_db()
return item.json()
|
[
"tianhui@hawaii.edu"
] |
tianhui@hawaii.edu
|
6f0f9d1289c37416d382d33a1d10309de7a567cd
|
e0592ee35c8d7923d63e90417f11234f3bf4c26b
|
/mapc2/scripts/get_list_of_mlab_providers.py
|
83c98b8de91f9b5430630ea3d247acc1cb6ae7fe
|
[] |
no_license
|
e-south/CS506Spring2021Repository
|
82e786edb9cede918f02946e9465a88918cc699c
|
5f9864eb4f1b0fa9bce33ed851584dd95f8ecd8f
|
refs/heads/master
| 2023-04-16T02:45:35.936057
| 2021-04-29T01:57:25
| 2021-04-29T13:35:47
| 339,234,954
| 1
| 2
| null | 2021-04-20T18:07:38
| 2021-02-15T23:26:26
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 751
|
py
|
# In this script, we extract out a list of all the Providers present in the MLAB data
#
# Author: Nathan Lauer
# Please feel free to ask me any questions, I hope you're having a nice day!
import pandas as pd
import argparse
########## Main execution flow
parser = argparse.ArgumentParser(description='Map AS Numbers to Provider Name')
parser.add_argument('--mlab-data', dest='mlab_data', help='The input MLAB data csv file')
parser.add_argument('--output', dest='output_file', help='Output file to write to')
args = parser.parse_args()
# Read the MLAB data
df = pd.read_csv(args.mlab_data)
providers = df.ProviderName.unique()
providers_df = pd.DataFrame(data=providers, columns=['ProviderName'])
providers_df.to_csv(args.output_file, index=False)
|
[
"lauern91@gmail.com"
] |
lauern91@gmail.com
|
5f0b559e80eb4c98096bc1b141255cb0359b131c
|
3722d884f8229968a549d90b62d3ae39f2da78b1
|
/python/IK.py
|
bb6b365d4c8e73516dae6111e7ebcd22e2cf3216
|
[] |
no_license
|
Roboy/BulletUnityRoboy
|
7579bb70de1b4aa19f3d7ce3bac969fe9b4aebd0
|
bb09175b42b978a4a53648df8af9377d66027ee8
|
refs/heads/master
| 2021-05-20T12:32:04.232639
| 2020-06-20T16:53:43
| 2020-06-20T16:53:43
| 252,296,978
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,937
|
py
|
import pybullet as p
import math
import time
p.connect(p.GUI)
ob = p.loadURDF("C:\\Users\\roboy\\Documents\\code\\roboy3_models\\upper_body\\bullet.urdf", useFixedBase=1)
p.setGravity(0,0,-10)
t = 0.
prevPose = [0, 0, 0]
prevPose1 = [0, 0, 0]
hasPrevPose = 0
useNullSpace = 0
useOrientation = 0
#This can be used to test the IK result accuracy.
useSimulation = 1
useRealTimeSimulation = 1
ikSolver = 0
p.setRealTimeSimulation(useRealTimeSimulation)
#trailDuration is duration (in seconds) after debug lines will be removed automatically
#use 0 for no-removal
trailDuration = 15
numJoints = p.getNumJoints(ob)
freeJoints = []
for i in range(numJoints):
info = p.getJointInfo(ob,i)
if info[2] == p.JOINT_REVOLUTE:
freeJoints.append(i)
if info[12] == b'hand_right':
endEffectorId = i;
print("EF id: " + str(i))
break
def accurateCalculateInverseKinematics(ob, endEffectorId, targetPos, threshold, maxIter):
closeEnough = False
iter = 0
dist2 = 1e30
while (not closeEnough and iter < maxIter):
jointPoses = p.calculateInverseKinematics(ob, endEffectorId, targetPos)
#import pdb; pdb.set_trace()
for i in range(len(freeJoints)):
p.resetJointState(ob, freeJoints[i], jointPoses[i])
ls = p.getLinkState(ob, endEffectorId)
newPos = ls[4]
diff = [targetPos[0] - newPos[0], targetPos[1] - newPos[1], targetPos[2] - newPos[2]]
dist2 = (diff[0] * diff[0] + diff[1] * diff[1] + diff[2] * diff[2])
closeEnough = (dist2 < threshold)
iter = iter + 1
#print ("Num iter: "+str(iter) + "threshold: "+str(dist2))
return jointPoses
while True:
if (useRealTimeSimulation):
t = time.time() #(dt, micro) = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f').split('.')
#t = (dt.second/60.)*2.*math.pi
else:
t = t + 0.001
if (useSimulation and useRealTimeSimulation == 0):
p.stepSimulation()
pos = [0.2 * math.cos(t)-0.4, -0.4, 0. + 0.2 * math.sin(t) + 0.7]
threshold = 0.001
maxIter = 100
jointPoses = accurateCalculateInverseKinematics(ob, endEffectorId, pos,
threshold, maxIter)
if (useSimulation):
for i in range(len(freeJoints)):
p.setJointMotorControl2(bodyIndex=ob,
jointIndex=freeJoints[i],
controlMode=p.POSITION_CONTROL,
targetPosition=jointPoses[i],
targetVelocity=0,
force=100,
positionGain=1,
velocityGain=0.1)
ls = p.getLinkState(ob, endEffectorId)
if (hasPrevPose):
p.addUserDebugLine(prevPose, pos, [0, 0, 0.3], 1, trailDuration)
p.addUserDebugLine(prevPose1, ls[4], [1, 0, 0], 1, trailDuration)
prevPose = pos
prevPose1 = ls[4]
hasPrevPose = 1
|
[
"aliona.kharchenko@gmail.com"
] |
aliona.kharchenko@gmail.com
|
0f09b275880b051fdadc094ead0cfbd02ae60a9f
|
5b5c22a8c8cbc4baf184061817c57c8acb202f94
|
/docal/calculation.py
|
21246ab3f846c03984881daaa9b3974532e4ec42
|
[
"MIT"
] |
permissive
|
kirtan29/docal
|
fafb65f90a84174a07af9cdbb0db31fc7e099ad6
|
62368194402e4153fc6030cec80ec7e23a8df076
|
refs/heads/master
| 2023-07-21T12:20:24.043742
| 2021-07-16T18:21:28
| 2021-07-16T18:21:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,268
|
py
|
'''
module procedure
does the calculations needed, sets the appropriate variables in the main
module and returns the procedure of the calculations
'''
import ast
import logging
from .parsing import to_math, MathVisitor, eqn, UNIT_PF, build_eqn, _split, DEFAULT_MAT_SIZE
log = logging.getLogger(__name__)
# units that are not base units
DERIVED = {
'N': 'kg*m/s**2',
'Pa': 'kg/(m*s**2)',
'J': 'kg*m**2/s**2',
'W': 'kg*m**2/s**3',
'Hz': '_/s',
}
DERIVED = {u: ast.parse(DERIVED[u]).body[0].value for u in DERIVED}
FALLBACK_OPTIONS = {
'steps': [],
'mat_size': DEFAULT_MAT_SIZE,
'unit': None,
'mode': 'default',
'vert': True,
'note': None,
'hidden': False,
'decimal': 3,
'result' : None,
'newlines': 0,
}
def _calculate(expr: ast.AST, options: dict, working_dict: dict, mul=' ', div='/', syntax=None):
'''carry out the necessary calculations and assignments'''
lx_args = lambda ex, subs=None: MathVisitor(mul=mul,
div=div,
subs=subs,
mat_size=options['mat_size'],
decimal=options['decimal'],
working_dict=working_dict,
syntax=syntax
).visit(ex)
value_ast = expr if options['result'] is None else options['result']
value = eval(compile(ast.Expression(value_ast), '<calculation>', 'eval'),
working_dict)
result = [lx_args(expr)]
result_rest = [
lx_args(expr, True),
lx_args(value if not isinstance(value_ast, ast.Lambda) else value_ast)]
if options['steps']:
result += result_rest
result = [result[s] for s in options['steps'] if 0 <= s <= 2]
# remove repeated steps (retaining order)
elif isinstance(expr, ast.Constant) or isinstance(value_ast, ast.Lambda):
result = [result_rest[1]]
elif isinstance(expr, ast.Name):
result = [result[0], result_rest[1]]
else:
last_str = str(result[0])
for step in result_rest:
step_str = str(step)
if step_str != last_str:
last_str = step_str
result.append(step)
# detect if the user is trying to give a different unit and give warning
if options['unit']:
# in their dict forms
compared = [UnitHandler(True, working_dict).visit(options['unit']),
UnitHandler(False, working_dict).visit(expr)]
# when the calculated already has a unit
compared = [compared, [compared[1], [{}, {}]]]
# if it is detected, warn the user but accept it anyway
if not are_equivalent(*compared[1]) and not are_equivalent(*compared[0]):
log.warning(
'The input unit is not equivalent to the calculated one.')
else:
options['unit'] = unitize(expr, working_dict)
result[-1] += to_math(options['unit'], div='/', syntax=syntax, ital=False)
if options['note'] is not None:
result[-1] += syntax.txt(syntax.halfsp) + syntax.txt_math(options['note'])
return result
def _process_options(additionals, defaults=FALLBACK_OPTIONS, syntax=None):
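# A sketch of the option grammar handled below, inferred from the branches rather than stated
# anywhere authoritative: bare digits select steps, '#...' attaches a note, 'mN' sets the matrix
# size, 'dN' sets the decimal places, '$' / '$$' pick inline / display mode, '|' / '-' choose
# vertical / horizontal layout, ';' hides the output, '=expr' overrides the result, a run of
# backslashes sets extra newlines, and anything else that compiles as a Python expression is
# treated as a unit.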
options = {}
if additionals:
for a in _split(additionals, ','):
if a.isdigit():
options['steps'] = [int(num) - 1 for num in a]
# only the first # is used to split the line (see above) so the rest stay part of the note
elif a.startswith('#'):
note = a[1:].strip() # remove the hash
options['note'] = note[1:-1] \
if note.startswith('(') and note.endswith(')') else note
elif a.startswith('m') and a[1:].isdigit():
options['mat_size'] = (int(a[1:]), int(a[1:]))
elif a.startswith('d') and a[1:].isdigit():
options['decimal'] = int(a[1:])
elif a == '$':
options['mode'] = 'inline'
elif a == '$$':
options['mode'] = 'display'
elif a == '|':
options['vert'] = True
elif a == '-':
options['vert'] = False
elif a == ';':
options['hidden'] = True
elif a.startswith('='):
try:
options['result'] = ast.parse(a[1:]).body[0].value
except SyntaxError:
log.warning('Could not evaluate answer, using default')
elif set(a) == {'\\'}:
options['newlines'] = len(a)
elif a and a != '_':
# if it is a valid python expression, take it as a unit
try:
compile(a, '', 'eval')
except SyntaxError:
log.warning('Unknown option %s found, ignoring...', repr(a))
else:
options['unit'] = ast.parse(a).body[0].value
# merge the options, with the specific one taking precedence
return {**defaults, **options}
def cal(input_str: ast.AST, working_dict={}, mul=' ', div='frac', syntax=None, options={}) -> str:
'''
evaluate all the calculations, carry out the appropriate assignments,
and return all the procedures
'''
result = _calculate(input_str.value, options, working_dict, mul, div, syntax=syntax)
if options['mode'] == 'inline':
displ = False
elif options['mode'] == 'display':
displ = True
else:
if len(options['steps']) == 1 and options['steps'][0] == 0:
displ = False
else:
displ = True
disp = 'disp' if displ else 'inline'
if isinstance(input_str, ast.Assign):
var_names = [v.id for v in input_str.targets]
var_lx = syntax.txt('=').join([to_math(var_name, syntax=syntax) for var_name in input_str.targets])
procedure = [[var_lx, result[0]]]
for step in result[1:]:
procedure.append([syntax.txt(''), step])
if options['result'] is not None:
input_str.value = options['result'] # override the value stored
# carry out normal op in main script
co = compile(ast.Module([input_str], []), '<calculation>', 'exec')
exec(co, working_dict)
# for later unit retrieval
for var in var_names:
working_dict[var + UNIT_PF] = options['unit']
else:
if len(result) > 1:
procedure = [[result[0], result[1]]]
if result[2:]:
procedure.append([syntax.txt(''), result[2]])
else:
procedure = [result]
if options['hidden']:
return ('text', '')
output = build_eqn(procedure, displ, options['vert'], syntax)
return (disp, output)
class UnitHandler(ast.NodeVisitor):
'''
simplify the given expression as a combination of units
'''
def __init__(self, norm=False, working_dict={}):
self.norm = norm
self.dict = working_dict
def visit_Name(self, n):
if self.norm:
unit = n
else:
if n.id + UNIT_PF in self.dict:
unit = self.dict[n.id + UNIT_PF]
else:
unit = ast.parse('_').body[0].value
if isinstance(unit, ast.Name):
if unit.id in DERIVED:
unit = DERIVED[unit.id]
elif hasattr(n, 'upper') and not n.upper:
return [{}, {unit.id: 1}]
else:
return [{unit.id: 1}, {}]
unit.upper = n.upper if hasattr(n, 'upper') else True
# store and temporarily disregard the state self.norm
prev_norm = self.norm
self.norm = True
ls = self.visit(unit)
# revert to the previous state
self.norm = prev_norm
return ls
def visit_Call(self, n):
if isinstance(n.func, ast.Attribute):
func = n.func.attr
elif isinstance(n.func, ast.Name):
func = n.func.id
else:
func = self.visit(n.func)
if func == 'sqrt':
return self.visit(ast.BinOp(left=n.args[0], op=ast.Pow(), right=ast.Num(n=1/2)))
return [{}, {}]
def visit_BinOp(self, n):
if hasattr(n, 'upper') and not n.upper:
upper = False
else:
upper = True
n.left.upper = upper
left = self.visit(n.left)
if isinstance(n.op, ast.Pow):
if isinstance(n.right, ast.BinOp):
if isinstance(n.right.left, ast.Num) and isinstance(n.right, ast.Num):
if isinstance(n.right.op, ast.Add):
p = n.right.left.n + n.right.n
elif isinstance(n.right.op, ast.Sub):
p = n.right.left.n - n.right.n
elif isinstance(n.right.op, ast.Mult):
p = n.right.left.n * n.right.n
elif isinstance(n.right.op, ast.Div):
p = n.right.left.n / n.right.n
elif isinstance(n.right.op, ast.Pow):
p = n.right.left.n ** n.right.n
else:
# XXX
p = 1
elif isinstance(n.right, ast.UnaryOp):
if isinstance(n.right.operand, ast.Num):
if isinstance(n.right.op, ast.USub):
p = - n.right.operand.n
elif isinstance(n.right.op, ast.UAdd):
p = n.right.operand.n
elif isinstance(n.right, ast.Num):
p = n.right.n
else:
# XXX
p = 1
for u in left[0]:
left[0][u] *= p
for u in left[1]:
left[1][u] *= p
return left
elif isinstance(n.op, ast.Mult):
n.right.upper = upper
right = self.visit(n.right)
for u in right[0]:
if u in left[0]:
left[0][u] += right[0][u]
else:
left[0][u] = right[0][u]
for u in right[1]:
if u in left[1]:
left[1][u] += right[1][u]
else:
left[1][u] = right[1][u]
return left
elif isinstance(n.op, ast.Div):
n.right.upper = not upper
right = self.visit(n.right)
for u in right[0]:
if u in left[0]:
left[0][u] += right[0][u]
else:
left[0][u] = right[0][u]
for u in right[1]:
if u in left[1]:
left[1][u] += right[1][u]
else:
left[1][u] = right[1][u]
return left
elif isinstance(n.op, ast.Add) or isinstance(n.op, ast.Sub):
n.right.upper = upper
left = reduce(left)
right = reduce(self.visit(n.right))
if are_equivalent(left, right):
return left
log.warning('The units of the two sides are not equivalent.')
return [{}, {}]
def visit_UnaryOp(self, n):
return self.visit(n.operand)
def visit_Expr(self, n):
return self.visit(n.value)
def generic_visit(self, n):
return [{}, {}]
def unitize(s: ast.AST, working_dict={}) -> str:
'''
look for units of the variable names in the expression, cancel-out units
that can be canceled out and return an expression of units that can be
converted into latex using to_math
'''
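# Illustrative example (not from the original): for a product of two variables whose units are
# kg*m/s**2 and s, the reduction below cancels the shared 's' and the returned unit expression
# is equivalent to kg*m/s.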
ls = reduce(UnitHandler(False, working_dict).visit(s))
# the var names that are of units in the main dict that are not _
in_use = {working_dict[u] for u in working_dict
if u.endswith(UNIT_PF) and working_dict[u] != ast.Name(id='_')}
# var names in in_use whose values contain one of the DERIVED units
in_use = [u for u in in_use
if any([n.id in DERIVED
for n in [n for n in ast.walk(u) if isinstance(n, ast.Name)]])]
# search in reverse order to choose the most recently used unit
in_use.reverse()
# if this unit is equivalent to one of them, return that
for unit in in_use:
if are_equivalent(UnitHandler(True, working_dict).visit(unit), ls):
return unit
upper = "*".join([u if ls[0][u] == 1 else f'{u}**{ls[0][u]}'
for u in ls[0]])
lower = "*".join([u if ls[1][u] == 1 else f'{u}**{ls[1][u]}'
for u in ls[1]])
s_upper = f'({upper})' if ls[0] else "_"
s_lower = f'/({lower})' if ls[1] else ""
return ast.parse(s_upper + s_lower).body[0].value
def are_equivalent(unit1: dict, unit2: dict) -> bool:
'''
return True if the units are equivalent, False otherwise
'''
unit1, unit2 = reduce(unit1), reduce(unit2)
conditions = [
# the numerators have the same elements
set(unit1[0]) == set(unit2[0]) and \
# each item's value (power) is the same in both
all([unit1[0][e] == unit2[0][e] for e in unit1[0]]),
# the denominators have the same elements
set(unit1[1]) == set(unit2[1]) and \
# and each item's value (power) is the same in both
all([unit1[1][e] == unit2[1][e] for e in unit1[1]]),
]
return all(conditions)
def reduce(ls: list) -> list:
'''
cancel out units that appear in both the numerator and denominator, those
that have no name (_) and those with power of 0
'''
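# Hypothetical example: reduce([{'m': 2, 's': 1}, {'s': 1, '_': 1}]) returns [{'m': 2}, {}]
# -- the shared 's' cancels and the placeholder '_' entry is dropped.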
upper = {**ls[0]}
lower = {**ls[1]}
for u in ls[0]:
if u in ls[1]:
if upper[u] > lower[u]:
upper[u] -= lower[u]
del lower[u]
elif upper[u] < lower[u]:
lower[u] -= upper[u]
del upper[u]
else:
del upper[u]
del lower[u]
for u in {**upper}:
if upper[u] == 0 or u == '_':
del upper[u]
for u in {**lower}:
if lower[u] == 0 or u == '_':
del lower[u]
return [upper, lower]
|
[
"kidusadugna@gmail.com"
] |
kidusadugna@gmail.com
|
c5813069ab74084371832db97cf363db3c1d548c
|
689e06888840d9aaf30fd89f874608f110a935fd
|
/ER/regExp.py
|
36951be8be5c488574e2541921212b163dcbf968
|
[] |
no_license
|
yuehongOlivia/LearnPython
|
58871db2d019e9689c662ca413fb5515339a3038
|
5bdde927f8bf8a5550d0ef3a06aaa5de66a1e09d
|
refs/heads/master
| 2020-04-03T13:16:37.687052
| 2019-02-09T22:25:46
| 2019-02-09T22:25:46
| 155,279,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,083
|
py
|
# -*- coding: utf-8 -*-
# write a RE to verify an email address like : someone@gmail.com, bill.gates@microsoft.com
import re
def is_valid_email(addr):
re_email = re.compile(r'([\w]([\w\d](?!-))*\.)*([\w\d]*)@([\d\w]+\.com)')  # escape the literal dots
if re_email.match(addr):
return True
else:
return False
# Test:
assert is_valid_email('someone@gmail.com')
assert is_valid_email('bill.gates@microsoft.com')
assert not is_valid_email('bob#example.com')
assert not is_valid_email('mr-bob@example.com')
print('ok')
# write another RE in order to extract the username
def name_of_email(addr):
username1 = re.compile(r'<([\w\s]*)>(\s)*([\w\d]*)@([\d\w]+\.[\w]+)')
username2 = re.compile(r'([\w\d]*)@([\d\w]+\.[\w]+)')
if username1.match(addr):
m1 = username1.match(addr)
return m1.group(1)
elif username2.match(addr):
m2 = username2.match(addr)
return m2.group(1)
else:
return False
# Test:
assert name_of_email('<Tom Paris> tom@voyager.org') == 'Tom Paris'
assert name_of_email('tom@voyager.org') == 'tom'
print('ok')
|
[
"hongyue192447@gmail.com"
] |
hongyue192447@gmail.com
|
ac583b22e70d14346b739f3638ad361d420f7569
|
7c92fa902082c1683887296808c22e55bfa4c24f
|
/finalPlotsReallyFinal/NewEfficiencyCorrection.py
|
06763a63c465c6d4582a211295025bf979992e3a
|
[] |
no_license
|
ElenaGramellini/QuickDumpster2
|
6a82402658d47e0ec1f8b118f8882526a4a32334
|
178e30cb993cfc29ae79238e5a65946ef9ca43ec
|
refs/heads/master
| 2020-03-10T11:13:35.477466
| 2018-08-13T19:07:12
| 2018-08-13T19:07:12
| 129,352,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,440
|
py
|
from ROOT import *
import os
import math
import argparse
def CalculateErrorRatio (num, numErr, den, denErr):
if num and den:
relativeErrorNum = numErr/num
relativeErrorDen = denErr/den
relativeErrorRatio = TMath.Sqrt(relativeErrorNum*relativeErrorNum + relativeErrorDen*relativeErrorDen)
ratio = num/den
totErrorRatio = relativeErrorRatio*ratio
else :
totErrorRatio = 10000.
return totErrorRatio
pionTrueMC_MCCutMin_FileName = "/Users/elenag/Desktop/TrackingStudy/Resolution/60ACuts/AngleCut_"
pionTrueMC_MCCutMax_FileName = "/Users/elenag/Desktop/TrackingStudy/Resolution/60ACuts/AngleCut_0.15734_histo.root"
pionTrueMC_MCCut_FileName = "/Users/elenag/Desktop/TrackingStudy/Resolution/60ACuts/AngleCut_0.07954_histo.root"
pionTrueMC_DataCut_FileName = "/Users/elenag/Desktop/TrackingStudy/Resolution/60ACuts/AngleCut_0.08334_histo.root"
pionTrueMC_All_FileName = '/Volumes/Seagate/Elena/TPC/TruePionGen.root'
pionRecoMC_FileName = '/Volumes/Seagate/Elena/TPC/MC60A_Pions.root'
pionTrueMC_MCCut_File = TFile.Open(pionTrueMC_MCCut_FileName )
pionTrueMC_DataCut_File = TFile.Open(pionTrueMC_DataCut_FileName )
pionTrueMC_All_File = TFile.Open(pionTrueMC_All_FileName )
pionRecoMC_File = TFile.Open(pionRecoMC_FileName )
# Get Interacting and Incident plots Reco
intTrue_MCC = pionTrueMC_MCCut_File.Get("AngleCutTrueXS/hInteractingKE")
incTrue_MCC = pionTrueMC_MCCut_File.Get("AngleCutTrueXS/hIncidentKE")
intTrue_Data = pionTrueMC_DataCut_File.Get("AngleCutTrueXS/hInteractingKE")
incTrue_Data = pionTrueMC_DataCut_File.Get("AngleCutTrueXS/hIncidentKE")
intTrue_All = pionTrueMC_All_File.Get("TrueXS/hInteractingKE")
incTrue_All = pionTrueMC_All_File.Get("TrueXS/hIncidentKE")
intReco = pionRecoMC_File.Get("RecoXS/hRecoInteractingKE")
incReco = pionRecoMC_File.Get("RecoXS/hRecoIncidentKE")
# Eff with MC Angle Cut
eff_MCC_Int = intReco.Clone("eff_MCC_Int")
eff_MCC_Int.Sumw2()
eff_MCC_Int.Divide(intTrue_MCC)
eff_MCC_Inc = incReco.Clone("eff_MCC_Inc")
eff_MCC_Inc.Sumw2()
eff_MCC_Inc.Divide(incTrue_MCC)
# Eff with Data Angle Cut
eff_Data_Int = intReco.Clone("eff_Data_Int")
eff_Data_Int.Sumw2()
eff_Data_Int.Divide(intTrue_Data)
eff_Data_Inc = incReco.Clone("eff_Data_Inc")
eff_Data_Inc.Sumw2()
eff_Data_Inc.Divide(incTrue_Data)
# Eff with All Interactions
eff_All_Int = intReco.Clone("eff_All_Int")
eff_All_Int.Sumw2()
eff_All_Int.Divide(intTrue_All)
eff_All_Inc = incReco.Clone("eff_All_Inc")
eff_All_Inc.Sumw2()
eff_All_Inc.Divide(incTrue_All)
# Cross Section Plots
XSTrue_MCC = intTrue_MCC .Clone("XS_MCC")
XSTrue_Data = intTrue_Data.Clone("XS_Data")
XSTrue_All = intTrue_All .Clone("XS_All")
XSReco = intReco .Clone("XSReco")
XSTrue_MCC .Scale(101.)
XSTrue_Data.Scale(101.)
XSTrue_All .Scale(101.)
XSReco .Scale(101.)
XSTrue_MCC .Sumw2()
XSTrue_Data.Sumw2()
XSTrue_All .Sumw2()
XSReco .Sumw2()
XSTrue_MCC .Divide(incTrue_MCC )
XSTrue_Data.Divide(incTrue_Data )
XSTrue_All .Divide(incTrue_All )
XSReco .Divide(incReco )
outFile = TFile("NewEfficiencyCorrectionPions60A.root","recreate")
outFile.cd()
eff_MCC_Int.Write()
eff_Data_Int.Write()
eff_All_Int.Write()
eff_MCC_Inc.Write()
eff_Data_Inc.Write()
eff_All_Inc.Write()
XSTrue_MCC .Write()
XSTrue_Data.Write()
XSTrue_All .Write()
XSReco .Write()
outFile.Write()
outFile.Close()
raw_input()
|
[
"elena.gramellini@yale.edu"
] |
elena.gramellini@yale.edu
|
2cd5d6af253d31c2282981c26b189a482c49ccde
|
0965812e37b6f12256f22c876b87370223cd3c8d
|
/beerproject/breweries/models/__init__.py
|
99d8e333ef3fc2cf3886e18e1c30ab51d15f4f06
|
[] |
no_license
|
MargaritaKartaviciute/BeerProject
|
72bd1850f431fda1d51e64963855119477112581
|
148f10a54fb0b1dac553328f644c5e14a8306a4c
|
refs/heads/master
| 2023-01-10T00:01:38.890208
| 2020-01-02T14:32:54
| 2020-01-02T14:32:54
| 228,893,049
| 0
| 0
| null | 2022-12-27T15:34:16
| 2019-12-18T17:48:38
|
Python
|
UTF-8
|
Python
| false
| false
| 150
|
py
|
from .categories import Categories
from .styles import Styles
from .geocodes import Geocodes
from .breweries import Breweries
from .beers import Beers
|
[
"margarityte@gmail.com"
] |
margarityte@gmail.com
|
fa1166d85281f86c70d72480c47ae13add17bb3e
|
bfef33b25941c0358e95a97738bbe0392b8158b5
|
/MyDairy/main/migrations/0004_auto_20210121_1028.py
|
c3182027796f2c8b107faca5d967dad7d7aba0d9
|
[] |
no_license
|
FacelessWanderer/mydairy
|
d882fbe29c6b248be7994f1690a14a1ce8908399
|
2b4adcb5c1f808a05dc14c336c89241e03e13aab
|
refs/heads/master
| 2023-04-19T18:10:46.549451
| 2021-05-15T15:30:33
| 2021-05-15T15:30:33
| 333,198,218
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
# Generated by Django 3.1.5 on 2021-01-21 08:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0003_auto_20210121_1010'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='categry',
new_name='category',
),
]
|
[
"68087847+FacelessWanderer@users.noreply.github.com"
] |
68087847+FacelessWanderer@users.noreply.github.com
|
2d12c679815abff67ca583fa1de9385e393a0303
|
a45b0b8d2ad5b17506ace412cd755d29b2b57872
|
/cookbook-python/misc/slate-test.py
|
cc9918fcddab807a74868d6ab3ec5718e979cea8
|
[] |
no_license
|
hiepbkhn/ml2017
|
e89a1546e412c3f203184006a2423922a4f95af0
|
0fd18fc53712db5e89802913e96f9fdd6e73f573
|
refs/heads/master
| 2023-08-16T15:14:41.786857
| 2023-08-03T03:21:32
| 2023-08-03T03:21:32
| 82,892,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
'''
Created on Feb 27, 2017
@author: Administrator
'''
import slate
with open('How to Read a Paper.pdf') as f:
doc = slate.PDF(f)
doc[1]
|
[
"hiepnh.vtt@gmail.com"
] |
hiepnh.vtt@gmail.com
|
4254446828a2d41fb2fc7c0ce9b1dd1a6cc086f4
|
3f4a035504508835de9ff53498d44b5872efb4c1
|
/gui/windows/win_window.py
|
d11adb380640ade4f51354294e0e7a8e3dedbeab
|
[] |
no_license
|
gODeaLoAple/Zuma
|
d87bab1715d90c1dfd33c575a11e7deacc749388
|
d65451eae0db3aa384f4ce4bad76ffd0ce54597a
|
refs/heads/master
| 2023-01-05T07:39:27.581785
| 2020-11-07T13:49:20
| 2020-11-07T13:49:20
| 273,963,542
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,118
|
py
|
from PyQt5.QtWidgets import QWidget, QLabel, QVBoxLayout, QPushButton
from PyQt5.QtCore import pyqtSlot, Qt
class WinWindow(QWidget):
def __init__(self, parent):
super().__init__(parent)
self.init_ui()
def init_ui(self):
text_label = QLabel("You are win!", self)
text_label.setAlignment(Qt.AlignCenter)
text_label.setFixedSize(200, 50)
restart_button = QPushButton("Next Level", self)
restart_button.setFixedSize(200, 50)
restart_button.clicked.connect(self.on_next_level_click)
open_main_menu_button = QPushButton("Main menu", self)
open_main_menu_button.setFixedSize(200, 50)
open_main_menu_button.clicked.connect(self.on_main_menu_click)
vbox = QVBoxLayout(self)
vbox.setAlignment(Qt.AlignCenter)
vbox.addWidget(text_label)
vbox.addWidget(restart_button)
vbox.addWidget(open_main_menu_button)
@pyqtSlot()
def on_next_level_click(self):
self.parent().restart_level()
@pyqtSlot()
def on_main_menu_click(self):
self.parent().open_main_menu()
|
[
"maksim.math@gmail.com"
] |
maksim.math@gmail.com
|
6c67bbd556a27d366714130c5f3f6f12314679ea
|
7f59e2c4e771c19378e9839406c220d3985e7efe
|
/public-engines/kaggle-titanic-engine/marvin_titanic_engine/prediction/__init__.py
|
e9b3c7c5a02a68e210e9f618975dfb6fa53f8cc7
|
[
"Apache-2.0"
] |
permissive
|
apache/incubator-marvin
|
c6ff32d50eb01ccd84266587d79f562a9e371496
|
58fdccf2e677041a13966ddbdd96d484edf3b474
|
refs/heads/develop
| 2023-08-30T12:46:56.973102
| 2022-11-18T15:27:52
| 2022-11-18T15:27:52
| 148,087,939
| 112
| 77
|
Apache-2.0
| 2023-03-07T05:45:59
| 2018-09-10T02:27:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 157
|
py
|
#!/usr/bin/env python
# coding=utf-8
from .prediction_preparator import PredictionPreparator
from .predictor import Predictor
from .feedback import Feedback
|
[
"daniel.takabayashi@gmail.com"
] |
daniel.takabayashi@gmail.com
|
6ff3928c2cc469c30d69a24c9694ee0f7764fa32
|
9d540749a258509b4f1e80cbc5833cba7320c976
|
/biointeract/hub/dataload/sources/biogrid/__init__.py
|
418920f09752fa7ef84fa85e3c0caa9c6071fd86
|
[
"Apache-2.0"
] |
permissive
|
shunsunsun/biothings.interactions
|
208f8c32d5fa998a601b58e70d1299f9277a355b
|
7a8b16e8119d6505b6b5d89623051c11f3649430
|
refs/heads/master
| 2021-09-15T04:30:41.584419
| 2018-05-25T19:44:56
| 2018-05-25T19:44:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 72
|
py
|
from .dumper import BiogridDumper
from .uploader import BiogridUploader
|
[
"greg@spritzr.com"
] |
greg@spritzr.com
|
cb9e35f9d5650b245d2818ff48eed06905914d59
|
8369cc6396310b6d6012c6ca78b2bcbbad4ac5d3
|
/db/dbconnect.py
|
69d2106a97679dd7269e5a4c598f2de2e9309ebc
|
[] |
no_license
|
warthogs32/locker
|
af59f694248a24ecf9d5103ca208d81208c2d331
|
f2b96c52515d23f38542e0c869d5f6ad87e11828
|
refs/heads/master
| 2023-01-27T12:30:10.275595
| 2020-03-02T01:32:38
| 2020-03-02T01:32:38
| 243,481,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
cred = credentials.Certificate('slohacks-269509-firebase-adminsdk-dun4x-9342e5bf13.json')
firebase_admin.initialize_app(cred)
db = firestore.client()  # use the firebase_admin app initialised above
doc_ref = db.collection(u'log').document(u'test')
doc_ref.set(
{
u'first' : u'jason'
})
|
[
"jj@jj-jj.org"
] |
jj@jj-jj.org
|
ce0d718523e13d1eae47d2d286507489e5e5dbcb
|
9b9817ee1029fdca265e6536fe51b4f1fe6f5e03
|
/data_structures/test/test_my_array_list.py
|
9367ed26e373ba210eafd299b62070fdd3bdb8c5
|
[] |
no_license
|
toebgen/exercises
|
1809d56d18aead4bf41b123de6c61e9febe2c149
|
cd56d29173cb9fde171f94ad6651ea2f5766d125
|
refs/heads/master
| 2020-05-24T11:23:30.391827
| 2020-04-24T14:30:30
| 2020-04-24T14:30:30
| 187,247,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,930
|
py
|
import unittest
from my_array_list import MyArrayList
class TestMyArrayList(unittest.TestCase):
def appendWithDummyData(self, length):
for x in range(length):
self.array_list.append(x)
def setUp(self):
self.array_list = MyArrayList()
def testExtend(self):
expected_size_after = self.array_list.size * self.array_list.extension_factor
self.array_list.extend()
self.assertEqual(self.array_list.size, expected_size_after)
def testDiminish(self):
self.appendWithDummyData(9)
expected_size_after = 8
expected_index_after = 8
self.array_list.diminish()
self.assertEqual(self.array_list.size, expected_size_after)
self.assertEqual(self.array_list.index, expected_index_after)
def testAppend(self):
self.assertEqual(len(self.array_list), 0)
test_values = [x for x in range(10)]
for counter, test_value in enumerate(test_values, 1):
self.array_list.append(test_value)
self.assertEqual(len(self.array_list), counter)
self.assertEqual(self.array_list[counter-1], test_value)
# TODO Test self.array_list.size ?
def testPop(self):
# Add 5 values
self.appendWithDummyData(5)
expected_value = 4
# Pop 1 value, hence 4 should be left
expected_size_after = 4
expected_index_after = 4
popped_value = self.array_list.pop()
self.assertEqual(popped_value, expected_value)
self.assertEqual(self.array_list.size, expected_size_after)
self.assertEqual(self.array_list.index, expected_index_after)
def testOperatorIn(self):
self.appendWithDummyData(3)
self.assertEqual(1 in self.array_list, True)
self.assertEqual(4 in self.array_list, False)
if __name__ == '__main__':
unittest.main()
|
[
"toebgen@gmail.com"
] |
toebgen@gmail.com
|
f400cabc87d522781cbaeca42830eb1280a17f5c
|
bc2e77b92ae12e34aec9445f5e8c92fc399e6cc8
|
/vautils/orchestration/vcenter/__init__.py
|
f6d9e885db50dabf4acf38b33be40864884ab7f6
|
[] |
no_license
|
18782967131/test
|
fc8049c10f02f7b49a4dd4a675618ccf388c4f2f
|
cb02979e233ce772bd5fe88ecdc31caf8764d306
|
refs/heads/master
| 2020-05-27T08:41:57.535153
| 2017-11-09T08:31:07
| 2017-11-09T08:31:07
| 82,536,387
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,421
|
py
|
import os,glob
from imp import find_module, load_module
from vautils import logger
from vautils.orchestration.vcenter.vcenter_connector import VcenterConnector
from feature.exceptions import *
supported_mod = ['host','operation','network','vm','connector']
class VcenterConfig(object):
@classmethod
def get_vcenter_config_util(cls_name, resource=None):
"""
function to dynamically load the all vcenter class
Kwargs:
:resource: supported vm factory resource type
Returns:
:instance of the self class for the resource
"""
if not resource:
raise InvalidResource(resource)
product = resource.get_nodetype()
abs_path = os.path.dirname(os.path.abspath(__file__))
for smod in supported_mod:
if smod == 'connector' :
filenames = ['vcenter_connector.py']
abs_path1 = abs_path
else :
abs_path1 = os.path.join(abs_path,smod)
filename='{}_{}'.format(product,smod)
filenames = glob.glob(os.path.join(abs_path1,'*.py'))
for file_name in filenames :
file_name = os.path.basename(file_name)
if file_name == '__init__.py' or file_name == 'vm_verification.py' \
or file_name == 'vcenter_verification.py' or file_name == 'vnet_action.py' :
continue
if len(filenames) == 2 :
filename = '{}_{}'.format(product, smod)
else :
filename = file_name.split('.')[0]
try:
modloc = find_module(filename, [abs_path1])
except ImportError:
raise
else:
load_mod = load_module(filename, modloc[0], modloc[1], modloc[2])
vcs = filename.split('_')
modulename = vcs[0].capitalize() + vcs[1].capitalize()
if modulename == 'VcenterVm' :
modulename = 'VcenterVMs'
try:
util = getattr(load_mod, modulename)
setattr(cls_name,modulename,util(resource))
except AttributeError:
raise
return cls_name
|
[
"515947075@qq.com"
] |
515947075@qq.com
|
79c3488d815b757d1d05562e57013879c7606de0
|
8683bf70ea56283caf0c7b1fb742ed002ce1d8cd
|
/pages/migrations/0015_auto_20200223_1754.py
|
64981d98cf32971db40b96fce5cadc903583e861
|
[] |
no_license
|
Arzor529/learn-pi
|
df6b18cd1735eb31f1f712d902209ff73040c441
|
1a021181bb768d8f0a78fd64988f484a925f4b0b
|
refs/heads/main
| 2023-02-24T18:48:38.614643
| 2021-01-31T19:52:11
| 2021-01-31T19:52:11
| 334,742,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
# Generated by Django 3.0.2 on 2020-02-23 17:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('pages', '0014_auto_20200223_1749'),
]
operations = [
migrations.RemoveField(
model_name='award',
name='lesson_complete',
),
migrations.DeleteModel(
name='LessonComplete',
),
]
|
[
"aaron.keel@hotmail.co.uk"
] |
aaron.keel@hotmail.co.uk
|
163c8f79f45c34299206a1c1b091dc70a20faabe
|
304e639beb84844f9f2dbd13292177016e658f8b
|
/application/sources/database/base.py
|
f148127511463efdfbfc7182b006fdab658198ea
|
[
"MIT"
] |
permissive
|
JuiceFV/Cats_Queue_Management_System
|
f7dd3cdbf24499160042c25e1f5d21b2e542d0b9
|
1aa57fefcd96059ac63391d6d178ea7cfa49e1d0
|
refs/heads/master
| 2022-12-13T01:22:46.859127
| 2021-09-27T14:16:25
| 2021-09-27T14:16:25
| 185,384,218
| 1
| 0
|
MIT
| 2022-12-08T04:09:28
| 2019-05-07T11:04:53
|
Python
|
UTF-8
|
Python
| false
| false
| 5,782
|
py
|
"""In this module contains the basic functions for database interactions.
"""
import asyncpgsa
from .db import tokens
from sqlalchemy import (
select, insert, delete, text, asc, func,
)
async def db_empty(app):
"""Check for database emptiness.
Key arguments:
app -- our application
returns True/False depending on database emptiness.
"""
async with app['db'].acquire() as conn:
query = text("SELECT True FROM tokens LIMIT(1)")
return False if await conn.fetch(query) else True
async def on_start(app):
"""When the application starts it will configure the database with
options from the 'database_config' which are placed in config.yaml.
Key arguments:
app -- our application
"""
config = app['config']
app['db'] = await asyncpgsa.create_pool(**config['database_config'])
async def insert_token_into_db(app, token):
"""This function responsible for token's insertion into database.
Key arguments:
app -- our application
token -- the token which should be inserted
"""
async with app['db'].acquire() as conn:
# SQL equivalent: 'INSERT INTO tokens (token) values ('A00')'
query = insert(tokens).values(
token=token
)
await conn.fetchrow(query)
async def delete_token_from_db(app, token):
"""Delete a token from database.
Keywords arguments:
app -- the application
token -- the deleting token
There are several implementations of this function.
For instance, we'd inquire only one but huge request.
Like that:
DO $$
BEGIN
IF (SELECT token FROM tokens ORDER BY id ASC LIMIT 1) = <required-token> THEN
RETURN DELETE FROM tokens WHERE token = <required-token> RETURNING 't'
ELSE IF (SELECT True FROM tokens WHERE token = <required-token>) = True THEN
RETURN "I don't know how to return (specifically in what format) but I believe you can come up with this:)"
ELSE
RETURN 'f'
END IF;
END $$;
However I used another way. I don't want to use big query like that, therefore I decide to split these queries
within python script. May be it makes some influence onto performance due to several queries but difference is few.
Returns whether the token is available and the token-self.
"""
async with app['db'].acquire() as conn:
# Checks for the table emptiness.
query = text("SELECT True FROM tokens LIMIT(1)")
if await conn.fetch(query):
# If the table isn't empty, acquire the token at the first position.
token_at_first_pos = await conn.fetch(select([tokens.c.token]).order_by(asc(tokens.c.id)).limit(1))
# Check whether the first-position token coincides with the token passed as the argument,
# i.e. the user-token supplied by the client.
if token_at_first_pos[0]['token'] == token:
# If all is fine then remove it from the table.
query = delete(tokens).where(tokens.c.token == token).returning(tokens.c.token)
await conn.fetch(query)
# Returns following: (Both are true)
# 1) Is the token available?
# 2) Does the token coincide with the first token?
return True, True
# If the passed token isn't first, check whether the token exists at all.
elif await conn.fetch(select([tokens]).where(tokens.c.token == token)):
# Return that token is available, but it isn't first.
return True, False
# Otherwise report that the token passed by the user isn't available.
# (According to the task this means "cheating".)
else:
return False, False
# The table is already empty. The user needs to get a token.
else:
return False, 'Table is empty'
async def get_all_tokens(app):
"""This function created for a js displaying function.
We're calling this function when sending a request to auto-update a queue.
Keywords arguments:
app -- the application
returns all tokens in database
"""
async with app['db'].acquire() as conn:
# SQL equivalent: 'SELECT token FROM tokens'.
# If database is empty then the query returns empty list ('[]')
query = select([tokens.c.token])
result = await conn.fetch(query)
return result
async def get_num_of_tokens(app):
"""This function returns number of tokens in database
It uses only once in the Token.get for a position retrieving.
We're calling this function when sending a request to append a token.
Therefore we need to get a position of appended token, and because of we use a queue-resembling data structure
the position is the number of tokens in database.
Keywords arguments:
app -- the application
returns number of tokens/position
"""
async with app['db'].acquire() as conn:
query = select([func.count(tokens.c.token)])
result = await conn.fetch(query)
return result
async def on_shutdown(app):
"""When the application ends its work it will close the connection with database
Key arguments:
app -- our application
"""
async with app['db'].acquire() as conn:
query = delete(tokens)
# frees db
await conn.fetchrow(query)
# Sets id counting from very beginning
await conn.execute("ALTER SEQUENCE tokens_id_seq RESTART ")
await app['db'].close()
# It's an important step for the tests
if app['config']['run_type'] == 'test':
with open(app['ban_list'][0].name, 'w') as file:
file.write('')
app['ban_list'][0].close()
|
[
"alexcar19622000@gmail.com"
] |
alexcar19622000@gmail.com
|
e6bf86ac11ef8bd91e210aac264b6377a9e75b9b
|
e949629d3f61aabd7645d097dd4d4537261c69f0
|
/data-preparation/generate_classes_meta_stanford_cars.py
|
dfbd0398554cd893eb8edc57d717ec04a4078645
|
[] |
no_license
|
alirazamir7781/image-search-poc
|
fcf8f974c4838082bb151584af8f034cbe2421f4
|
dc60e8cb0c729f20c29a2eac5d03bf6f5fd2f894
|
refs/heads/master
| 2022-01-19T09:05:46.785713
| 2019-01-03T17:43:15
| 2019-01-03T17:43:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,556
|
py
|
import sys
import argparse
import simplejson as json
import scipy.io as sio
import time
from functools import partial
from toolz.dicttoolz import get_in
if __name__ == '__main__':
p = argparse.ArgumentParser(description='Filters a dataset.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('--cars_meta', type=str, required=True)
p.add_argument('--cars_annos', type=str, required=True)
p.add_argument('classes_output_file', nargs='?', type=argparse.FileType('w'),
default=sys.stdout,
help='The prepared dataset will be writen to here.')
args = p.parse_args()
cars_meta = sio.loadmat(args.cars_meta)
cars_annos = sio.loadmat(args.cars_annos)
def to_dataset_elem(cars_meta,cars_annos):
description = get_in([0], cars_meta)
classes = str(get_in([0,0,0], cars_annos))
split_desc = ''.join(description).split(" ")
return {str(cars_meta[0]): {'id':classes,
'make': split_desc[0],
'model': split_desc[1],
'body': split_desc[2],
'year': split_desc[3]}}
dataset = list(map(partial(to_dataset_elem), cars_meta["class_names"][0], cars_annos['annotations'][0]))
print('[', file=args.classes_output_file)
prev = None
for item in dataset:
if prev is not None:
print(',', file=args.classes_output_file)
print(json.dumps(item, indent=4, separators=(',', ': ')),
end='',
file=args.classes_output_file)
prev = item
print('\n]', file=args.classes_output_file)
|
[
"zeeshanmalik@Zeeshans-MacBook-Pro-2.local"
] |
zeeshanmalik@Zeeshans-MacBook-Pro-2.local
|
60142c0fc413f3a23ace11f584957f13800b4491
|
680d2229b64d80b9d31cbf1c1d4866e7c8242258
|
/hyperconnect/junho85-gets-gem.py
|
f05404170addfc45a374cbf6f0fc4c77a3de5415
|
[] |
no_license
|
junho85/ps-python
|
c72f05c4390a2331b2e4a9fd4bdb5a173822ba76
|
5b1de5d76a5f7577ca76d7a533542b3da2614ee7
|
refs/heads/master
| 2021-06-12T01:38:30.912376
| 2021-03-30T15:27:54
| 2021-03-30T15:27:54
| 167,168,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,822
|
py
|
# -*- coding: utf-8 -*-
import sys
import json
from random import choice
import math
def dotproduct(v1, v2):
return sum((a * b) for a, b in zip(v1, v2))
def length(v):
return math.sqrt(dotproduct(v, v))
def angle(v1, v2):
return math.acos(dotproduct(v1, v2) / (length(v1) * length(v2)))
def get_position_from_location(location):
return location[1] + location[0] * 8
def get_location_from_position(position):
return (position % 8, position // 8)
def get_player_location(map_yx, player):
for y in range(0, 8):
for x in range(0, 8):
if map_yx[y][x] == player:
return (y, x)
return (-1, -1)
def tuple_sum(a, b):
return (a[0] + b[0], a[1] + b[1])
def next_step(step):
if step == 'U':
return (-1, 0)
elif step == 'D':
return (1, 0)
elif step == 'L':
return (0, -1)
elif step == 'R':
return (0, 1)
return (0, 0)
def is_available(map_yx, location):
position = get_position_from_location(location)
if position < 0 or (location[0] < 0 or location[0] >= 8 or location[1] < 0 or location[1] >= 8):
return False
ret = map_yx[location[0]][location[1]]
if ret != '*':
return False
return True
is_debug = False
def main():
'''
Below is the initial state of the board.
If you want to move up print U, to move down print D,
to move right print R, and to move left print L.
The input arrives in JSON format:
'map' : the state of the 8 * 8 board, one character per cell, as a string with no whitespace.
'opponent_history' : the directions the opponent has moved so far, as a string with no whitespace. ex) 'UDDLLUR'
'my_history' : the directions I have moved so far, as a string with no whitespace. ex) same as above
'me' : tells me which player I am. ex) 'A' or 'B'
'opponent' : tells me which player the opponent is. ex) same as above
Detailed description of the map
💎 : a cell you can move to; it is called a gem
A, B : as explained above, the letter given as 'me' is the piece you move.
a, b : cells that A or B has already passed through, in other words cells that can never be visited again.
'''
########################
# #
# A 💎💎💎💎💎💎💎 #
# 💎💎💎💎💎💎💎💎 #
# 💎💎💎💎💎💎💎💎 #
# 💎💎💎💎💎💎💎💎 #
# 💎💎💎💎💎💎💎💎 #
# 💎💎💎💎💎💎💎💎 #
# 💎💎💎💎💎💎💎💎 #
# 💎💎💎💎💎💎💎B #
# #
########################
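# Illustrative only (not part of the original statement): a rough sketch of the JSON payload
# this script expects in sys.argv[1], assuming the 64-character board string uses '*' for gems
# and lower-case 'a'/'b' for visited cells, as the parsing below assumes.
# Example (board string truncated here):
# {"map": "A*******...*******B", "opponent_history": "UDD",
# "my_history": "RRL", "me": "A", "opponent": "B"}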
if (is_debug):
data = json.loads(sys.argv[2])
else:
data = json.loads(sys.argv[1])
map_string = data['map']
opponent_history = data['opponent_history']
my_history = data['my_history']
player = data['me']
opponent = data['opponent']
# For fun, try writing your code using the gem emoji directly!
new_input_str = map_string.replace("*", "💎")
map_yx = []
for i in range(8):
map_yx.append(list(map_string[8 * i:8 * i + 8]))
# TODO: change the code below to build something great!
available = [
['R', (1, 0), 0],
['L', (-1, 0), 0],
['D', (0, 1), 0],
['U', (0, -1), 0]
]
location = get_player_location(map_yx, player)
position = get_position_from_location(location)
# check available moves
for i in range(0, len(available)):
if is_available(map_yx, tuple_sum(location, next_step(available[i][0]))):
available[i][2] = 1.0
available_target = sorted([x for x in available], key=lambda x: x[2], reverse=True)
# calc opponent's center
if opponent_history:
op_x = 0
op_y = 0
for j in range(8):
for i in range(8):
if 'b' == map_yx[j][i]:
op_x += i;
op_y += j
op_x /= len(opponent_history)
op_y /= len(opponent_history)
for i in range(len(available_target)):
if available_target[i][2] == 0:
continue
a = angle(available_target[i][1], (op_x, op_y))
if a == 0.0:
a = math.inf
else:
a = a
available_target[i][2] = a
available_target = sorted([x for x in available], key=lambda x: x[2], reverse=True)
ret = available_target[0][0]
if (is_debug):
return bytes(ret, 'utf-8')
else:
print(ret)
if is_debug == False:
main()
|
[
"junho85@gmail.com"
] |
junho85@gmail.com
|
b8951924632dac460f7d4f314645390863883216
|
f504253210cec1c4ec6c3ea50a45564db7d6cd7f
|
/prettyqt/bluetooth/bluetoothserviceinfo.py
|
1550d47d7d25b376a8935aee536e083c25471963
|
[
"MIT"
] |
permissive
|
phil65/PrettyQt
|
b1150cb4dce982b9b8d62f38f56694959b720a3e
|
f00500d992d1befb0f2c2ae62fd2a8aafba7fd45
|
refs/heads/master
| 2023-08-30T21:00:08.905444
| 2023-08-17T12:24:45
| 2023-08-17T12:24:45
| 177,451,205
| 17
| 5
|
MIT
| 2020-08-15T22:21:18
| 2019-03-24T18:10:21
|
Python
|
UTF-8
|
Python
| false
| false
| 4,059
|
py
|
from __future__ import annotations
from collections.abc import MutableMapping
from typing import Literal
from prettyqt.qt import QtBluetooth
from prettyqt.utils import bidict, datatypes
AttributeIdStr = Literal[
"service_record_handle",
"service_class_ids",
"service_record_state",
"service_id",
"protocol_descriptor_list",
"browse_group_list",
"language_base_attribute_id_list",
"service_info_time_to_live",
"service_availablity",
"bluetooth_profile_descriptor_list",
"documentation_url",
"client_executable_url",
"icon_url",
"additional_protocol_descriptor_list",
"primary_language_base",
"service_name",
"service_description",
"service_provider",
]
AttributeId = QtBluetooth.QBluetoothServiceInfo.AttributeId
ATTRIBUTE_IDS: bidict[AttributeIdStr, AttributeId] = bidict(
service_record_handle=AttributeId.ServiceRecordHandle,
service_class_ids=AttributeId.ServiceClassIds,
service_record_state=AttributeId.ServiceRecordState,
service_id=AttributeId.ServiceId,
protocol_descriptor_list=AttributeId.ProtocolDescriptorList,
browse_group_list=AttributeId.BrowseGroupList,
language_base_attribute_id_list=AttributeId.LanguageBaseAttributeIdList,
service_info_time_to_live=AttributeId.ServiceInfoTimeToLive,
service_availablity=AttributeId.ServiceAvailability,
bluetooth_profile_descriptor_list=AttributeId.BluetoothProfileDescriptorList,
documentation_url=AttributeId.DocumentationUrl,
client_executable_url=AttributeId.ClientExecutableUrl,
icon_url=AttributeId.IconUrl,
additional_protocol_descriptor_list=AttributeId.AdditionalProtocolDescriptorList,
primary_language_base=AttributeId.PrimaryLanguageBase,
# service_name=AttributeId.ServiceName,
service_description=AttributeId.ServiceDescription,
service_provider=AttributeId.ServiceProvider,
)
Protocol = QtBluetooth.QBluetoothServiceInfo.Protocol
ProtocolStr = Literal["unknown", "l2_cap", "rfcomm"]
PROTOCOL: bidict[ProtocolStr, Protocol] = bidict(
unknown=Protocol.UnknownProtocol,
l2_cap=Protocol.L2capProtocol,
rfcomm=Protocol.RfcommProtocol,
)
class BluetoothServiceInfo(
QtBluetooth.QBluetoothServiceInfo, MutableMapping, metaclass=datatypes.QABCMeta
):
"""Enables access to the attributes of a Bluetooth service.
Also implements the MutableMapping interface, so it can be used as a dictionary.
"""
def __getitem__(self, value: str | int | AttributeId):
match value:
case int():
flag = value
case str():
if value not in ATTRIBUTE_IDS:
raise KeyError(value)
flag = ATTRIBUTE_IDS[value].value
case AttributeId():
flag = value.value
case _:
raise KeyError(value)
return self.attribute(flag)
def __delitem__(self, value: str | int | AttributeId):
match value:
case int():
flag = value
case str():
flag = ATTRIBUTE_IDS[value].value
case AttributeId():
flag = value.value
return self.removeAttribute(flag)
def __setitem__(self, index: str | int | AttributeId, value):
"""Set attribute."""
match index:
case int():
flag = index
case str():
flag = ATTRIBUTE_IDS[index].value
case AttributeId():
flag = index.value
return self.setAttribute(flag, value)
def __contains__(self, value: int) -> bool:
attr = ATTRIBUTE_IDS.inverse[value]
return self.contains(attr)
def __iter__(self):
"""Iter the info attributes."""
return iter(self.attributes())
def __len__(self):
return len(self.attributes())
if __name__ == "__main__":
from prettyqt import widgets
app = widgets.app()
address = BluetoothServiceInfo()
address["documentation_url"] = "test"
del address["documentation_url"]
|
[
"philipptemminghoff@googlemail.com"
] |
philipptemminghoff@googlemail.com
|
dfed2e98d13ccd325edc9957274158b2793b8f0b
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/dougsouza_face-frontalization/face-frontalization-master/frontalize.py
|
288c5ab260d12118c575d932c733df5094a5c992
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 4,885
|
py
|
__author__ = 'Douglas'
import scipy.io as scio
import cv2
import numpy as np
np.set_printoptions(formatter={'float_kind': lambda x: "%.4f" % x})
class ThreeD_Model:
def __init__(self, path, name):
self.load_model(path, name)
def load_model(self, path, name):
model = scio.loadmat(path)[name]
self.out_A = np.asmatrix(model['outA'][0, 0], dtype='float32') #3x3
self.size_U = model['sizeU'][0, 0][0] #1x2
self.model_TD = np.asarray(model['threedee'][0,0], dtype='float32') #68x3
self.indbad = model['indbad'][0, 0]#0x1
self.ref_U = np.asarray(model['refU'][0,0])
def frontalize(img, proj_matrix, ref_U, eyemask):
ACC_CONST = 800
img = img.astype('float32')
print "query image shape:", img.shape
bgind = np.sum(np.abs(ref_U), 2) == 0
# count the number of times each pixel in the query is accessed
threedee = np.reshape(ref_U, (-1, 3), order='F').transpose()
temp_proj = proj_matrix * np.vstack((threedee, np.ones((1, threedee.shape[1]))))
temp_proj2 = np.divide(temp_proj[0:2, :], np.tile(temp_proj[2, :], (2,1)))
bad = np.logical_or(temp_proj2.min(axis=0) < 1, temp_proj2[1, :] > img.shape[0])
bad = np.logical_or(bad, temp_proj2[0, :] > img.shape[1])
bad = np.logical_or(bad, bgind.reshape((-1), order='F'))
bad = np.asarray(bad).reshape((-1), order='F')
nonbadind = np.nonzero(bad == 0)[0]
temp_proj2 = temp_proj2[:, nonbadind]
# because python arrays are zero indexed
temp_proj2 -= 1
ind = np.ravel_multi_index((np.asarray(temp_proj2[1, :].round(), dtype='int64'), np.asarray(temp_proj2[0, :].round(),
dtype='int64')), dims=img.shape[:-1], order='F')
synth_frontal_acc = np.zeros(ref_U.shape[:-1])
ind_frontal = np.arange(0, ref_U.shape[0]*ref_U.shape[1])
ind_frontal = ind_frontal[nonbadind]
c, ic = np.unique(ind, return_inverse=True)
bin_edges = np.r_[-np.Inf, 0.5 * (c[:-1] + c[1:]), np.Inf]
count, bin_edges = np.histogram(ind, bin_edges)
synth_frontal_acc = synth_frontal_acc.reshape(-1, order='F')
synth_frontal_acc[ind_frontal] = count[ic]
synth_frontal_acc = synth_frontal_acc.reshape((320, 320), order='F')
synth_frontal_acc[bgind] = 0
synth_frontal_acc = cv2.GaussianBlur(synth_frontal_acc, (15, 15), 30., borderType=cv2.BORDER_REPLICATE)
frontal_raw = np.zeros((102400, 3))
frontal_raw[ind_frontal, :] = cv2.remap(img, temp_proj2[0, :].astype('float32'), temp_proj2[1, :].astype('float32'), cv2.INTER_CUBIC)
frontal_raw = frontal_raw.reshape((320, 320, 3), order='F')
# which side has more occlusions?
midcolumn = np.round(ref_U.shape[1]/2)
sumaccs = synth_frontal_acc.sum(axis=0)
sum_left = sumaccs[0:midcolumn].sum()
sum_right = sumaccs[midcolumn+1:].sum()
sum_diff = sum_left - sum_right
if np.abs(sum_diff) > ACC_CONST: # one side is ocluded
ones = np.ones((ref_U.shape[0], midcolumn))
zeros = np.zeros((ref_U.shape[0], midcolumn))
if sum_diff > ACC_CONST: # left side of face has more occlusions
weights = np.hstack((zeros, ones))
else: # right side of face has more occlusions
weights = np.hstack((ones, zeros))
weights = cv2.GaussianBlur(weights, (33, 33), 60.5, borderType=cv2.BORDER_REPLICATE)
# apply soft symmetry to use whatever parts are visible in ocluded side
synth_frontal_acc /= synth_frontal_acc.max()
weight_take_from_org = 1. / np.exp(0.5+synth_frontal_acc)
weight_take_from_sym = 1 - weight_take_from_org
weight_take_from_org = np.multiply(weight_take_from_org, np.fliplr(weights))
weight_take_from_sym = np.multiply(weight_take_from_sym, np.fliplr(weights))
weight_take_from_org = np.tile(weight_take_from_org.reshape(320, 320, 1), (1, 1, 3))
weight_take_from_sym = np.tile(weight_take_from_sym.reshape(320, 320, 1), (1, 1, 3))
weights = np.tile(weights.reshape(320, 320, 1), (1, 1, 3))
denominator = weights + weight_take_from_org + weight_take_from_sym
frontal_sym = np.multiply(frontal_raw, weights) + np.multiply(frontal_raw, weight_take_from_org) + np.multiply(np.fliplr(frontal_raw), weight_take_from_sym)
frontal_sym = np.divide(frontal_sym, denominator)
# exclude eyes from symmetry
frontal_sym = np.multiply(frontal_sym, 1-eyemask) + np.multiply(frontal_raw, eyemask)
frontal_raw[frontal_raw > 255] = 255
frontal_raw[frontal_raw < 0] = 0
frontal_raw = frontal_raw.astype('uint8')
frontal_sym[frontal_sym > 255] = 255
frontal_sym[frontal_sym < 0] = 0
frontal_sym = frontal_sym.astype('uint8')
else: # both sides are occluded pretty much to the same extent -- do not use symmetry
frontal_sym = frontal_raw
return frontal_raw, frontal_sym
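# Hypothetical usage sketch (not part of the original module): proj_matrix, ref_U and
# eyemask would come from the camera-calibration and 3D reference-model loading steps
# of the frontalization pipeline, e.g.
#   raw, sym = frontalize(cv2.imread('query.jpg'), proj_matrix, ref_U, eyemask)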
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
1d0de82fd05133a73c2fa3479a0c7be727ae2905
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02821/s020142349.py
|
50cb9d7c7baefeb952496c4a3c79bcfb25bba6c7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
import sys
from collections import *
import heapq
import math
import bisect
from itertools import permutations,accumulate,combinations,product
from fractions import gcd
def input():
return sys.stdin.readline()[:-1]
def ruiseki(lst):
return [0]+list(accumulate(lst))
mod=pow(10,9)+7
n,m=map(int,input().split())
a=list(map(int,input().split()))
a.sort()
left=0
right=(10**5)*2+5
s=ruiseki(a)
ans=0
while 1:
mid=(left+right)//2
cnt=0
for i in range(n):
tmp=bisect.bisect_left(a,mid-a[i])
cnt+=n-tmp
if cnt<m:
right=mid
else:
left=mid
# print(left,right,mid)
if right-left<=1:
break
for i in range(n):
tmp=bisect.bisect(a,left-a[i])
cnt=n-tmp
ans+=cnt*a[i]+s[n]-s[tmp]
m-=cnt
ans+=m*left
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
3c9e22c6450f222400baa09f6df8f48791d76322
|
a7d56438307e90d2939ecd203d062525b078dd80
|
/MedicineLibrary/asgi.py
|
49cec13f702daaf6c04550d89c647b991a9e0969
|
[] |
no_license
|
rak1b/MedicineLibrary
|
c7834654280dc19a51395421643e6c0536b15401
|
07e2ef8bff82371b9f091bcb48911eb7eee245b5
|
refs/heads/master
| 2023-08-08T01:03:43.519888
| 2021-09-18T18:46:22
| 2021-09-18T18:46:22
| 406,848,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
"""
ASGI config for MedicineLibrary project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MedicineLibrary.settings')
application = get_asgi_application()
|
[
"rak13.dev@gmail.com"
] |
rak13.dev@gmail.com
|
cb3ab2a00c7c63d9d6b35fd9139e532e65e770de
|
7992bfaf2b16fb39d8951d3f0ec1998f139f3a7e
|
/runner.py
|
68c28d2ee2a93b91641345f3e436b8628f596eab
|
[] |
no_license
|
anjyzplace/drakefork
|
f6ca6a83fa53dad7b08b5d3a6a81d6c905b6bafd
|
08d3c23f776bc96e4f1bd6b102e97c685576cad0
|
refs/heads/main
| 2023-02-19T02:06:03.790650
| 2021-01-20T21:04:25
| 2021-01-20T21:04:25
| 331,431,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 691
|
py
|
import getopt, sys
from fetcher import fetch
argumentList = sys.argv[1:]
# Options
options = "heo:"
# Long options
long_options = ["Help", "Email", "Output ="]
try:
# Parsing argument
arguments, values = getopt.getopt(argumentList, options, long_options)
# checking each argument
for currentArgument, currentValue in arguments:
if currentArgument in ("-h", "--Help"):
print ("Diplaying Help")
elif currentArgument in ("-e", "--Email"):
# print ("Displaying file_name:", sys.argv[0])
email = argumentList[1]
fetch(email)
elif currentArgument in ("-o", "--Output"):
print ("Displaying file_name:", sys.argv[0])
except getopt.error as err:
print (str(err))
|
[
"anjola@outlook.com"
] |
anjola@outlook.com
|
d4312d38b45c273521c11d2ca388d74497fe0102
|
808eb39006b7d1baa7a2b15c2e9614a47a5019f0
|
/regressão linear simples - mundo com exceção.py
|
a9f8e90e3ed2f0d497eccf289c62054be4011f34
|
[
"MIT"
] |
permissive
|
MarlonBeloMarques/Consumo-de-bebida-alcoolica-mundial
|
40c706c6c6e568c8986e0fb311ae70e72ca2b4b6
|
edc5637f963ba5702295fc20badbbd231cddc3b1
|
refs/heads/master
| 2022-11-07T09:45:15.337463
| 2020-06-28T19:53:31
| 2020-06-28T19:53:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,223
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from yellowbrick.regressor import ResidualsPlot
'''Correlation between the total consumption of each alcoholic beverage VS
the total amount of alcohol ingested worldwide (non-drinking countries excluded), per capita'''
# Load the dataset for every country and drop the countries with no alcohol consumption
bebida_mundo = pd.read_csv('Consumo de bebidas alcoólicas ao redor do mundo.csv')
for index, row in bebida_mundo.iterrows():
if row['total_litres_of_pure_alcohol'] == 0:
bebida_mundo = bebida_mundo.drop(index)
'''1) Linear regression: beer VS total alcohol ingested'''
# Select the columns used as the explanatory and response variables, respectively
X = bebida_mundo.iloc[:, 1].values
y = bebida_mundo.iloc[:, 4].values
correlacao_cerveja = np.corrcoef(X, y)
# Build the linear regression model
X = X.reshape(-1, 1)
modelo_cerveja = LinearRegression()
modelo_cerveja.fit(X, y)
# Show the accuracy (R^2 score) of the beer linear regression model
score_cerveja = modelo_cerveja.score(X, y)
# Show the intercept and the slope of the model's regression line, respectively
modelo_cerveja.intercept_
modelo_cerveja.coef_
# Scatter plot of the beer data with the fitted regression line
plt.scatter(X, y)
plt.title('Quantidade de cerveja ingerida por ano', fontsize = 16)
plt.xlabel('Copos de cerveja por pessoa')
plt.ylabel('Total de álcool ingerido (L)')
plt.plot(X, modelo_cerveja.predict(X), color = 'red')
# Predict y for x = 400 both by hand and with the model, respectively
modelo_cerveja.intercept_ + modelo_cerveja.coef_ * 400
modelo_cerveja.predict([[400]])
''' Since the study does not state a serving size in litres, we can make assumptions from this model.
For example, if a person drinks 400 glasses of beer per year and we adopt a 300 ml (0.3 L) glass, a
person who drinks 400 glasses per year (depending on the country, obviously) drinks 120 litres of beer,
and in pure alcohol alone drinks approximately 13.65 litres (roughly 11.37% of that volume) '''
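# Quick sanity check of the figures above (a hypothetical 0.3 L glass is assumed):
# 400 * 0.3 = 120 L of beer per year, and 13.65 / 120 is roughly 11.4% pure alcohol by volume.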
# Residuals and their plot (the distance between each point and the regression line)
modelo_cerveja._residues
visualizador_cerveja = ResidualsPlot(modelo_cerveja)
visualizador_cerveja.fit(X, y)
visualizador_cerveja.poof()
'''2) Linear regression: spirits VS total alcohol ingested'''
A = bebida_mundo.iloc[:, 2].values
b = bebida_mundo.iloc[:, 4].values
correlacao_destilados = np.corrcoef(A, b)
A = A.reshape(-1, 1)
modelo_destilados = LinearRegression()
modelo_destilados.fit(A, b)
score_destilados = modelo_destilados.score(A, b)
modelo_destilados.intercept_
modelo_destilados.coef_
plt.scatter(A, b)
plt.title('Quantidade de destilados ingerido por ano', fontsize = 16)
plt.xlabel('Copos de destilados por pessoa')
plt.ylabel('Total de álcool ingerido (L)')
plt.plot(A, modelo_destilados.predict(A), color = 'yellow')
modelo_destilados.intercept_ + modelo_destilados.coef_ * 400
modelo_destilados.predict([[400]])
''' Since the study does not state a serving size in litres, we can make assumptions from this model.
For example, if a person drinks 400 servings of spirits per year and we adopt a 100 ml (0.1 L) serving, a
person who drinks 400 servings per year (depending on the country, obviously) drinks 40 litres of spirits,
and in pure alcohol alone drinks approximately 13.21 litres (roughly 33.2% of that volume) '''
modelo_destilados._residues
visualizador_destilados = ResidualsPlot(modelo_destilados)
visualizador_destilados.fit(A, b)
visualizador_destilados.poof()
'''3) Linear regression: wine VS total alcohol ingested'''
C = bebida_mundo.iloc[:, 3].values
d = bebida_mundo.iloc[:, 4].values
correlacao_vinho = np.corrcoef(C, d)
C = C.reshape(-1, 1)
modelo_vinho = LinearRegression()
modelo_vinho.fit(C, d)
modelo_vinho.intercept_
modelo_vinho.coef_
score_vinho = modelo_vinho.score(C, d)
plt.scatter(C, d)
plt.title('Quantidade de vinho ingerido por ano', fontsize = 16)
plt.xlabel('Taças de vinho por pessoa')
plt.ylabel('Total de álcool ingerido (L)')
plt.plot(C, modelo_vinho.predict(C), color = 'purple')
modelo_vinho.intercept_ + modelo_vinho.coef_ * 400
modelo_vinho.predict([[400]])
''' Since the study does not state a serving size in litres, we can make assumptions from this model.
For example, if a person drinks 400 glasses of wine per year: a wine glass holds about 450 ml (0.45 L),
but a typical pour is only about a third of the glass, i.e. 150 ml (0.15 L). A person who drinks 400
glasses per year (depending on the country, obviously) drinks 60 litres of wine, and in pure alcohol
alone drinks approximately 15.38 litres (roughly 25.63% of that volume) '''
modelo_vinho._residues
visualizador_vinho = ResidualsPlot(modelo_vinho)
visualizador_vinho.fit(C, d)
visualizador_vinho.poof()
|
[
"noreply@github.com"
] |
MarlonBeloMarques.noreply@github.com
|
b573f6454ee0b453076f72b2e7ea8f09a2cd3522
|
bc2bc86fc1666968aaf640af03c5b24f4735d66b
|
/Task 2/Q.11.py
|
a4ab6d7036623904e1effd4ebb2ab48b9c12b0e4
|
[] |
no_license
|
ghimireu57/AssignmentCA2020
|
0404b49ff98acc85d9275197e60fcadb8ecb75c2
|
af5328aca1b1c9cc75717cd55e07a3b08b098a41
|
refs/heads/master
| 2022-04-17T08:52:29.644316
| 2020-04-10T23:37:02
| 2020-04-10T23:37:02
| 254,523,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 633
|
py
|
#11. In the previous question, insert “break” after the “Good guess!” print statement.
# “break” will terminate the while loop so that users do not have to continue guessing after they found the number.
# If the user does not guess the number at all, print “Sorry but that was not very successful”.
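# Note: the "else" attached to the while loop below runs only when the loop finishes
# without hitting "break", i.e. when none of the five guesses matched the lucky number.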
ans=100
counter=1
while counter <= 5:
num = int(input("Guess the lucky number "))
print('your',counter,"number",num)
counter = counter + 1
if num != ans:
print ("Try again.")
else:
print ("Good guess!")
break
else:
print("sorry but that was not very successful")
|
[
"noreply@github.com"
] |
ghimireu57.noreply@github.com
|
b5236ae52e5df10400422f3de177c87d0979cdf6
|
94aedd9dd803799ed5e8f871a7da90a26eaad596
|
/100 exercises/001.py
|
9dc03725b940694235d9f32802f8890b47773172
|
[] |
no_license
|
NguyenDucHung1601/Python-Basic
|
a238d39ea72d1c304a37b5b528f2dc72773468ec
|
db18592e38689c143492beb0cdf8f68d72788765
|
refs/heads/master
| 2020-09-17T11:09:52.980290
| 2019-11-26T02:24:53
| 2019-11-26T02:24:53
| 224,081,676
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
'''
Exercise 1: Find all numbers in [2000, 3200]
that are divisible by 7 but are not multiples of 5
'''
numbers = []  # avoid shadowing the built-in name "list"
for i in range(2000, 3201):
    if i % 7 == 0 and i % 5 != 0:
        numbers.append(str(i))
print(','.join(numbers))
|
[
"48803420+NguyenDucHung1601@users.noreply.github.com"
] |
48803420+NguyenDucHung1601@users.noreply.github.com
|
4584aeb03fc5fe1e1267705fee3f37e0b7a301d7
|
c94f888541c0c430331110818ed7f3d6b27b788a
|
/saastest6/python/antchain_sdk_saastest6/models.py
|
75c4a8dc219b072244894e13e7800c6975e58841
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
alipay/antchain-openapi-prod-sdk
|
48534eb78878bd708a0c05f2fe280ba9c41d09ad
|
5269b1f55f1fc19cf0584dc3ceea821d3f8f8632
|
refs/heads/master
| 2023-09-03T07:12:04.166131
| 2023-09-01T08:56:15
| 2023-09-01T08:56:15
| 275,521,177
| 9
| 10
|
MIT
| 2021-03-25T02:35:20
| 2020-06-28T06:22:14
|
PHP
|
UTF-8
|
Python
| false
| false
| 32,769
|
py
|
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.model import TeaModel
from typing import List, BinaryIO
class Config(TeaModel):
"""
Model for initing client
"""
def __init__(
self,
access_key_id: str = None,
access_key_secret: str = None,
security_token: str = None,
protocol: str = None,
read_timeout: int = None,
connect_timeout: int = None,
http_proxy: str = None,
https_proxy: str = None,
endpoint: str = None,
no_proxy: str = None,
max_idle_conns: int = None,
user_agent: str = None,
socks_5proxy: str = None,
socks_5net_work: str = None,
max_idle_time_millis: int = None,
keep_alive_duration_millis: int = None,
max_requests: int = None,
max_requests_per_host: int = None,
):
# accesskey id
self.access_key_id = access_key_id
# accesskey secret
self.access_key_secret = access_key_secret
# security token
self.security_token = security_token
# http protocol
self.protocol = protocol
# read timeout
self.read_timeout = read_timeout
# connect timeout
self.connect_timeout = connect_timeout
# http proxy
self.http_proxy = http_proxy
# https proxy
self.https_proxy = https_proxy
# endpoint
self.endpoint = endpoint
# proxy white list
self.no_proxy = no_proxy
# max idle conns
self.max_idle_conns = max_idle_conns
# user agent
self.user_agent = user_agent
# socks5 proxy
self.socks_5proxy = socks_5proxy
# socks5 network
self.socks_5net_work = socks_5net_work
        # maximum idle time of a keep-alive connection, in milliseconds
        self.max_idle_time_millis = max_idle_time_millis
        # maximum lifetime of a keep-alive connection, in milliseconds
        self.keep_alive_duration_millis = keep_alive_duration_millis
        # maximum number of connections (total keep-alive connections)
        self.max_requests = max_requests
        # maximum number of connections per target host (keep-alive connections per host name)
        self.max_requests_per_host = max_requests_per_host
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.access_key_id is not None:
result['accessKeyId'] = self.access_key_id
if self.access_key_secret is not None:
result['accessKeySecret'] = self.access_key_secret
if self.security_token is not None:
result['securityToken'] = self.security_token
if self.protocol is not None:
result['protocol'] = self.protocol
if self.read_timeout is not None:
result['readTimeout'] = self.read_timeout
if self.connect_timeout is not None:
result['connectTimeout'] = self.connect_timeout
if self.http_proxy is not None:
result['httpProxy'] = self.http_proxy
if self.https_proxy is not None:
result['httpsProxy'] = self.https_proxy
if self.endpoint is not None:
result['endpoint'] = self.endpoint
if self.no_proxy is not None:
result['noProxy'] = self.no_proxy
if self.max_idle_conns is not None:
result['maxIdleConns'] = self.max_idle_conns
if self.user_agent is not None:
result['userAgent'] = self.user_agent
if self.socks_5proxy is not None:
result['socks5Proxy'] = self.socks_5proxy
if self.socks_5net_work is not None:
result['socks5NetWork'] = self.socks_5net_work
if self.max_idle_time_millis is not None:
result['maxIdleTimeMillis'] = self.max_idle_time_millis
if self.keep_alive_duration_millis is not None:
result['keepAliveDurationMillis'] = self.keep_alive_duration_millis
if self.max_requests is not None:
result['maxRequests'] = self.max_requests
if self.max_requests_per_host is not None:
result['maxRequestsPerHost'] = self.max_requests_per_host
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('accessKeyId') is not None:
self.access_key_id = m.get('accessKeyId')
if m.get('accessKeySecret') is not None:
self.access_key_secret = m.get('accessKeySecret')
if m.get('securityToken') is not None:
self.security_token = m.get('securityToken')
if m.get('protocol') is not None:
self.protocol = m.get('protocol')
if m.get('readTimeout') is not None:
self.read_timeout = m.get('readTimeout')
if m.get('connectTimeout') is not None:
self.connect_timeout = m.get('connectTimeout')
if m.get('httpProxy') is not None:
self.http_proxy = m.get('httpProxy')
if m.get('httpsProxy') is not None:
self.https_proxy = m.get('httpsProxy')
if m.get('endpoint') is not None:
self.endpoint = m.get('endpoint')
if m.get('noProxy') is not None:
self.no_proxy = m.get('noProxy')
if m.get('maxIdleConns') is not None:
self.max_idle_conns = m.get('maxIdleConns')
if m.get('userAgent') is not None:
self.user_agent = m.get('userAgent')
if m.get('socks5Proxy') is not None:
self.socks_5proxy = m.get('socks5Proxy')
if m.get('socks5NetWork') is not None:
self.socks_5net_work = m.get('socks5NetWork')
if m.get('maxIdleTimeMillis') is not None:
self.max_idle_time_millis = m.get('maxIdleTimeMillis')
if m.get('keepAliveDurationMillis') is not None:
self.keep_alive_duration_millis = m.get('keepAliveDurationMillis')
if m.get('maxRequests') is not None:
self.max_requests = m.get('maxRequests')
if m.get('maxRequestsPerHost') is not None:
self.max_requests_per_host = m.get('maxRequestsPerHost')
return self
class DemoClass(TeaModel):
def __init__(
self,
some_string: str = None,
some_date: str = None,
some_boolean: bool = None,
some_int: int = None,
some_list: List[str] = None,
):
        # string test field
        self.some_string = some_string
        # date test field
        self.some_date = some_date
        # boolean test field
        self.some_boolean = some_boolean
        # integer test field
        self.some_int = some_int
        # list test field
        self.some_list = some_list
def validate(self):
self.validate_required(self.some_string, 'some_string')
self.validate_required(self.some_date, 'some_date')
if self.some_date is not None:
self.validate_pattern(self.some_date, 'some_date', '\\d{4}[-]\\d{1,2}[-]\\d{1,2}[T]\\d{2}:\\d{2}:\\d{2}([Z]|([\\.]\\d{1,9})?[\\+]\\d{2}[\\:]?\\d{2})')
self.validate_required(self.some_boolean, 'some_boolean')
self.validate_required(self.some_int, 'some_int')
if self.some_int is not None:
self.validate_maximum(self.some_int, 'some_int', 2000)
self.validate_minimum(self.some_int, 'some_int', 1)
self.validate_required(self.some_list, 'some_list')
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.some_string is not None:
result['some_string'] = self.some_string
if self.some_date is not None:
result['some_date'] = self.some_date
if self.some_boolean is not None:
result['some_boolean'] = self.some_boolean
if self.some_int is not None:
result['some_int'] = self.some_int
if self.some_list is not None:
result['some_list'] = self.some_list
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('some_string') is not None:
self.some_string = m.get('some_string')
if m.get('some_date') is not None:
self.some_date = m.get('some_date')
if m.get('some_boolean') is not None:
self.some_boolean = m.get('some_boolean')
if m.get('some_int') is not None:
self.some_int = m.get('some_int')
if m.get('some_list') is not None:
self.some_list = m.get('some_list')
return self
class NameValuePair(TeaModel):
def __init__(
self,
name: str = None,
value: str = None,
):
        # key name
        self.name = name
        # key value
        self.value = value
def validate(self):
self.validate_required(self.name, 'name')
self.validate_required(self.value, 'value')
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.name is not None:
result['name'] = self.name
if self.value is not None:
result['value'] = self.value
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('name') is not None:
self.name = m.get('name')
if m.get('value') is not None:
self.value = m.get('value')
return self
class TestStruct(TeaModel):
def __init__(
self,
x: str = None,
y: DemoClass = None,
z: List[DemoClass] = None,
):
# x
self.x = x
# y
self.y = y
# z
self.z = z
def validate(self):
self.validate_required(self.x, 'x')
self.validate_required(self.y, 'y')
if self.y:
self.y.validate()
self.validate_required(self.z, 'z')
if self.z:
for k in self.z:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.x is not None:
result['x'] = self.x
if self.y is not None:
result['y'] = self.y.to_map()
result['z'] = []
if self.z is not None:
for k in self.z:
result['z'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('x') is not None:
self.x = m.get('x')
if m.get('y') is not None:
temp_model = DemoClass()
self.y = temp_model.from_map(m['y'])
self.z = []
if m.get('z') is not None:
for k in m.get('z'):
temp_model = DemoClass()
self.z.append(temp_model.from_map(k))
return self
class QueryMap(TeaModel):
def __init__(
self,
name: str = None,
value: List[NameValuePair] = None,
):
        # key name
        self.name = name
        # extra user information
        self.value = value
def validate(self):
self.validate_required(self.name, 'name')
if self.value:
for k in self.value:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.name is not None:
result['name'] = self.name
result['value'] = []
if self.value is not None:
for k in self.value:
result['value'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('name') is not None:
self.name = m.get('name')
self.value = []
if m.get('value') is not None:
for k in m.get('value'):
temp_model = NameValuePair()
self.value.append(temp_model.from_map(k))
return self
class XNameValuePair(TeaModel):
def __init__(
self,
name: str = None,
value: str = None,
):
        # key name
        self.name = name
        # key value
        self.value = value
def validate(self):
self.validate_required(self.name, 'name')
self.validate_required(self.value, 'value')
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.name is not None:
result['name'] = self.name
if self.value is not None:
result['value'] = self.value
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('name') is not None:
self.name = m.get('name')
if m.get('value') is not None:
self.value = m.get('value')
return self
class EchoDemoGatewayCheckRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
product_instance_id: str = None,
input_demo: DemoClass = None,
input_string: str = None,
input_array: List[TestStruct] = None,
file_object: BinaryIO = None,
file_object_name: str = None,
file_id: str = None,
input_int: int = None,
file_name: str = None,
):
        # authorization token under the OAuth mode
self.auth_token = auth_token
self.product_instance_id = product_instance_id
# demo
self.input_demo = input_demo
# echo
self.input_string = input_string
# input_array
self.input_array = input_array
# file_id
        # the file to be uploaded
        self.file_object = file_object
        # name of the file to be uploaded
        self.file_object_name = file_object_name
self.file_id = file_id
# 1
self.input_int = input_int
        # just a test
self.file_name = file_name
def validate(self):
if self.input_demo:
self.input_demo.validate()
if self.input_string is not None:
self.validate_max_length(self.input_string, 'input_string', 20)
self.validate_required(self.input_array, 'input_array')
if self.input_array:
for k in self.input_array:
if k:
k.validate()
self.validate_required(self.file_id, 'file_id')
self.validate_required(self.input_int, 'input_int')
if self.input_int is not None:
self.validate_maximum(self.input_int, 'input_int', 40)
self.validate_minimum(self.input_int, 'input_int', 10)
self.validate_required(self.file_name, 'file_name')
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.product_instance_id is not None:
result['product_instance_id'] = self.product_instance_id
if self.input_demo is not None:
result['input_demo'] = self.input_demo.to_map()
if self.input_string is not None:
result['input_string'] = self.input_string
result['input_array'] = []
if self.input_array is not None:
for k in self.input_array:
result['input_array'].append(k.to_map() if k else None)
if self.file_object is not None:
result['fileObject'] = self.file_object
if self.file_object_name is not None:
result['fileObjectName'] = self.file_object_name
if self.file_id is not None:
result['file_id'] = self.file_id
if self.input_int is not None:
result['input_int'] = self.input_int
if self.file_name is not None:
result['file_name'] = self.file_name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('product_instance_id') is not None:
self.product_instance_id = m.get('product_instance_id')
if m.get('input_demo') is not None:
temp_model = DemoClass()
self.input_demo = temp_model.from_map(m['input_demo'])
if m.get('input_string') is not None:
self.input_string = m.get('input_string')
self.input_array = []
if m.get('input_array') is not None:
for k in m.get('input_array'):
temp_model = TestStruct()
self.input_array.append(temp_model.from_map(k))
if m.get('fileObject') is not None:
self.file_object = m.get('fileObject')
if m.get('fileObjectName') is not None:
self.file_object_name = m.get('fileObjectName')
if m.get('file_id') is not None:
self.file_id = m.get('file_id')
if m.get('input_int') is not None:
self.input_int = m.get('input_int')
if m.get('file_name') is not None:
self.file_name = m.get('file_name')
return self
class EchoDemoGatewayCheckResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
output_demo: DemoClass = None,
output_string: str = None,
file_url: str = None,
):
        # unique request ID, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # result code; OK generally means the call succeeded
        self.result_code = result_code
        # text description of the error, if any
        self.result_msg = result_msg
# output_demo
self.output_demo = output_demo
# output_string
self.output_string = output_string
# file_url
self.file_url = file_url
def validate(self):
if self.output_demo:
self.output_demo.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
if self.output_demo is not None:
result['output_demo'] = self.output_demo.to_map()
if self.output_string is not None:
result['output_string'] = self.output_string
if self.file_url is not None:
result['file_url'] = self.file_url
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
if m.get('output_demo') is not None:
temp_model = DemoClass()
self.output_demo = temp_model.from_map(m['output_demo'])
if m.get('output_string') is not None:
self.output_string = m.get('output_string')
if m.get('file_url') is not None:
self.file_url = m.get('file_url')
return self
class CreateAntcloudOpenapiApiserviceGroupRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
provider_name: str = None,
suite_version: str = None,
description: str = None,
group_name: str = None,
api_group_type: str = None,
):
        # authorization token under the OAuth mode
        self.auth_token = auth_token
        # owning product
        self.provider_name = provider_name
        # suite version
        self.suite_version = suite_version
        # group description
        self.description = description
        # group name
        self.group_name = group_name
        # group type
        self.api_group_type = api_group_type
def validate(self):
self.validate_required(self.provider_name, 'provider_name')
self.validate_required(self.suite_version, 'suite_version')
self.validate_required(self.description, 'description')
self.validate_required(self.group_name, 'group_name')
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.provider_name is not None:
result['provider_name'] = self.provider_name
if self.suite_version is not None:
result['suite_version'] = self.suite_version
if self.description is not None:
result['description'] = self.description
if self.group_name is not None:
result['group_name'] = self.group_name
if self.api_group_type is not None:
result['api_group_type'] = self.api_group_type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('provider_name') is not None:
self.provider_name = m.get('provider_name')
if m.get('suite_version') is not None:
self.suite_version = m.get('suite_version')
if m.get('description') is not None:
self.description = m.get('description')
if m.get('group_name') is not None:
self.group_name = m.get('group_name')
if m.get('api_group_type') is not None:
self.api_group_type = m.get('api_group_type')
return self
class CreateAntcloudOpenapiApiserviceGroupResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
):
        # unique request ID, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # result code; OK generally means the call succeeded
        self.result_code = result_code
        # text description of the error, if any
        self.result_msg = result_msg
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
return self
class InitDemoBbpInsuranceUserRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
product_instance_id: str = None,
business_code: str = None,
third_part_id: str = None,
channel: str = None,
burieds: QueryMap = None,
):
        # authorization token under the OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # insurer code
        self.business_code = business_code
        # third-party id, here the Tmall uid
        self.third_part_id = third_part_id
        # source channel
        self.channel = channel
        # tracking (buried-point) information
        self.burieds = burieds
def validate(self):
self.validate_required(self.business_code, 'business_code')
self.validate_required(self.third_part_id, 'third_part_id')
self.validate_required(self.channel, 'channel')
if self.burieds:
self.burieds.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.product_instance_id is not None:
result['product_instance_id'] = self.product_instance_id
if self.business_code is not None:
result['business_code'] = self.business_code
if self.third_part_id is not None:
result['third_part_id'] = self.third_part_id
if self.channel is not None:
result['channel'] = self.channel
if self.burieds is not None:
result['burieds'] = self.burieds.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('product_instance_id') is not None:
self.product_instance_id = m.get('product_instance_id')
if m.get('business_code') is not None:
self.business_code = m.get('business_code')
if m.get('third_part_id') is not None:
self.third_part_id = m.get('third_part_id')
if m.get('channel') is not None:
self.channel = m.get('channel')
if m.get('burieds') is not None:
temp_model = QueryMap()
self.burieds = temp_model.from_map(m['burieds'])
return self
class InitDemoBbpInsuranceUserResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
):
        # unique request ID, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # result code; OK generally means the call succeeded
        self.result_code = result_code
        # text description of the error, if any
        self.result_msg = result_msg
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
return self
class CreateAntcloudGatewayxFileUploadRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
api_code: str = None,
file_label: str = None,
file_metadata: str = None,
file_name: str = None,
mime_type: str = None,
api_cluster: str = None,
):
        # authorization token under the OAuth mode
        self.auth_token = auth_token
        # the openapi method the uploaded file will be used for
        self.api_code = api_code
        # file labels, multiple labels separated by ';'
        self.file_label = file_label
        # custom file metadata
        self.file_metadata = file_metadata
        # file name; a random name is generated when omitted
        self.file_name = file_name
        # MIME type of the file
        self.mime_type = mime_type
        # the provider's api cluster, i.e. productInstanceId
        self.api_cluster = api_cluster
def validate(self):
self.validate_required(self.api_code, 'api_code')
if self.file_label is not None:
self.validate_max_length(self.file_label, 'file_label', 100)
if self.file_metadata is not None:
self.validate_max_length(self.file_metadata, 'file_metadata', 1000)
if self.file_name is not None:
self.validate_max_length(self.file_name, 'file_name', 100)
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.api_code is not None:
result['api_code'] = self.api_code
if self.file_label is not None:
result['file_label'] = self.file_label
if self.file_metadata is not None:
result['file_metadata'] = self.file_metadata
if self.file_name is not None:
result['file_name'] = self.file_name
if self.mime_type is not None:
result['mime_type'] = self.mime_type
if self.api_cluster is not None:
result['api_cluster'] = self.api_cluster
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('api_code') is not None:
self.api_code = m.get('api_code')
if m.get('file_label') is not None:
self.file_label = m.get('file_label')
if m.get('file_metadata') is not None:
self.file_metadata = m.get('file_metadata')
if m.get('file_name') is not None:
self.file_name = m.get('file_name')
if m.get('mime_type') is not None:
self.mime_type = m.get('mime_type')
if m.get('api_cluster') is not None:
self.api_cluster = m.get('api_cluster')
return self
class CreateAntcloudGatewayxFileUploadResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
expired_time: str = None,
file_id: str = None,
upload_headers: List[XNameValuePair] = None,
upload_url: str = None,
):
        # unique request ID, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # result code; OK generally means the call succeeded
        self.result_code = result_code
        # text description of the error, if any
        self.result_msg = result_msg
        # expiry time of the upload credential
        self.expired_time = expired_time
        # 32-character unique file id
        self.file_id = file_id
        # headers to put into the HTTP upload request
        self.upload_headers = upload_headers
        # file upload URL
        self.upload_url = upload_url
def validate(self):
if self.expired_time is not None:
self.validate_pattern(self.expired_time, 'expired_time', '\\d{4}[-]\\d{1,2}[-]\\d{1,2}[T]\\d{2}:\\d{2}:\\d{2}([Z]|([\\.]\\d{1,9})?[\\+]\\d{2}[\\:]?\\d{2})')
if self.upload_headers:
for k in self.upload_headers:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
if self.expired_time is not None:
result['expired_time'] = self.expired_time
if self.file_id is not None:
result['file_id'] = self.file_id
result['upload_headers'] = []
if self.upload_headers is not None:
for k in self.upload_headers:
result['upload_headers'].append(k.to_map() if k else None)
if self.upload_url is not None:
result['upload_url'] = self.upload_url
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
if m.get('expired_time') is not None:
self.expired_time = m.get('expired_time')
if m.get('file_id') is not None:
self.file_id = m.get('file_id')
self.upload_headers = []
if m.get('upload_headers') is not None:
for k in m.get('upload_headers'):
temp_model = XNameValuePair()
self.upload_headers.append(temp_model.from_map(k))
if m.get('upload_url') is not None:
self.upload_url = m.get('upload_url')
return self
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
d0fffadb2e1f3cb94ea162f5007996610d2c1570
|
8f213337c875f3c344b2a9f7e5b5a39622b86f5b
|
/auxiliary_functions.py
|
28b5759dd042fbf6661a64027f441d78c215f431
|
[] |
no_license
|
jgoncalocouto/Transient_energy_equation
|
abd05806cb05b4f44a3a70be314c975f1523a4bd
|
d9f96ebebd3b06c1503a72d6d43356eb11c59371
|
refs/heads/master
| 2022-12-23T04:51:05.675989
| 2020-09-28T16:31:35
| 2020-09-28T16:31:35
| 299,368,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,828
|
py
|
from fluid_properties import *
def external_natural_convection(T_in,T_amb,material,L_characteristic,A_ht):
Q_conv=0
material=material.lower()
g = 9.80665 # [m/s^2]
T_film = (T_in+T_amb) * 0.5
beta = 1 / (273.15 + T_film)
vu = thermal_properties('vu', material, T_film) # [m^2/(s)]
k = thermal_properties('k', material, T_film) # [W/(m*ºK)]
Pr = thermal_properties('Pr', 'air', T_film)
Gr = (g * beta * (L_characteristic ** 3) * (T_in - T_amb)) / (vu ** 2)
ht={}
ht['Gr'] = Gr
ht['Pr']=Pr
ht = nusselt_external_free(ht)
Nu= ht['Nu']
h_ext = (Nu * k) / (L_characteristic)
Q_conv = h_ext * (A_ht) * (T_amb-T_in)
return Q_conv
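# Forced convection over a box-shaped enclosure (L x W x H): each face (front, laterals,
# top, back) gets its own Nusselt correlation, and the per-face coefficients are
# area-weighted into a single external coefficient h_ext.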
def external_forced_convection(T_in,T_amb,material,V_wind=0.5,L=0.4,W=0.7,H=2):
Q_conv=0
external={}
geometry={}
geometry['H']=H; geometry['W']=W; geometry['L']=L;
external['v_Wind'] = V_wind
external['T_film']=(T_in + T_amb) * 0.5
external['mu'] = thermal_properties('mu', material, external['T_film']) # [m^2/(s)]
external['rho'] = thermal_properties('rho', material, external['T_film']) # [kg/(m^3)]
external['k'] = thermal_properties('k', material, external['T_film']) # [W/m*°K]
external['Pr'] = thermal_properties('Pr', material, external['T_film'])
# Front Face
external_facefront = external.copy()
external_facefront['L_characteristic'] = geometry['H']
external_facefront['Re'] = (external_facefront['rho'] * external_facefront['v_Wind'] * external_facefront[
'L_characteristic']) / external_facefront['mu']
external_facefront = nusselt_external_frontplate(external_facefront)
external_facefront['h'] = (external_facefront['Nu'] * external_facefront['k']) / (
external_facefront['L_characteristic'])
external_facefront['A_sup'] = geometry['H'] * geometry['L']
# Laterals
external_laterals = external.copy()
external_laterals['L_characteristic'] = geometry['W']
external_laterals['Re'] = (external_laterals['rho'] * external_laterals['v_Wind'] * external_laterals[
'L_characteristic']) / external_laterals['mu']
external_laterals = nusselt_external_flatplate(external_laterals)
external_laterals['h'] = (external_laterals['Nu'] * external_laterals['k']) / (
external_laterals['L_characteristic'])
external_laterals['A_sup'] = 2 * geometry['H'] * geometry['W']
# Top Face
external_top = external_laterals.copy()
external_top['A_sup'] = geometry['L'] * geometry['W']
# Back Face
external_faceback = external.copy()
external_faceback['L_characteristic'] = geometry['H']
external_faceback['Re'] = (external_faceback['rho'] * external_faceback['v_Wind'] * external_faceback[
'L_characteristic']) / external_faceback['mu']
external_faceback = nusselt_external_backplate(external_faceback)
external_faceback['h'] = (external_faceback['Nu'] * external_faceback['k']) / (
external_faceback['L_characteristic'])
external_faceback['A_sup'] = geometry['H'] * geometry['L']
external['A_sup'] = (
external_facefront['A_sup'] + external_laterals['A_sup'] + external_top['A_sup'] + external_faceback[
'A_sup'])
external['h_ext'] = ((external_facefront['h'] * external_facefront['A_sup']) + (
external_laterals['h'] * external_laterals['A_sup']) + (external_top['h'] * external_top['A_sup']) + (
external_faceback['h'] * external_faceback['A_sup'])) / (external['A_sup'])
external['Q_conv'] = external['h_ext'] * (external['A_sup']) * (T_amb-T_in)
Q_conv=external['Q_conv']
return Q_conv
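# Simple on/off control of a heating resistance: full power Q only while T <= T_on, otherwise off.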
def heating_resistance(T,Q,T_on=0,T_off=10):
if T<=T_on:
Q_heat=Q
elif T>=T_off:
Q_heat=0
else:
Q_heat=0
return Q_heat
|
[
"jgoncalocouto@gmail.com"
] |
jgoncalocouto@gmail.com
|
ffe5c6c7f4b092f6b047a7ef7fa5b5df516006c2
|
5889393a7b591c7848333cd729412fe03f991c9c
|
/main/migrations/0004_auto_20210326_1259.py
|
e3cec324b99c5a5cfd4ee47e969a90e3569d6086
|
[] |
no_license
|
nishit-popat/Codemania
|
553928f8ecca11181c7a1f0b93876e58ca712f8b
|
ea204de8dd4511db6d028ef15fe20f40a07d2580
|
refs/heads/master
| 2023-06-03T13:06:16.704507
| 2021-06-19T06:00:54
| 2021-06-19T06:00:54
| 351,455,916
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
# Generated by Django 3.1.5 on 2021-03-26 07:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0003_auto_20210326_1256'),
]
operations = [
migrations.AlterField(
model_name='contest',
name='contest_name',
field=models.CharField(max_length=200),
),
migrations.AlterField(
model_name='problem',
name='problem_definition',
field=models.TextField(max_length=700),
),
]
|
[
"nisitpopat9999@gmail.com"
] |
nisitpopat9999@gmail.com
|
90af23c2bff81ddafc6558e33d142447fc8311dd
|
8b8e41ad1b1f457c32c8ffcf446da5f60c72588d
|
/nokia_app/test_source_code.py
|
1d9572fee29c598d498d08dd185d9550d3f24777
|
[] |
no_license
|
NestorBracho/nokia-test
|
1b90e1acf307a9dd835b51254120efc7e0d12192
|
969509af41212e5fbd9862216af8d77bf72fd615
|
refs/heads/master
| 2023-05-30T04:48:22.118718
| 2021-06-13T22:18:23
| 2021-06-13T22:18:23
| 376,617,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,925
|
py
|
"""
Working from left-to-right if no digit is exceeded by the digit to its left it is called an increasing number;
for example, 134468.
Similarly if no digit is exceeded by the digit to its right it is called a decreasing number; for example, 66420.
We shall call a positive integer that is neither increasing nor decreasing a "bouncy" number; for example, 155349.
Clearly there cannot be any bouncy numbers below one-hundred, but just over half of the numbers below one-thousand
(525) are bouncy. In fact, the least number for which the proportion of bouncy numbers first reaches 50% is 538.
Surprisingly, bouncy numbers become more and more common and by the time we reach 21780 the proportion of bouncy
numbers is equal to 90%.
Find the least number for which the proportion of bouncy numbers is exactly 99%.
"""
def nokia_test(percentage: float = 90):
initial_number = 100
bounce = 0 # Total bouncy numbers
final_percentage = percentage / 100
while True:
aux_i = str(initial_number)
aux_list = [int(j) for j in aux_i]
is_bounce = False
aux_bool = None
for k in range(0, len(aux_list) - 1):
if aux_list[k] > aux_list[k + 1]:
if aux_bool is None:
aux_bool = True
elif aux_bool is False:
is_bounce = True
break
elif aux_list[k] < aux_list[k + 1]:
if aux_bool is None:
aux_bool = False
elif aux_bool is True:
is_bounce = True
break
if is_bounce:
bounce += 1
if float(bounce) / float(initial_number) >= final_percentage:
break
initial_number += 1
return {
'final_number': initial_number,
'bounce_amount': bounce,
'percentage': float(bounce) * 100 / float(initial_number)
}
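# Minimal usage sketch (hypothetical, not in the original script): answer the stated
# question by finding the least number whose proportion of bouncy numbers reaches 99%.
if __name__ == '__main__':
    print(nokia_test(99))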
|
[
"nestorbrachi2207@gmail.com"
] |
nestorbrachi2207@gmail.com
|
939922b8fd157040e6378dbe5e8c5dd212043968
|
20bb3c70e753417009448de2741e8ed94a1aa78c
|
/sim/experiments/taas/experiment3.py
|
29d33e82dac5b0838eef5a6b5f5b3fcce9e08434
|
[] |
no_license
|
ravi-rajyaguru15/simulatorMA
|
a4ef33f3312b3b01c3fb0b3ba36e512a93eff27c
|
34f326e47c265d3b0f12eebe756a9790b80e5b11
|
refs/heads/master
| 2023-03-28T11:08:44.478375
| 2020-12-30T15:59:34
| 2020-12-30T15:59:34
| 353,339,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,089
|
py
|
import multiprocessing
import sys
import traceback
from sim import debug, counters, plotting
from sim.experiments.experiment import executeMulti
from sim.experiments.scenario import REGULAR_SCENARIO_ROUND_ROBIN
from sim.learning.agent.lazyTableAgent import lazyTableAgent
from sim.learning.agent.minimalTableAgent import minimalTableAgent
from sim.learning.agent.dqnAgent import dqnAgent
from sim.learning.agent.randomAgent import randomAgent
from sim.learning.state.minimalSystemState import minimalSystemState
from sim.simulations import localConstants, constants
from sim.simulations.SimpleSimulation import SimpleSimulation
from sim.experiments.experiment import executeMulti, setupMultithreading
from sim.tasks.tasks import HARD
maxjobs = 5
numEnergyStates = 3
def runThread(agent, numEpisodes, results, finished):
exp = SimpleSimulation(numDevices=2, maxJobs=maxjobs, agentClass=agent, tasks=[HARD], systemStateClass=minimalSystemState, scenarioTemplate=REGULAR_SCENARIO_ROUND_ROBIN, centralisedLearning=True, numEnergyLevels=numEnergyStates, trainClassification=True)
# exp.scenario.setInterval(1)
exp.sharedAgent.loadModel()
exp.sharedAgent.setProductionMode()
exp.setBatterySize(1e-1)
exp.setFpgaIdleSleep(1e-3)
e = None
try:
for e in range(numEpisodes):
debug.infoEnabled = False
exp.simulateEpisode(e)
agentName = exp.devices[0].agent.__name__
result = [f"{agentName}", e, exp.numFinishedJobs]
print(result)
results.put(result)
# results.put([f"{agentName}", e, exp.getCurrentTime()])
except:
debug.printCache()
traceback.print_exc(file=sys.stdout)
print(agent, e)
print("Error in experiment :", exp.time)
sys.exit(0)
finished.put(True)
def run(numEpisodes):
print("starting experiment")
processes = list()
results = multiprocessing.Queue()
finished = multiprocessing.Queue()
localConstants.REPEATS = 4
# localConstants.REPEATS = 8
numEpisodes = int(numEpisodes)
# agentsToTest = [minimalTableAgent]
agentsToTest = [dqnAgent, minimalTableAgent] # minimalTableAgent, , localAgent]
for agent in agentsToTest: # [minimalAgent, lazyAgent]:
for _ in range(localConstants.REPEATS):
for centralised in [True]:
if not (not centralised and agent is randomAgent):
processes.append(multiprocessing.Process(target=runThread, args=(agent, numEpisodes, results, finished)))
results = executeMulti(processes, results, finished, numResults=len(processes) * numEpisodes)
# plotting.plotMultiWithErrors("experiment1", title="experiment 1", results=results, ylabel="", xlabel="Episode #") # , save=True)
plotting.plotMultiWithErrors("experiment3", title="experiment 3", results=results, ylabel="Job #", xlabel="Episode #") # , save=True)
if __name__ == "__main__":
setupMultithreading()
try:
run(1e1)
except:
traceback.print_exc(file=sys.stdout)
print("ERROR")
|
[
"alwyn.burger@uni-due.de"
] |
alwyn.burger@uni-due.de
|
1691bfa1eae47e55c1184a3c08098c2b274228ab
|
0233464a567919d7f32ff0d4ccf0fad3787d5b0d
|
/superlists/settings.py
|
8f84d3f8b2cfe161c9c2bd1866f7ae78789d7cef
|
[] |
no_license
|
slaneslane/superlists
|
939637282c0dbde7b68f9f3bfae3cdf3d369ab1c
|
3725daeb8aa4f4a78f7cf4e821dae51bac4e3c1b
|
refs/heads/master
| 2018-10-10T17:31:11.400215
| 2018-06-25T07:05:31
| 2018-06-25T07:05:31
| 110,593,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,984
|
py
|
"""
Django settings for superlists project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
if 'DJANGO_DEBUG_FALSE' in os.environ:
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
ALLOWED_HOSTS = [os.environ['SITENAME']]
else:
DEBUG = True
SECRET_KEY = 'insecure-key-for-dev'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
#'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'lists',
'accounts',
'functional_tests',
]
AUTH_USER_MODEL = 'accounts.User'
AUTHENTICATION_BACKENDS = [
'accounts.authentication.PasswordlessAuthenticationBackend',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'superlists.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'superlists.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
},
},
'root': {'level': 'INFO'},
}
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'twojelisty.serwis@gmail.com'
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASSWORD')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
|
[
"slane@wp.pl"
] |
slane@wp.pl
|
ed0342f84e421f349bc8f9087c1e3b93ecf96cf2
|
96a1e8c1ca1bfc463ae2d6e6d9af854cd8764eb0
|
/ch02/heldout.py
|
f4f61e94865c0877554889962864e3c6e5d63920
|
[
"MIT"
] |
permissive
|
rob-nn/BuildingMachineLearningSystemsWithPython
|
a53b8eb85684ce84896a8c5dd33ea0275b4b5191
|
cf02b13a569126a46cf75200e85be3ecb410fc64
|
refs/heads/master
| 2021-01-21T01:17:23.009494
| 2019-10-09T22:25:46
| 2019-10-09T22:25:46
| 22,963,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,286
|
py
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
# This script demonstrates the difference between the training accuracy and
# testing (held-out) accuracy.
from matplotlib import pyplot as plt
import numpy as np
from sklearn.datasets import load_iris
from threshold import fit_model, accuracy
data = load_iris()
features = data['data']
labels = data['target_names'][data['target']]
# We are going to remove the setosa examples as they are too easy:
setosa = (labels == 'setosa')
features = features[~setosa]
labels = labels[~setosa]
# Now we classify virginica vs non-virginica
virginica = (labels == 'virginica')
# Split the data in two: testing and training
testing = np.tile([True, False], 50) # testing = [True,False,True,False,True,False...]
training = ~testing
model = fit_model(features[training], virginica[training])
train_accuracy = accuracy(features[training], virginica[training], model)
test_accuracy = accuracy(features[testing], virginica[testing], model)
print('''\
Training accuracy was {0:.1%}.
Testing accuracy was {1:.1%} (N = {2}).
'''.format(train_accuracy, test_accuracy, testing.sum()))
|
[
"luis@luispedro.org"
] |
luis@luispedro.org
|
12ae02ddbb89a411f91a060815ce6dc4d0fb35e8
|
5a5eb1f043c529cc5d451c4da8475dabb048494c
|
/catkin_ws/build/hector_slam/hector_imu_tools/catkin_generated/pkg.installspace.context.pc.py
|
b46493ecdf54c1a5fb18a6333443a538110ad4da
|
[] |
no_license
|
XiaoYunhan/Vincent
|
d865e1af8fe198d4ee2e79109fc64acbc4576883
|
4894cb966862029816a522adc89b9e03c820de90
|
refs/heads/master
| 2020-07-03T04:30:52.862219
| 2019-08-11T15:40:26
| 2019-08-11T15:40:26
| 201,783,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hector_imu_tools"
PROJECT_SPACE_DIR = "/home/user/EPP_slamBot/catkin_ws/install"
PROJECT_VERSION = "0.3.5"
|
[
"2028427786@qq.com"
] |
2028427786@qq.com
|
52b8eeb49579e8f4b716a86d3f2530a419da0a7f
|
7056aa7d801b19e705f733db6a2113a39f27e234
|
/python/app.py
|
e583679b973d1779ca9000c20db0db0984d96ebc
|
[] |
no_license
|
markszechely/content-gitops
|
997a9f4b20b9e37bfa1fa25213d368ccf7c13889
|
f293647a6d1786b0120ed947ce3dad8c4bafc42e
|
refs/heads/master
| 2023-06-19T01:21:47.014984
| 2021-07-12T12:55:20
| 2021-07-12T12:55:20
| 385,198,135
| 0
| 0
| null | 2021-07-12T12:25:57
| 2021-07-12T09:45:30
|
Python
|
UTF-8
|
Python
| false
| false
| 177
|
py
|
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello LA students..."
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000)
|
[
"noreply@github.com"
] |
markszechely.noreply@github.com
|
ed1f5e3ec2f758d15da7f792256bafb01568a1ff
|
82e2e318ae480e59a129ba31d5a79a3475a8fe73
|
/cube_auto_rotate.py
|
b3a8054e13e1ce2212c2ab38389dae39df499f25
|
[] |
no_license
|
gluttony47/python-practice
|
bdcfc4981031b4e39bf680666b5040d27e770f33
|
6abe87075f7853bf1c088668b35b7ad384fc3cb3
|
refs/heads/master
| 2020-05-05T05:37:14.126224
| 2019-04-05T23:28:33
| 2019-04-05T23:28:33
| 179,759,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,099
|
py
|
## importing modules
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
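## pyramid vertices: the four corners of the square base plus a single apex point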
vertices = (
(1, -1, -1),
(1, 1, -1),
(-1, 1, -1),
(-1, -1, -1),
(0, 0, 1)
)
## edge vertices description
edges = (
(0, 1),
(0, 3),
(0, 4),
(1, 4),
(1, 2),
(2, 4),
(2, 3),
(3, 4)
)
def Pyramid():
glLineWidth(5)
glBegin(GL_LINES)
for edge in edges:
for vertex in edge:
glVertex3fv(vertices[vertex])
glColor3f(1, 0, 0)
glEnd()
def main():
pygame.init()
display = (800, 800)
pygame.display.set_mode(display, DOUBLEBUF | OPENGL)
gluPerspective(45, (display[0] / display[1]), 0.1, 50)
glTranslatef(0, 0, -5)
clock = pygame.time.Clock()
while True:
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
glRotatef(2, 1, 1, 3)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
Pyramid()
pygame.display.flip()
main()
|
[
"philmist@gmail.com"
] |
philmist@gmail.com
|
9806a2889ffd8af1d35bdfd67621b06f203d1dd9
|
c53b3e120c59557daaa2fa5b7626413105eb5965
|
/tendenci/apps/resumes/migrations/0002_resume_association_id.py
|
9d8c58df4c2fd8fbc37a301dc162aced7be751e2
|
[] |
no_license
|
chendong0444/ams
|
8483334d9b687708d533190b62c1fa4fd4690f2c
|
f2ac4ecc076b223c262f2cde4fa3b35b4a5cd54e
|
refs/heads/master
| 2021-05-01T03:59:18.682836
| 2018-07-23T06:33:41
| 2018-07-23T06:33:41
| 121,194,728
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resumes', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='resume',
name='association_id',
field=models.IntegerField(default=0),
),
]
|
[
"chendong0444@gmail.com"
] |
chendong0444@gmail.com
|
693ee3203451ac350387ab5a02f912dc67b5bbed
|
b998c7550393b9aa469cde84bad421d9cbe87c45
|
/proximity_widget.py
|
43807e7ac321926c3e416e5a3a2e82398eef8e0a
|
[
"Apache-2.0"
] |
permissive
|
negasora/proximity_graph
|
84b4d306956812f0b176d9060756347314dc8ef0
|
5df1201a6c11559cf07d2306a1bb0626f664b088
|
refs/heads/master
| 2022-11-26T13:51:15.235499
| 2020-08-07T23:39:58
| 2020-08-07T23:39:58
| 183,114,509
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,800
|
py
|
from collections import defaultdict
from binaryninjaui import FlowGraphWidget, ViewType
from binaryninja import Function
from binaryninja.flowgraph import FlowGraph, FlowGraphNode
from binaryninja.function import InstructionTextToken, DisassemblyTextLine, Function
from binaryninja.enums import InstructionTextTokenType, BranchType
from .proximity_graph import ProximityGraph
#TODO: add ability to find paths
#TODO: highlight nodes without children in red? (or add dummy "..." node to those that do)
class ProximityWidget(FlowGraphWidget):
def __init__(self, parent, data):
self.bv = data
self.edges = defaultdict(set)
self.graph = ProximityGraph(self, self.edges)
super(ProximityWidget, self).__init__(parent, self.bv, self.graph)
self.populate_initial()
def update_graph(self):
self.graph = ProximityGraph(self, self.edges)
self.updateToGraph(self.graph)
def populate_initial(self):
self.add_proximity_layer(self.bv.entry_function)
def navigateToFunction(self, func, addr):
self.add_proximity_layer(func)
return True
def navigate(self, addr):
func = self.bv.get_function_at(addr)
if func is None:
return False
self.navigateToFunction(func, addr)
return True
def get_children_nodes(self, func):
refs = self.bv.get_code_refs_from(func.lowest_address, func=func, length=func.highest_address-func.lowest_address)
refs = set(refs) #TODO: fix api and remove this workaround
return refs
def get_parent_nodes(self, func):
refs = self.bv.get_code_refs(func.start)
return refs
def add_proximity_layer(self, func):
parents = self.get_parent_nodes(func)
for r in parents:
self.edges[r.function].add(func)
children = self.get_children_nodes(func)
for r in children:
for f in self.bv.get_functions_containing(r):
self.edges[func].add(f)
#TODO: if a layer was expanded and a new node was added, check if it has a child that's in current graph. If it is, add an edge, else add '...' if it has other children
#TODO: iterate over IL and add nodes and edge if there is a reference to a function
#TODO: data vars
self.update_graph()
class ProximityViewType(ViewType):
def __init__(self):
super(ProximityViewType, self).__init__("Proximity View", "Proximity")
def getPriority(self, data, filename):
if data.executable:
# Use low priority so that this view is not picked by default
return 1
return 0
def create(self, data, view_frame):
return ProximityWidget(view_frame, data)
ViewType.registerViewType(ProximityViewType())
|
[
"negasora@negasora.com"
] |
negasora@negasora.com
|
a01a76518c57c2c50243b1d886052d3150c71b0e
|
3da6b8a0c049a403374e787149d9523012a1f0fc
|
/Coder_Old/爬虫数据/news/news/settings.py
|
8bb3db23bbe57c4bb90ee2d5415c41a313879b56
|
[] |
no_license
|
AndersonHJB/PyCharm_Coder
|
d65250d943e84b523f022f65ef74b13e7c5bc348
|
32f2866f68cc3a391795247d6aba69a7156e6196
|
refs/heads/master
| 2022-07-25T11:43:58.057376
| 2021-08-03T02:50:01
| 2021-08-03T02:50:01
| 348,922,058
| 3
| 3
| null | 2021-09-05T02:20:10
| 2021-03-18T02:57:16
|
Python
|
UTF-8
|
Python
| false
| false
| 3,046
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for news project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'news'
SPIDER_MODULES = ['news.spiders']
NEWSPIDER_MODULE = 'news.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'news (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'news.middlewares.NewsSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'news.middlewares.NewsDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'news.pipelines.NewsPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"1432803776@qq.com"
] |
1432803776@qq.com
|
f26826fa4b56b1fe1686840c62f5adcc71d9d161
|
c793a449c4a2aa227ce25a8e3e72146f98e26dc4
|
/python/made_2020/d.py
|
cb082008bb1632722ed3eed8e436d99d6141a584
|
[
"MIT"
] |
permissive
|
holyketzer/ctci_v6
|
65ae4b000efc35222272cc7843ed7dd138d4827a
|
8877093c3f6744887a4c04ffcdee44d3592170c8
|
refs/heads/master
| 2021-07-22T18:30:30.219531
| 2020-08-20T07:14:31
| 2020-08-20T07:14:31
| 204,212,301
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,749
|
py
|
import sys
def is_open_tag(tag):
return len(tag) > 1 and tag[0] == '<' and tag[1] != '/' and tag[-1] == '>'
def is_close_tag(tag):
return len(tag) > 1 and tag[1] == '/'
def convert_to_open_tag(tag):
return tag[0:1] + tag[2:]
def convert_to_close_tag(tag):
return tag[0:1] + '/' + tag[1:]
x = int(sys.stdin.readline().strip())
for i in range(x):
s = int(sys.stdin.readline().strip())
all_tags = []
tags = []
invalid_closes = []
for j in range(s):
tag = sys.intern(sys.stdin.readline().strip().upper())
all_tags.append(tag)
if is_open_tag(tag):
tags.append(tag)
else:
if len(tags) > 0 and tags[-1] == convert_to_open_tag(tag):
del tags[-1]
else:
invalid_closes.append(tag)
if len(tags) == 0 and len(invalid_closes) == 0:
print('CORRECT')
elif len(tags) + len(invalid_closes) == 1:
if len(tags) > 0:
print('ALMOST ' + tags[0])
else:
print('ALMOST ' + invalid_closes[0])
else:
tags = []
invalid_opens = []
for tag in reversed(all_tags):
if is_close_tag(tag):
tags.append(tag)
else:
if len(tags) > 0 and tags[-1] == convert_to_close_tag(tag):
del tags[-1]
else:
invalid_opens.append(tag)
if len(tags) == 0 and len(invalid_opens) == 0:
print('CORRECT')
elif len(tags) + len(invalid_opens) == 1:
if len(tags) > 0:
print('ALMOST ' + tags[0])
else:
print('ALMOST ' + invalid_opens[0])
else:
print('INCORRECT')
|
[
"holyketzer@gmail.com"
] |
holyketzer@gmail.com
|
8e396267e0d8e0ee2807aaf4f1eb912f1f152398
|
7069623dbf1f8cec1733955704ef91221499789b
|
/hippy/builtin_klass.py
|
eff2601e9ae7f29ff27036496840d9439c2c3bf7
|
[
"MIT"
] |
permissive
|
rlamy/hippyvm
|
5bb9a4bfc5fbeeb61514c6b8ef0b29830b505f33
|
c5aff3f7ffe963b2845550ad1688fbe2b5104ef1
|
refs/heads/master
| 2020-12-24T16:06:05.378843
| 2014-08-26T17:31:13
| 2014-08-26T17:31:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,771
|
py
|
""" This module implements helpers for implementing builtin classes.
"""
from hippy.klass import GetterSetter, def_class
from hippy.builtin import wrap_method, Optional, ThisUnwrapper, Nullable
from hippy.objects.instanceobject import W_InstanceObject
from hippy import consts
class GetterSetterWrapper(object):
def __init__(self, getter, setter, name, accflags):
self.getter = getter
self.setter = setter
self.name = name
self.accflags = accflags
def build(self, klass):
"""NOT_RPYTHON: we need some magic RPython hackery here to convince
the getter/setter functions that we're passing a "this" of the
exact W_Xxx class rather than just a W_InstanceObject."""
if klass.custom_instance_class is not None:
W_ExactClass = klass.custom_instance_class
real_getter = self.getter
real_setter = self.setter
def typecasting_getter(interp, this):
assert isinstance(this, W_ExactClass)
return real_getter(interp, this)
def typecasting_setter(interp, this, w_newvalue):
assert isinstance(this, W_ExactClass)
return real_setter(interp, this, w_newvalue)
getter = typecasting_getter
setter = typecasting_setter
else:
getter = self.getter
setter = self.setter
return GetterSetter(getter, setter, self.name, klass,
self.accflags)
class W_ExceptionObject(W_InstanceObject):
def setup(self, interp):
self.traceback = interp.get_traceback()
def get_message(self, interp):
return self.getattr(interp, 'message', k_Exception)
@wrap_method(['interp', ThisUnwrapper(W_ExceptionObject),
Optional(str), Optional(int), Optional(Nullable('object'))],
name='Exception::__construct')
def new_exception(interp, this, message='', code=0, w_previous=None):
this.setattr(interp, 'file', interp.space.wrap(this.traceback[0][0]), k_Exception)
this.setattr(interp, 'message', interp.space.wrap(message), k_Exception)
this.setattr(interp, 'code', interp.space.wrap(code), k_Exception)
if w_previous is None:
w_previous = interp.space.w_Null
elif not k_Exception.is_parent_of(w_previous.klass):
interp.fatal("Wrong parameters for "
"Exception([string $exception [, long $code [, "
"Exception $previous = NULL]]])")
this.setattr(interp, 'previous', w_previous, k_Exception)
@wrap_method(['interp', 'this'], name='Exception::getMessage')
def exc_getMessage(interp, this):
return this.getattr(interp, 'message', k_Exception)
@wrap_method(['interp', 'this'], name='Exception::getCode')
def exc_getCode(interp, this):
return this.getattr(interp, 'code', k_Exception)
@wrap_method(['interp', 'this'], name='Exception::getPrevious')
def exc_getPrevious(interp, this):
return this.getattr(interp, 'previous', k_Exception)
@wrap_method(['interp', ThisUnwrapper(W_ExceptionObject)],
name='Exception::getTrace')
def exc_getTrace(interp, this):
from hippy.module.internal import backtrace_to_applevel
return backtrace_to_applevel(interp.space, this.traceback)
@wrap_method(['interp', ThisUnwrapper(W_ExceptionObject)],
name='Exception::getFile')
def exc_getFile(interp, this):
return this.getattr(interp, 'file', k_Exception)
@wrap_method(['interp', ThisUnwrapper(W_ExceptionObject)],
name='Exception::getLine')
def exc_getLine(interp, this):
return this.getattr(interp, 'line', k_Exception)
@wrap_method(['interp', ThisUnwrapper(W_ExceptionObject)],
name='Exception::__toString')
def exc___toString(interp, this):
name = this.klass.name
space = interp.space
message = space.str_w(this.getattr(interp, 'message', k_Exception))
file = space.str_w(this.getattr(interp, 'file', k_Exception))
line = space.int_w(this.getattr(interp, 'line', k_Exception))
msg = ["exception '%s' with message '%s' in %s:%d" % (name, message, file, line)]
msg.append("Stack trace")
for i, (filename, funcname, line, source) in enumerate(this.traceback):
msg.append("#%d %s(%d): %s()" % (i, filename, line, funcname))
return space.wrap("\n".join(msg))
@wrap_method(['interp', ThisUnwrapper(W_ExceptionObject)],
name='Exception::getTraceAsString')
def exc_getTraceAsString(interp, this):
msg = []
for i, (filename, funcname, line, source) in enumerate(this.traceback):
msg.append("#%d %s(%d): %s()" % (i, filename, line, funcname))
return interp.space.wrap("\n".join(msg))
k_Exception = def_class('Exception',
[new_exception, exc_getMessage, exc_getCode, exc_getPrevious,
exc_getTrace, exc_getFile, exc_getLine, exc___toString,
exc_getTraceAsString],
[('message', consts.ACC_PROTECTED),
('code', consts.ACC_PROTECTED),
('previous', consts.ACC_PRIVATE),
('file', consts.ACC_PROTECTED),
('line', consts.ACC_PROTECTED),
],
instance_class=W_ExceptionObject)
def_class('OutOfBoundsException', [], extends=k_Exception, instance_class=W_ExceptionObject)
k_stdClass = def_class('stdClass', [])
k_incomplete = def_class('__PHP_Incomplete_Class', [])
k_RuntimeException = def_class('RuntimeException', [], extends=k_Exception, instance_class=W_ExceptionObject)
k_LogicException = def_class('LogicException', [], extends=k_Exception, instance_class=W_ExceptionObject)
k_DomainException = def_class('DomainException', [], extends=k_Exception, instance_class=W_ExceptionObject)
k_UnexpectedValueException = def_class('UnexpectedValueException', [],
extends=k_Exception, instance_class=W_ExceptionObject)
def new_abstract_method(args, **kwds):
name = kwds['name']
assert args[0] == 'interp'
kwds['flags'] = kwds.get('flags', 0) | consts.ACC_ABSTRACT
def method(interp, *args):
interp.fatal("Cannot call abstract method %s()" % (name,))
return wrap_method(args, **kwds)(method)
k_Iterator = def_class('Iterator',
[new_abstract_method(["interp"], name="Iterator::current"),
new_abstract_method(["interp"], name="Iterator::next"),
new_abstract_method(["interp"], name="Iterator::key"),
new_abstract_method(["interp"], name="Iterator::rewind"),
new_abstract_method(["interp"], name="Iterator::valid")],
flags=consts.ACC_INTERFACE | consts.ACC_ABSTRACT,
is_iterator=True
)
def_class('SeekableIterator',
[new_abstract_method(["interp"], name="SeekableIterator::seek")],
flags=consts.ACC_INTERFACE | consts.ACC_ABSTRACT, implements=[k_Iterator])
def_class('RecursiveIterator',
[new_abstract_method(["interp"], name="RecursiveIterator::hasChildren"),
new_abstract_method(["interp"], name="RecursiveIterator::getChildren")],
flags=consts.ACC_INTERFACE | consts.ACC_ABSTRACT, implements=[k_Iterator])
def_class('Countable',
[new_abstract_method(["interp"], name="Countable::count")],
flags=consts.ACC_INTERFACE | consts.ACC_ABSTRACT)
ArrayAccess = def_class('ArrayAccess', [
new_abstract_method(["interp"], name="ArrayAccess::offsetExists"),
new_abstract_method(["interp"], name="ArrayAccess::offsetGet"),
new_abstract_method(["interp"], name="ArrayAccess::offsetSet"),
new_abstract_method(["interp"], name="ArrayAccess::offsetUnset"),],
flags=consts.ACC_INTERFACE | consts.ACC_ABSTRACT,
is_array_access=True)
def_class('Reflector',
[new_abstract_method(["interp"], name="Reflector::export"),
new_abstract_method(["interp"], name="Reflector::__toString")],
flags=consts.ACC_INTERFACE | consts.ACC_ABSTRACT)
|
[
"fijall@gmail.com"
] |
fijall@gmail.com
|
9f7b8b024fc721abf2fca89d322f5068423d0bc7
|
f139a99d51cfa01a7892f0ac5bbb022c0cee0664
|
/Pythonlogy/draft/Adder/01_ExampleDoc.py
|
55ec4b537c59fd5073243f672de6030d945b696f
|
[
"MIT"
] |
permissive
|
Ledoux/ShareYourSystem
|
90bb2e6be3088b458348afa37ace68c93c4b6a7a
|
3a2ffabf46f1f68b2c4fd80fa6edb07ae85fa3b2
|
refs/heads/master
| 2021-01-25T12:14:34.118295
| 2017-01-12T14:44:31
| 2017-01-12T14:44:31
| 29,198,670
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
#ImportModules
import ShareYourSystem as SYS
from ShareYourSystem.Noders import Adder
#Define a Tree instance
MyAdder=Adder.AdderClass()+[
[
('NodeCollectionStr','Tree'),
('NodeKeyStr','MyTuplesList'),
('MyStr','Hello')
],
{
'NodeCollectionStr':'Tree',
'NodeKeyStr':'MyDict',
'MyOtherStr':'Bonjour'
},
Adder.AdderClass().update(
[
('NodeCollectionStr','Tree'),
('NodeKeyStr','MyChildAppender')
]
)
]
#Define the AttestedStr
SYS._attest(
[
'MyAdder is '+SYS._str(
MyAdder,
**{
'RepresentingBaseKeyStrsListBool':False,
'RepresentingAlineaIsBool':False
}
)
]
)
#Print
|
[
"erwan.ledoux@ens.fr"
] |
erwan.ledoux@ens.fr
|
9f7a83c238900a1f3570eddb548aae958fc3ebf0
|
11d85b9e229521088eaead36140f593c18a5c7fd
|
/ResistanceModel/TextElement.py
|
1608a2d1be69a754bcf04ab4129ef3c50b9da14c
|
[] |
no_license
|
Ritten11/LAMAS2021
|
03ef83bbf96a0ee73adafd88012d542eb62f33b7
|
55ee618cecf35bb228442cf2173847b9da2f51f5
|
refs/heads/main
| 2023-06-05T10:06:28.640424
| 2021-06-25T12:09:15
| 2021-06-25T12:09:15
| 371,036,049
| 0
| 0
| null | 2021-06-25T12:09:16
| 2021-05-26T13:03:07
|
Python
|
UTF-8
|
Python
| false
| false
| 574
|
py
|
from mesa import Agent
class TextElement(Agent):
def __init__(self, unique_id, model, description):
'''
An agent solely created for the purpose of adding text within the grid of the simulation. It has no other
purpose than being informational.
:param unique_id: Id of the agent
:param model: Model in which the agent resides
:param description: Text that is to be printed within the grid
'''
super().__init__(unique_id, model)
self.description = description
def step(self):
pass
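

# Illustrative usage sketch (an addition, not part of the original file). It assumes the
# mesa version this project targets, where Agent takes (unique_id, model) as the class
# above does; the grid and the description text are placeholders.
if __name__ == "__main__":
    from mesa import Model
    from mesa.space import MultiGrid

    demo_model = Model()
    demo_model.grid = MultiGrid(3, 3, torus=False)  # stand-in grid for the sketch
    label = TextElement(unique_id=99, model=demo_model, description="Legend: informational text")
    demo_model.grid.place_agent(label, (0, 0))  # the cell where the text should appear
    print(label.description)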
|
[
"ritten98@hotmail.com"
] |
ritten98@hotmail.com
|
f32a96ae5925a732bdf5a0d320c48011869dee45
|
05e859f77075cd4a469a3115a03c82748f6b684a
|
/cryptage/crypt.py
|
3cf123770fd288ce53c6eeaa7cff19611a3dabbe
|
[] |
no_license
|
Mistigrix/Crypto
|
3312c1a87c7286e143366069dedce6b1e09a89d4
|
5739629196fbc8625a23c8e8f694ba8ec68973fa
|
refs/heads/main
| 2023-05-15T21:49:21.040717
| 2021-06-04T08:04:18
| 2021-06-04T20:35:26
| 374,756,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,440
|
py
|
# coding:utf-8
from constantes import *
class Cryptage:
"""Classe permettant de cryptage de fichier ou de message"""
def __init__(self):
pass
def lockMess(self, message, clef):
"""Methode permettant de crypter un message et retourne le message crypter
Paramètre: message à crypter et la clé de chiffrement"""
return self.lock(message, clef)
def lockFile(self, path, clef):
"""Methode permettant de crypter un fichier en retournant une liste.
Paramètre: Chemin menant au fichier et la clé de chiffrement"""
try:
clef = int(clef)
file = open(path, 'r+')
content = file.readlines()
list_crypt = []
file.close()
            # encrypt each line and append it to the encrypted list
for line in content:
line_crypt = self.lock(line, clef)
list_crypt.append(line_crypt)
return list_crypt
except FileNotFoundError:
return None
except ValueError:
raise ValueError("The clef is not int ")
except:
raise Exception
def lock(self, message, clef):
message_crypter = ''
        clef = int(clef)  # convert the key to an integer (it was a character string beforehand)
        # look up each letter's number and add the key to it
        for lettre in message:
            # walk the letters and their indices in the alphabets mapping
            for numero_lettre, lettre_alpha in alphabets.items():
                # check whether this is the matching letter
                if lettre == lettre_alpha:
                    # add the key to the letter's number
                    numero_lettre = numero_lettre + clef
                    # first make sure the sum does not run past the number of letters
                    while numero_lettre > len(alphabets) - 1:
                        # take the difference between the letter's number and the size of our alphabet
                        numero_lettre = numero_lettre - len(alphabets)  # our alphabet starts at index 0
                    # append the encrypted letter to our encrypted message
message_crypter += alphabets[numero_lettre]
return message_crypter
if __name__ == '__main__':
cryptage = Cryptage()
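    # Usage sketch (an illustrative addition, not in the original script): encrypt a short
    # message with a shift of 3. This assumes `alphabets`, imported from `constantes`,
    # maps integer indices to letters, which is how the `lock` method uses it.
    print(cryptage.lockMess("bonjour", 3))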
|
[
"irierayan@gmail.com"
] |
irierayan@gmail.com
|
3865c8dc73bf2c2a6ecc8a6eb39838dbb44b7e95
|
756a6a31ba96721ce0846094751eb0eaa1f5e7fd
|
/core/migrations/0009_auto_20191017_1747.py
|
8ddb182f3a88d7f65973459699cf9acc40824421
|
[] |
no_license
|
36Boxes/5starpropertymaintenance
|
4f65f47a897b1edc98331aa2963c6bbf8edc4d22
|
56d47bd76488a0062ca4f75e2e7921d47dba6fc9
|
refs/heads/master
| 2022-03-18T22:15:08.230921
| 2019-10-18T13:37:35
| 2019-10-18T13:37:35
| 216,035,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 907
|
py
|
# Generated by Django 2.2 on 2019-10-17 17:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0008_auto_20191017_1738'),
]
operations = [
migrations.RenameField(
model_name='before_and_afters',
old_name='review_4_PROJECT',
new_name='review_for_Project',
),
migrations.AddField(
model_name='before_and_afters',
name='project_name',
field=models.CharField(default='project', max_length=100),
preserve_default=False,
),
migrations.AlterField(
model_name='before_and_afters',
name='review_for_Project',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Review'),
),
]
|
[
"46636974+36Boxes@users.noreply.github.com"
] |
46636974+36Boxes@users.noreply.github.com
|
f4aa1e2ef414ef15547121af46b5fc9fcebbe9ca
|
c5663f82e382fc203cd1748a6386fd51e723a040
|
/NlpBase/test.py
|
3ae2fdde1ef19e977a546be56f4078286e222178
|
[] |
no_license
|
gl-coding/OpenBaseCode
|
c7d8aaff255acc3de77ee760739504c3f3c80a10
|
14ce620291e67aa0d31b5fb63c5745b1f5512d64
|
refs/heads/master
| 2020-11-24T18:41:30.788494
| 2019-12-23T06:00:56
| 2019-12-23T06:00:56
| 228,295,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
#encoding=utf8
from service_jieba import *
if __name__ == "__main__":
#test data
s = u'我想和女朋友一起去北京故宫博物院参观和闲逛。'
cuts = jieba.cut(s.strip())
#test case
#cixing_dic = load_cixing_ch("./cixing.dic")
#load_stop_words("./stopword.txt")
remove_stopwords(cuts)
|
[
"1451607278@qq.com"
] |
1451607278@qq.com
|
f70d7614a6afa47c081519fa0f35eecb2d78171c
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5648941810974720_0/Python/asikowitz/base.py
|
d74480f407445e0c560bfb4baab19db770a191eb
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,247
|
py
|
inp = open("A-small-attempt0.in","r")
out = open("a0.out","w")
def readline(f):
return f.readline().strip()
n = int(readline(inp))
for case in range(n):
s = readline(inp)
ans = ""
def zero(s):
global ans
c = 0
while (s.find("Z") != -1):
s=s.replace("Z","",1)
s=s.replace("E","",1)
s=s.replace("R","",1)
s=s.replace("O","",1)
c += 1
ans += "0"*c
return s
def two(s):
global ans
c = 0
while (s.find("W") != -1):
s=s.replace("T","",1)
s=s.replace("W","",1)
s=s.replace("O","",1)
c += 1
ans += "2"*c
return s
def four(s):
global ans
c = 0
while (s.find("U") != -1):
s=s.replace("F","",1)
s=s.replace("O","",1)
s=s.replace("U","",1)
s=s.replace("R","",1)
c += 1
ans += "4"*c
return s
def six(s):
global ans
c = 0
while (s.find("X") != -1):
s=s.replace("S","",1)
s=s.replace("I","",1)
s=s.replace("X","",1)
c += 1
ans += "6"*c
return s
def eight(s):
global ans
c = 0
while (s.find("G") != -1):
s=s.replace("E","",1)
s=s.replace("I","",1)
s=s.replace("G","",1)
s=s.replace("H","",1)
s=s.replace("T","",1)
c += 1
ans += "8"*c
return s
def three(s):
global ans
c = 0
while (s.find("H") != -1):
s=s.replace("T","",1)
s=s.replace("H","",1)
s=s.replace("R","",1)
s=s.replace("E","",1)
s=s.replace("E","",1)
c += 1
ans += "3"*c
return s
def five(s):
global ans
c = 0
while (s.find("F") != -1):
s=s.replace("F","",1)
s=s.replace("I","",1)
s=s.replace("V","",1)
s=s.replace("E","",1)
c += 1
ans += "5"*c
return s
def seven(s):
global ans
c = 0
while (s.find("V") != -1):
s=s.replace("S","",1)
s=s.replace("E","",1)
s=s.replace("V","",1)
s=s.replace("E","",1)
s=s.replace("N","",1)
c += 1
ans += "7"*c
return s
def one(s):
global ans
c = 0
while (s.find("O") != -1):
s=s.replace("O","",1)
s=s.replace("N","",1)
s=s.replace("E","",1)
c += 1
ans += "1"*c
return s
def nine(s):
global ans
c = 0
while (s.find("N") != -1):
s=s.replace("N","",1)
s=s.replace("I","",1)
s=s.replace("N","",1)
s=s.replace("E","",1)
c += 1
ans += "9"*c
return s
s = zero(s)
s = two(s)
s = four(s)
s = six(s)
s = eight(s)
s = three(s)
s = five(s)
s = seven(s)
s = one(s)
s = nine(s)
out.write("Case #%d: %s\n" % (case+1,"".join(sorted(ans))))
inp.close()
out.close()
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
ff0be3e5c5011f111fbc44a6dc7e311ec6dd177d
|
6b46331618b432795f148a2a44cd8ae41ee73373
|
/tests/left_tests/folder_tests/test_definition.py
|
0b71eded54c7613c93bc9894023c02ed476c8ea8
|
[
"MIT"
] |
permissive
|
lycantropos/lz
|
325530a63680fa1a52f7455ff4152df86d073fdf
|
a9c204ebe8620b1237c65178e83a5dac2be7255b
|
refs/heads/master
| 2023-03-07T18:56:21.468639
| 2023-02-23T23:36:34
| 2023-02-23T23:36:34
| 132,186,508
| 7
| 0
|
MIT
| 2019-12-22T09:53:21
| 2018-05-04T20:33:42
|
Python
|
UTF-8
|
Python
| false
| false
| 991
|
py
|
from hypothesis import given
from lz import (left,
right)
from lz.replication import duplicate
from tests.hints import LeftFolderCall
from tests.utils import are_objects_similar
from . import strategies
@given(strategies.empty_folder_calls)
def test_base_case(empty_folder_call: LeftFolderCall) -> None:
function, initial, empty_iterable = empty_folder_call
fold = left.folder(function, initial)
result = fold(empty_iterable)
assert result is initial
@given(strategies.non_empty_folder_calls)
def test_step(non_empty_folder_call: LeftFolderCall) -> None:
function, initial, non_empty_iterable = non_empty_folder_call
non_empty_iterator = iter(non_empty_iterable)
element = next(non_empty_iterator)
original, target = duplicate(non_empty_iterator)
fold = left.folder(function, initial)
attach = right.attacher(element)
result = fold(attach(target))
assert are_objects_similar(result, function(fold(original), element))
|
[
"noreply@github.com"
] |
lycantropos.noreply@github.com
|
81c35a41e0a045ec59debf61bbb167737cfb4083
|
597001e5134ac597e7f2701cfaabfde58d8d82c2
|
/html_parser.py
|
fc5b70c1fc0823bc0dfbb4d77e6d8ca99ab65a6c
|
[] |
no_license
|
ranmx/ToolBox_python
|
79940a3bbc55e211b779841071c19c6fea3f381e
|
5e8ea9b4cabc79f412c030a080d0727429dcb7e5
|
refs/heads/master
| 2021-06-08T11:26:28.833382
| 2016-10-31T06:01:05
| 2016-10-31T06:01:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,425
|
py
|
class HTML(object):
def __init__(self):
self.body = ''
class Table(object):
def __init__(self, attrs=None):
self.table = ''
self.attr_len = 0
if attrs is not None:
self.table_create(attrs)
def __str__(self):
return self.table + '</table>'
def table_create(self, attrs):
self.table = '<table style="width:100%">'
self.attr_len = len(attrs)
self._table_add_item(attrs, head=True)
def table_add(self, attrs):
self._table_add_item(attrs)
def _table_add_item(self, attrs, head=False):
if len(attrs) == self.attr_len:
pass
else:
raise TypeError('Attribution of the table is not correct')
if isinstance(attrs, list):
self.table += '<tr>'
for item in attrs:
if head:
self.table += '<th>{0}</th>'.format(str(item))
else:
self.table += '<td>{0}</td>'.format(str(item))
self.table += '</tr>'
elif isinstance(attrs, str):
self.table += '<tr>'
if head:
self.table += '<th>{0}</th>'.format(str(attrs))
else:
self.table += '<td>{0}</td>'.format(str(attrs))
self.table += '</tr>'
else:
raise TypeError('Attribution of the table is not correct')
def __repr__(self):
return '<html>' + '<head>' + self.style() + '</head>' + \
               '<body>' + self.body + '</body>' + '</html>'
@staticmethod
def title(text):
return '<h1>{0}</h1>'.format(str(text))
@staticmethod
def _table_type(tp=None):
if tp is not None:
table_type = tp
else:
table_type = 'table, th, td {border: 1px solid black;}'
return table_type
def add(self, text):
self.body += '<p>{0}</p>'.format(str(text))
def style(self, tp='', table_type=None):
return '<style>' + tp + self._table_type(table_type) + '</style>'
html = HTML()
title1 = html.title('This is a test table')
html.add(title1)
html.add('Do not tell anybody!')
table = Table(['name', 'age', 'gender'])  # Table is a module-level class, not an attribute of HTML
table.table_add(['Alfred', '27', 'M'])
html.add(table)
print(str(html))
|
[
"noreply@github.com"
] |
ranmx.noreply@github.com
|
d1175e336af81fa2abba853f6f35ad2ce6f411bf
|
d735694b03c63cf8facda84e1d926f8cebdf3f99
|
/Ejercicio3-1.py
|
d0cfde94083534d19d7546b5a910d41c643f655e
|
[] |
no_license
|
MauroLG/ejercicios-python
|
2243ecedf510596e9207a3093d7f5a0fca23a4ab
|
a32236eb29e1ed6a5c49bb8442578f19d64ac633
|
refs/heads/main
| 2023-07-14T06:43:29.766603
| 2021-08-18T20:15:12
| 2021-08-18T20:15:12
| 397,723,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
import math
# Ejercicio 3.1a
def intervalo_a_seg(h, m, s):
s += h * 3600
s += m * 60
return s
# Ejercicio 3.1b
def intervalo_a_hms(s):
total_s = s
h = round(math.modf(s/3600)[1])
m = round(math.modf((math.modf(s/3600)[0]*3600)/60)[1])
s = round(math.modf((math.modf(s/3600)[0]*3600)/60)[0]*60)
print("{total_s} segundos son {h} hora/s, {m} minuto/s y {s} segundo/s".format(total_s=total_s, h=h, m=m, s=s))
|
[
"maurogheco@gmail.com"
] |
maurogheco@gmail.com
|
8089fe4de34510647070bf8e5e00c7aaa2371e66
|
15f6e61d9ee8542d9feda70c3cf17d8c5028ef44
|
/validate_getattr.py
|
e1b1efb35fe7a8a2d96885ba03cc3f26bd87efbe
|
[] |
no_license
|
pniewiadowski/learing_python
|
e6446783dbef6105d026b94ec4b1ac8ce5c46ae9
|
ef12d299da40ec99b1d4550e1e2c2264c6c4dea5
|
refs/heads/master
| 2020-06-18T15:19:01.172124
| 2018-02-21T20:24:35
| 2018-02-21T20:24:35
| 94,167,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 969
|
py
|
class CardHolder:
acctlen = 8
retirage = 59.5
def __init__(self, acct, name, age, addr):
self.acct = acct
self.name = name
self.age = age
self.addr = addr
def __getattr__(self, item):
if item == 'acct':
return self._acct[:-3] + '***'
elif item == 'remain':
return self.retirage - self.age
else:
raise AttributeError(item)
def __setattr__(self, key, value):
if key == 'name':
            value = value.lower().replace(' ', '_')  # keep the transformed value instead of discarding it
elif key == 'age':
if value < 0 or value > 150:
raise ValueError('invalid age')
elif key == 'acct':
            key = '_acct'  # store under the private name that __getattr__ masks
value = value.replace('-', '')
if len(value) != self.acctlen:
raise TypeError('invalid acct number')
elif key == 'remain':
raise TypeError('can not set remain')
self.__dict__[key] = value
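

# Illustrative usage (an addition; the original file only defines the class). The account
# number is stored under '_acct' by __setattr__ and read back masked through __getattr__.
if __name__ == '__main__':
    bob = CardHolder('1234-5678', 'Bob Smith', 40, '123 Main St')
    print(bob.acct)    # masked account number: '12345***'
    print(bob.remain)  # retirage - age = 19.5
    bob.age = 41       # validated by __setattr__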
|
[
"niewiadowski@gmail.com"
] |
niewiadowski@gmail.com
|
ab813a77be5a81313190f7803ab9e57268e43c88
|
d0677b27a5360651c5ca491cf566207810e57639
|
/pterasoftware/steady_horseshoe_vortex_lattice_method.py
|
629d5048b1f5464fd82b227ac6a8f519972599cd
|
[
"MIT"
] |
permissive
|
Haider-BA/PteraSoftware
|
a0e9a73b7474a379cf1fbc686042ec9e3b52d4c5
|
06b62a78e1da04975d115af2d7b95434ab9cc550
|
refs/heads/master
| 2023-09-02T20:05:12.799392
| 2021-10-27T05:23:28
| 2021-10-27T05:23:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,221
|
py
|
""" This module contains the class definition of this package's steady horseshoe
vortex lattice solver.
This module contains the following classes:
SteadyHorseshoeVortexLatticeMethodSolver: This is an aerodynamics solver that
uses a steady horseshoe vortex lattice method.
This module contains the following exceptions:
None
This module contains the following functions:
None
"""
import logging
import numpy as np
from . import aerodynamics
from . import functions
# ToDo: Update this class's documentation.
class SteadyHorseshoeVortexLatticeMethodSolver:
"""This is an aerodynamics solver that uses a steady horseshoe vortex lattice
method.
Citation:
Adapted from: aerodynamics.vlm3.py in AeroSandbox
Author: Peter Sharpe
Date of Retrieval: 04/28/2020
This class contains the following public methods:
run: Run the solver on the steady problem.
initialize_panel_vortices: This method calculates the locations of the vortex
vertices, and then initializes the panels' vortices.
collapse_geometry: This method converts attributes of the problem's geometry
into 1D ndarrays. This facilitates vectorization, which speeds up the solver.
calculate_wing_wing_influences: This method finds the matrix of wing-wing
influence coefficients associated with this airplane's geometry.
        calculate_vortex_strengths: Solve for each panel's vortex strengths.
        calculate_near_field_forces_and_moments: Find the forces and moments
        calculated from the near field.
This class contains the following class attributes:
None
Subclassing:
This class is not meant to be subclassed.
"""
def __init__(self, steady_problem):
"""This is the initialization method.
:param steady_problem: SteadyProblem
This is the steady problem to be solved.
:return: None
"""
# Initialize this solution's attributes.
self.steady_problem = steady_problem
self.airplane = self.steady_problem.airplane
self.operating_point = self.steady_problem.operating_point
# Initialize attributes to hold aerodynamic data that pertains to this problem.
self.wing_wing_influences = np.zeros(
(self.airplane.num_panels, self.airplane.num_panels)
)
self.freestream_velocity = (
self.operating_point.calculate_freestream_velocity_geometry_axes()
)
self.freestream_wing_influences = np.zeros(self.airplane.num_panels)
self.vortex_strengths = np.zeros(self.airplane.num_panels)
self.panel_normal_directions = np.zeros((self.airplane.num_panels, 3))
self.panel_areas = np.zeros(self.airplane.num_panels)
self.panel_collocation_points = np.zeros((self.airplane.num_panels, 3))
self.panel_vortex_strengths = np.zeros(self.airplane.num_panels)
self.panel_back_right_vortex_vertices = np.zeros((self.airplane.num_panels, 3))
self.panel_front_right_vortex_vertices = np.zeros((self.airplane.num_panels, 3))
self.panel_front_left_vortex_vertices = np.zeros((self.airplane.num_panels, 3))
self.panel_back_left_vortex_vertices = np.zeros((self.airplane.num_panels, 3))
self.panels = np.empty(self.airplane.num_panels, dtype=object)
self.panel_bound_vortex_centers = np.zeros((self.airplane.num_panels, 3))
self.panel_bound_vortex_vectors = np.zeros((self.airplane.num_panels, 3))
self.seed_points = np.empty((0, 3))
self.streamline_points = None
def run(self, logging_level="Warning"):
"""Run the solver on the steady problem.
:param logging_level: str, optional
This parameter determines the detail of information that the solver's
logger will output while running. The options are, in order of detail and
severity, "Debug", "Info", "Warning", "Error", "Critical". The default
value is "Warning".
:return: None
"""
# Configure the problem's logger.
logging_level_value = functions.convert_logging_level_name_to_value(
logging_level
)
logging.basicConfig(level=logging_level_value)
# Initialize this problem's panels to have vortices congruent with this
# solver type.
logging.info("Initializing the panel vortices.")
self.initialize_panel_vortices()
# Collapse this problem's geometry matrices into 1D ndarrays of attributes.
logging.info("Collapsing the geometry.")
self.collapse_geometry()
# Find the matrix of aerodynamic influence coefficients associated with this
# problem's geometry.
logging.info("Calculating the wing-wing influences.")
self.calculate_wing_wing_influences()
        # Find the normal freestream speed at every collocation point without
# vortices.
logging.info("Calculating the freestream-wing influences.")
functions.calculate_steady_freestream_wing_influences(steady_solver=self)
# Solve for each panel's vortex strengths.
logging.info("Calculating the vortex strengths.")
self.calculate_vortex_strengths()
# Solve for the near field forces and moments on each panel.
logging.info("Calculating the near field forces.")
self.calculate_near_field_forces_and_moments()
# Solve for the location of the streamlines coming off the back of the wings.
logging.info("Calculating streamlines.")
functions.calculate_streamlines(self)
def initialize_panel_vortices(self):
"""This method calculates the locations of the vortex vertices, and then
initializes the panels' vortices.
Every panel has a horseshoe vortex. The vortex's finite leg runs along the
        panel's quarter chord from right to left. Its infinite legs point backwards
in the positive x direction.
:return: None
"""
# Find the freestream direction in geometry axes.
freestream_direction = (
self.operating_point.calculate_freestream_direction_geometry_axes()
)
# Iterate through the current_airplane's wings.
for wing in self.airplane.wings:
# Find a suitable length for the "infinite" legs of the horseshoe
# vortices on this wing. At twenty-times the wing's span, these legs are
# essentially infinite.
infinite_leg_length = wing.span * 20
# Iterate through the wing's chordwise and spanwise panel positions.
for chordwise_position in range(wing.num_chordwise_panels):
for spanwise_position in range(wing.num_spanwise_panels):
# Pull the panel object out of the wing's list of panels.
panel = wing.panels[chordwise_position, spanwise_position]
# Find the location of the panel's front and right vortex vertices.
front_left_vortex_vertex = panel.front_left_vortex_vertex
front_right_vortex_vertex = panel.front_right_vortex_vertex
# Initialize the horseshoe vortex at this panel.
panel.horseshoe_vortex = aerodynamics.HorseshoeVortex(
finite_leg_origin=front_right_vortex_vertex,
finite_leg_termination=front_left_vortex_vertex,
strength=None,
infinite_leg_direction=freestream_direction,
infinite_leg_length=infinite_leg_length,
)
def collapse_geometry(self):
"""This method converts attributes of the problem's geometry into 1D
ndarrays. This facilitates vectorization, which speeds up the solver.
:return: None
"""
# Initialize a variable to hold the global position of the panel as we
# iterate through them.
global_panel_position = 0
# Iterate through the airplane's wings.
for wing in self.airplane.wings:
# Convert this wing's 2D array of panels into a 1D array.
panels = np.ravel(wing.panels)
# Iterate through the 1D array of this wing's panels.
for panel in panels:
# Update the solver's list of attributes with this panel's attributes.
self.panels[global_panel_position] = panel
self.panel_normal_directions[
global_panel_position, :
] = panel.normal_direction
self.panel_areas[global_panel_position] = panel.area
self.panel_collocation_points[
global_panel_position, :
] = panel.collocation_point
self.panel_back_right_vortex_vertices[
global_panel_position, :
] = panel.horseshoe_vortex.right_leg.origin
self.panel_front_right_vortex_vertices[
global_panel_position, :
] = panel.horseshoe_vortex.right_leg.termination
self.panel_front_left_vortex_vertices[
global_panel_position, :
] = panel.horseshoe_vortex.left_leg.origin
self.panel_back_left_vortex_vertices[
global_panel_position, :
] = panel.horseshoe_vortex.left_leg.termination
self.panel_bound_vortex_centers[
global_panel_position, :
] = panel.horseshoe_vortex.finite_leg.center
self.panel_bound_vortex_vectors[
global_panel_position, :
] = panel.horseshoe_vortex.finite_leg.vector
# Check if this panel is on the trailing edge.
if panel.is_trailing_edge:
                    # If it is, calculate its streamline seed point and add it to
# the solver's array of seed points.
self.seed_points = np.vstack(
(
self.seed_points,
panel.back_left_vertex
+ 0.5 * (panel.back_right_vertex - panel.back_left_vertex),
)
)
# Increment the global panel position.
global_panel_position += 1
def calculate_wing_wing_influences(self):
"""This method finds the matrix of wing-wing influence coefficients
associated with this airplane's geometry.
:return: None
"""
# Find the matrix of normalized velocities induced at every panel's
# collocation point by every panel's horseshoe vortex.
induced_velocities = aerodynamics.expanded_velocities_from_horseshoe_vortices(
points=self.panel_collocation_points,
back_right_vortex_vertices=self.panel_back_right_vortex_vertices,
front_right_vortex_vertices=self.panel_front_right_vortex_vertices,
front_left_vortex_vertices=self.panel_front_left_vortex_vertices,
back_left_vortex_vertices=self.panel_back_left_vortex_vertices,
strengths=np.ones(self.airplane.num_panels),
)
# Take the batch dot product of the normalized velocities with each panel's
# normal direction. This is now the problem's matrix of wing-wing influence
# coefficients.
self.wing_wing_influences = np.einsum(
"...k,...k->...",
induced_velocities,
np.expand_dims(self.panel_normal_directions, axis=1),
)
def calculate_vortex_strengths(self):
"""Solve for each panel's vortex strengths.
:return: None
"""
# Solve for the strength of each panel's vortex.
self.vortex_strengths = np.linalg.solve(
self.wing_wing_influences, -self.freestream_wing_influences
)
# Iterate through the panels and update their vortex strengths.
for panel_num in range(self.panels.size):
# Get the panel at this location.
panel = self.panels[panel_num]
# Update this panel's horseshoe vortex strength.
panel.horseshoe_vortex.update_strength(self.vortex_strengths[panel_num])
# ToDo: Update this method's documentation.
def calculate_solution_velocity(self, points):
"""
:return:
"""
induced_velocities = aerodynamics.collapsed_velocities_from_horseshoe_vortices(
points=points,
back_right_vortex_vertices=self.panel_back_right_vortex_vertices,
front_right_vortex_vertices=self.panel_front_right_vortex_vertices,
front_left_vortex_vertices=self.panel_front_left_vortex_vertices,
back_left_vortex_vertices=self.panel_back_left_vortex_vertices,
strengths=self.vortex_strengths,
)
total_velocities = induced_velocities + self.freestream_velocity
return total_velocities
def calculate_near_field_forces_and_moments(self):
"""Find the the forces and moments calculated from the near field.
Note: The forces and moments calculated are in geometry axes. The moment is
about the airplane's reference point, which should be at the center of
gravity. The units are Newtons and Newton-meters.
:return: None
"""
# Calculate the velocities induced at every panel's bound vortex center.
induced_velocities = aerodynamics.collapsed_velocities_from_horseshoe_vortices(
points=self.panel_bound_vortex_centers,
back_right_vortex_vertices=self.panel_back_right_vortex_vertices,
front_right_vortex_vertices=self.panel_front_right_vortex_vertices,
front_left_vortex_vertices=self.panel_front_left_vortex_vertices,
back_left_vortex_vertices=self.panel_back_left_vortex_vertices,
strengths=self.vortex_strengths,
)
# Add the freestream velocity to the induced velocities to calculate the
# total velocity at every panel's bound vortex center.
total_velocities = induced_velocities + self.freestream_velocity
# Calculate the near field force, in geometry axes, on each panel's bound
# vortex.
near_field_forces_geometry_axes = (
self.operating_point.density
* np.expand_dims(self.vortex_strengths, axis=1)
* np.cross(total_velocities, self.panel_bound_vortex_vectors, axis=-1)
)
# Calculate the near field moments, in geometry axes, on each panel's bound
# vortex.
near_field_moments_geometry_axes = np.cross(
self.panel_bound_vortex_centers - self.airplane.xyz_ref,
near_field_forces_geometry_axes,
axis=-1,
)
functions.process_steady_solver_forces(
steady_solver=self,
near_field_forces_geometry_axes=near_field_forces_geometry_axes,
near_field_moments_geometry_axes=near_field_moments_geometry_axes,
)
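

# Illustrative usage sketch (an addition, not part of the original module). The solver is
# built from a SteadyProblem constructed elsewhere (the constructor above only needs it to
# expose `airplane` and `operating_point`) and is then driven entirely through `run`:
#
#   solver = SteadyHorseshoeVortexLatticeMethodSolver(steady_problem=my_steady_problem)
#   solver.run(logging_level="Info")
#   print(solver.vortex_strengths.shape)  # one circulation strength per panel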
|
[
"camerongurban@gmail.com"
] |
camerongurban@gmail.com
|
84829197581dab53cc849f2dd19f3832c32eded1
|
e8fad6e19d96b142f9d15e836b964b187a066777
|
/FilterPosition.py
|
737e954252f0bca21e57d5c02f2213063c226070
|
[] |
no_license
|
RABKKK/CenterAxis
|
348daacb247926e180622fbf2a633bc58e4f1b20
|
176839251dc2bce41b314270c7d1c24b89e391ce
|
refs/heads/master
| 2020-11-30T00:08:16.579525
| 2020-01-10T07:08:17
| 2020-01-10T07:08:17
| 230,247,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 932
|
py
|
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#import math
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
posmat=np.genfromtxt('Position.csv')
refposmat=np.array([])
for val in posmat:
    if val[2] > 0:  # keep only points with a positive z coordinate
        if refposmat.shape[0] == 0:
            # first accepted point: start the filtered array and keep it as the reference
            refposmat = np.array([val[0], val[1], val[2], 0])
            inval = val
        else:
            # Euclidean distance between the current point and the reference point
            indist = np.power(np.sum((val - inval) * (val - inval)), 0.5)
            if indist < np.power(2, 0.5) * 0.09:  # maximum distance of diagonal
                # append the point and record its distance in the fourth column
                refposmat = np.vstack((refposmat, np.array([val[0], val[1], val[2], 0])))
                refposmat[-1, 3] = indist
np.savetxt('FiltPosition.csv',refposmat)
ax.scatter(refposmat[:,0],refposmat[:,1],refposmat[:,2],c=refposmat[:,3]/np.max(refposmat[:,3]))
#ax.plot(refposmat[:,0],refposmat[:,1],refposmat[:,2])
plt.xlim(-0.1,0.06)
plt.ylim(-0.06,.1)
#ax.zlim()
plt.show()
|
[
"noreply@github.com"
] |
RABKKK.noreply@github.com
|
3597810923be7e5ba27e70e1a7d632e48b3f4bfd
|
f225049121c2a7b1cc0e0e3e1dff038343796829
|
/GetComputeComplexity.py
|
01119915c32d8c6b90de6a7ef9a55cbe40b35341
|
[] |
no_license
|
GulshanSaleem/Surveilia
|
225602800d2fc74e80ca906b52468608b78e54b1
|
a4e1138f646658945b2eb4e0ee9e99260c69951e
|
refs/heads/main
| 2023-04-15T05:03:13.379710
| 2022-09-04T03:08:26
| 2022-09-04T03:08:26
| 476,690,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,261
|
py
|
# performance metrics
import torch
from ops.models import TSN
from ops.transforms import *
from ptflops import get_model_complexity_info
this_weights='checkpoint/TSM_ucfcrime_RGB_mobilenetv2_shift8_blockres_avg_segment8_e25/ckpt.best.pth.tar'
#this_weights='checkpoint/TSM_ucfcrime_RGB_resnet50_shift8_blockres_avg_segment8_e25/ckpt.best.pth.tar'
this_arch = 'mobilenetv2'
def parse_shift_option_from_log_name(log_name):
if 'shift' in log_name:
strings = log_name.split('_')
for i, s in enumerate(strings):
if 'shift' in s:
break
return True, int(strings[i].replace('shift', '')), strings[i + 1]
else:
return False, None, None
is_shift, shift_div, shift_place = parse_shift_option_from_log_name(this_weights)
print(is_shift, shift_div, shift_place)
with torch.cuda.device(0):
net = TSN(2, 1, 'RGB',
base_model=this_arch,
consensus_type='avg',
img_feature_dim='225',
#pretrain=args.pretrain,
is_shift=is_shift, shift_div=shift_div, shift_place=shift_place,
non_local='_nl' in this_weights,
)
macs, params = get_model_complexity_info(net, (1,3, 224, 224), as_strings=True,print_per_layer_stat=False, verbose=False)
print("Using ptflops")
print('{:<30} {:<8}'.format('Computational complexity: ', macs))
print('{:<30} {:<8}'.format('Number of parameters: ', params))
from thop import profile
model = net = TSN(2, 1, 'RGB',
base_model=this_arch,
consensus_type='avg',
img_feature_dim='225',
#pretrain=args.pretrain,
is_shift=is_shift, shift_div=shift_div, shift_place=shift_place,
non_local='_nl' in this_weights,
)
input = torch.randn(1, 3, 224, 224)
macs, params = profile(model, inputs=(input, ))
from thop import clever_format
macs, params = clever_format([macs, params], "%.3f")
print('{:<30} {:<8}'.format('Computational complexity: ', macs))
print('{:<30} {:<8}'.format('Number of parameters: ', params))
# Draw model graph.
from torch.utils.tensorboard import SummaryWriter
tb = SummaryWriter()
tb.add_graph(model,input)
tb.close()
|
[
"noreply@github.com"
] |
GulshanSaleem.noreply@github.com
|
4253fd5f2ff9e52127da94da2d9d23cc343574be
|
d06644461b679a08b98d0da9bed9781c0ed2ffba
|
/DocumentClassification/code/__init__.py
|
6848590808e833a64d563fff646299be0d119d78
|
[] |
no_license
|
xxx0624/QA_Model
|
e9478e67576bbd023ec5f8dcdff02e16d8b113b3
|
6b72e6d4049bd1cc526dda6e024ef63783f790b4
|
refs/heads/master
| 2021-06-05T00:05:06.912489
| 2019-03-05T03:09:29
| 2019-03-05T03:09:29
| 96,797,586
| 23
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 55
|
py
|
__author__ = 'xing'
a = {1: 2, 3: 4}
for k in a:
    print(k)
|
[
"xzheng0624@gmail.com"
] |
xzheng0624@gmail.com
|
afcbf7fadfed65175c99e47818275c1d00e368d4
|
02267c8104c951faf4fee48f525225b7f605d9ee
|
/venv/Lib/site-packages/gslib/tests/testcase/integration_testcase.py
|
15f488b31f4c5fe40a2cff72bf2b62f9c382189d
|
[] |
no_license
|
zakaria6193/examproject
|
9c559df703bb03ce993f4ca0666c3f56f1b8b12b
|
b07f3a2e795f8efb1d9bdd2aaa677352279176a4
|
refs/heads/master
| 2023-06-03T12:13:24.627470
| 2021-06-19T20:22:42
| 2021-06-19T20:22:42
| 378,498,436
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 53,345
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains gsutil base integration test case class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
import datetime
import locale
import logging
import os
import signal
import subprocess
import sys
import tempfile
import threading
import time
import boto
from boto import config
from boto.exception import StorageResponseError
from boto.s3.deletemarker import DeleteMarker
from boto.storage_uri import BucketStorageUri
import gslib
from gslib.boto_translation import BotoTranslation
from gslib.cloud_api import PreconditionException
from gslib.cloud_api import Preconditions
from gslib.discard_messages_queue import DiscardMessagesQueue
from gslib.exception import CommandException
from gslib.gcs_json_api import GcsJsonApi
from gslib.kms_api import KmsApi
from gslib.project_id import GOOG_PROJ_ID_HDR
from gslib.project_id import PopulateProjectId
from gslib.tests.testcase import base
import gslib.tests.util as util
from gslib.tests.util import InvokedFromParFile
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import RUN_S3_TESTS
from gslib.tests.util import SetBotoConfigForTest
from gslib.tests.util import SetEnvironmentForTest
from gslib.tests.util import unittest
from gslib.tests.util import USING_JSON_API
import gslib.third_party.storage_apitools.storage_v1_messages as apitools_messages
from gslib.utils.constants import UTF8
from gslib.utils.encryption_helper import Base64Sha256FromBase64EncryptionKey
from gslib.utils.encryption_helper import CryptoKeyWrapperFromKey
from gslib.utils.hashing_helper import Base64ToHexHash
from gslib.utils.metadata_util import CreateCustomMetadata
from gslib.utils.metadata_util import GetValueFromObjectCustomMetadata
from gslib.utils.posix_util import ATIME_ATTR
from gslib.utils.posix_util import GID_ATTR
from gslib.utils.posix_util import MODE_ATTR
from gslib.utils.posix_util import MTIME_ATTR
from gslib.utils.posix_util import UID_ATTR
from gslib.utils.retry_util import Retry
import six
from six.moves import range
LOGGER = logging.getLogger('integration-test')
# TODO: Replace tests which looks for test_api == ApiSelector.(XML|JSON) with
# these decorators.
def SkipForXML(reason):
"""Skips the test if running S3 tests, or if prefer_api isn't set to json."""
if not USING_JSON_API or RUN_S3_TESTS:
return unittest.skip(reason)
else:
return lambda func: func
def SkipForJSON(reason):
if USING_JSON_API:
return unittest.skip(reason)
else:
return lambda func: func
def SkipForGS(reason):
if not RUN_S3_TESTS:
return unittest.skip(reason)
else:
return lambda func: func
def SkipForS3(reason):
if RUN_S3_TESTS:
return unittest.skip(reason)
else:
return lambda func: func
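

# Illustrative usage (an addition): these decorators are applied to individual test
# methods, for example:
#
#   @SkipForS3('Test exercises a GS-only feature.')
#   def test_gs_only_behavior(self):
#     ...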
# TODO: Right now, most tests use the XML API. Instead, they should respect
# prefer_api in the same way that commands do.
@unittest.skipUnless(util.RUN_INTEGRATION_TESTS,
'Not running integration tests.')
class GsUtilIntegrationTestCase(base.GsUtilTestCase):
"""Base class for gsutil integration tests."""
GROUP_TEST_ADDRESS = 'gs-discussion@googlegroups.com'
GROUP_TEST_ID = (
'00b4903a97d097895ab58ef505d535916a712215b79c3e54932c2eb502ad97f5')
USER_TEST_ADDRESS = 'gsutiltestuser@gmail.com'
USER_TEST_ID = (
'00b4903a97b201e40d2a5a3ddfe044bb1ab79c75b2e817cbe350297eccc81c84')
DOMAIN_TEST = 'google.com'
# No one can create this bucket without owning the gmail.com domain, and we
# won't create this bucket, so it shouldn't exist.
# It would be nice to use google.com here but JSON API disallows
# 'google' in resource IDs.
nonexistent_bucket_name = 'nonexistent-bucket-foobar.gmail.com'
def setUp(self):
"""Creates base configuration for integration tests."""
super(GsUtilIntegrationTestCase, self).setUp()
self.bucket_uris = []
# Set up API version and project ID handler.
self.api_version = boto.config.get_value('GSUtil', 'default_api_version',
'1')
# Instantiate a JSON API for use by the current integration test.
self.json_api = GcsJsonApi(BucketStorageUri, logging.getLogger(),
DiscardMessagesQueue(), 'gs')
self.xml_api = BotoTranslation(BucketStorageUri, logging.getLogger(),
DiscardMessagesQueue, self.default_provider)
self.kms_api = KmsApi(logging.getLogger())
self.multiregional_buckets = util.USE_MULTIREGIONAL_BUCKETS
if util.RUN_S3_TESTS:
self.nonexistent_bucket_name = (
'nonexistentbucket-asf801rj3r9as90mfnnkjxpo02')
# Retry with an exponential backoff if a server error is received. This
# ensures that we try *really* hard to clean up after ourselves.
# TODO: As long as we're still using boto to do the teardown,
# we decorate with boto exceptions. Eventually this should be migrated
# to CloudApi exceptions.
@Retry(StorageResponseError, tries=7, timeout_secs=1)
def tearDown(self):
super(GsUtilIntegrationTestCase, self).tearDown()
while self.bucket_uris:
bucket_uri = self.bucket_uris[-1]
try:
bucket_list = self._ListBucket(bucket_uri)
except StorageResponseError as e:
# This can happen for tests of rm -r command, which for bucket-only
# URIs delete the bucket at the end.
if e.status == 404:
self.bucket_uris.pop()
continue
else:
raise
while bucket_list:
error = None
for k in bucket_list:
try:
if isinstance(k, DeleteMarker):
bucket_uri.get_bucket().delete_key(k.name,
version_id=k.version_id)
else:
k.delete()
except StorageResponseError as e:
if e.status == 404:
# This could happen if objects that have already been deleted are
# still showing up in the listing due to eventual consistency. In
              # that case, we continue on until we've tried to delete every
# obj in the listing before raising the error on which to retry.
error = e
elif e.status == 403 and (e.error_code == 'ObjectUnderActiveHold' or
e.error_code == 'RetentionPolicyNotMet'):
# Object deletion fails if they are under active Temporary Hold,
# Event-Based hold or still under retention.
#
# We purposefully do not raise error in order to allow teardown
# to process all the objects in a bucket first. The retry logic on
# the teardown method will kick in when bucket deletion fails (due
# to bucket being non-empty) and retry deleting these objects
# and their associated buckets.
self._ClearHoldsOnObjectAndWaitForRetentionDuration(
bucket_uri, k.name)
else:
raise
if error:
raise error # pylint: disable=raising-bad-type
bucket_list = self._ListBucket(bucket_uri)
bucket_uri.delete_bucket()
self.bucket_uris.pop()
def _ClearHoldsOnObjectAndWaitForRetentionDuration(self, bucket_uri,
object_name):
"""Removes Holds on test objects and waits till retention duration is over.
This method makes sure that object is not under active Temporary Hold or
Release Hold. It also waits (up to 1 minute) till retention duration for the
object is over. This is necessary for cleanup, otherwise such test objects
cannot be deleted.
It's worth noting that tests should do their best to remove holds and wait
for objects' retention period on their own and this is just a fallback.
Additionally, Tests should not use retention duration longer than 1 minute,
preferably only few seconds in order to avoid lengthening test execution
time unnecessarily.
Args:
bucket_uri: bucket's uri.
object_name: object's name.
"""
object_metadata = self.json_api.GetObjectMetadata(
bucket_uri.bucket_name,
object_name,
fields=['timeCreated', 'temporaryHold', 'eventBasedHold'])
object_uri = '{}{}'.format(bucket_uri, object_name)
if object_metadata.temporaryHold:
self.RunGsUtil(['retention', 'temp', 'release', object_uri])
if object_metadata.eventBasedHold:
self.RunGsUtil(['retention', 'event', 'release', object_uri])
retention_policy = self.json_api.GetBucket(bucket_uri.bucket_name,
fields=['retentionPolicy'
]).retentionPolicy
retention_period = (retention_policy.retentionPeriod
if retention_policy is not None else 0)
# throwing exceptions for Retention durations larger than 60 seconds.
if retention_period <= 60:
time.sleep(retention_period)
else:
raise CommandException(('Retention duration is too large for bucket "{}".'
' Use shorter durations for Retention duration in'
' tests').format(bucket_uri))
def _SetObjectCustomMetadataAttribute(self, provider, bucket_name,
object_name, attr_name, attr_value):
"""Sets a custom metadata attribute for an object.
Args:
provider: Provider string for the bucket, ex. 'gs' or 's3.
bucket_name: The name of the bucket the object is in.
object_name: The name of the object itself.
attr_name: The name of the custom metadata attribute to set.
attr_value: The value of the custom metadata attribute to set.
Returns:
None
"""
obj_metadata = apitools_messages.Object()
obj_metadata.metadata = CreateCustomMetadata({attr_name: attr_value})
if provider == 'gs':
self.json_api.PatchObjectMetadata(bucket_name,
object_name,
obj_metadata,
provider=provider)
else:
self.xml_api.PatchObjectMetadata(bucket_name,
object_name,
obj_metadata,
provider=provider)
def SetPOSIXMetadata(self,
provider,
bucket_name,
object_name,
atime=None,
mtime=None,
uid=None,
gid=None,
mode=None):
"""Sets POSIX metadata for the object."""
obj_metadata = apitools_messages.Object()
obj_metadata.metadata = apitools_messages.Object.MetadataValue(
additionalProperties=[])
if atime is not None:
CreateCustomMetadata(entries={ATIME_ATTR: atime},
custom_metadata=obj_metadata.metadata)
if mode is not None:
CreateCustomMetadata(entries={MODE_ATTR: mode},
custom_metadata=obj_metadata.metadata)
if mtime is not None:
CreateCustomMetadata(entries={MTIME_ATTR: mtime},
custom_metadata=obj_metadata.metadata)
if uid is not None:
CreateCustomMetadata(entries={UID_ATTR: uid},
custom_metadata=obj_metadata.metadata)
if gid is not None:
CreateCustomMetadata(entries={GID_ATTR: gid},
custom_metadata=obj_metadata.metadata)
if provider == 'gs':
self.json_api.PatchObjectMetadata(bucket_name,
object_name,
obj_metadata,
provider=provider)
else:
self.xml_api.PatchObjectMetadata(bucket_name,
object_name,
obj_metadata,
provider=provider)
def ClearPOSIXMetadata(self, obj):
"""Uses the setmeta command to clear POSIX attributes from user metadata.
Args:
obj: The object to clear POSIX metadata for.
"""
provider_meta_string = 'goog' if obj.scheme == 'gs' else 'amz'
self.RunGsUtil([
'setmeta', '-h',
'x-%s-meta-%s' % (provider_meta_string, ATIME_ATTR), '-h',
'x-%s-meta-%s' % (provider_meta_string, MTIME_ATTR), '-h',
'x-%s-meta-%s' % (provider_meta_string, UID_ATTR), '-h',
'x-%s-meta-%s' % (provider_meta_string, GID_ATTR), '-h',
'x-%s-meta-%s' % (provider_meta_string, MODE_ATTR),
suri(obj)
])
def _ServiceAccountCredentialsPresent(self):
# TODO: Currently, service accounts cannot be project owners (unless
# they are exempted for legacy reasons). Unfortunately, setting a canned ACL
# other than project-private, the ACL that buckets get by default, removes
# project-editors access from the bucket ACL. So any canned ACL that would
# actually represent a change the bucket would also orphan the service
# account's access to the bucket. If service accounts can be owners
# in the future, remove this function and update all callers.
return (config.has_option('Credentials', 'gs_service_key_file') or
config.has_option('GoogleCompute', 'service_account'))
def _ListBucket(self, bucket_uri):
if bucket_uri.scheme == 's3':
# storage_uri will omit delete markers from bucket listings, but
# these must be deleted before we can remove an S3 bucket.
return list(v for v in bucket_uri.get_bucket().list_versions())
return list(bucket_uri.list_bucket(all_versions=True))
def AssertNObjectsInBucket(self, bucket_uri, num_objects, versioned=False):
"""Checks (with retries) that 'ls bucket_uri/**' returns num_objects.
This is a common test pattern to deal with eventual listing consistency for
tests that rely on a set of objects to be listed.
Args:
bucket_uri: storage_uri for the bucket.
num_objects: number of objects expected in the bucket.
versioned: If True, perform a versioned listing.
Raises:
AssertionError if number of objects does not match expected value.
Returns:
Listing split across lines.
"""
def _CheckBucket():
command = ['ls', '-a'] if versioned else ['ls']
b_uri = [suri(bucket_uri) + '/**'] if num_objects else [suri(bucket_uri)]
listing = self.RunGsUtil(command + b_uri, return_stdout=True).split('\n')
# num_objects + one trailing newline.
self.assertEquals(len(listing), num_objects + 1)
return listing
if self.multiregional_buckets:
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=5, timeout_secs=1)
def _Check1():
return _CheckBucket()
return _Check1()
else:
return _CheckBucket()
def AssertObjectUsesCSEK(self, object_uri_str, encryption_key):
"""Strongly consistent check that the correct CSEK encryption key is used.
This check forces use of the JSON API, as encryption information is not
returned in object metadata via the XML API.
Args:
object_uri_str: uri for the object.
encryption_key: expected CSEK key.
"""
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
stdout = self.RunGsUtil(['stat', object_uri_str], return_stdout=True)
self.assertIn(
Base64Sha256FromBase64EncryptionKey(encryption_key).decode('ascii'),
stdout, 'Object %s did not use expected encryption key with hash %s. '
'Actual object: %s' %
(object_uri_str, Base64Sha256FromBase64EncryptionKey(encryption_key),
stdout))
def AssertObjectUsesCMEK(self, object_uri_str, encryption_key):
"""Strongly consistent check that the correct KMS encryption key is used.
This check forces use of the JSON API, as encryption information is not
returned in object metadata via the XML API.
Args:
object_uri_str: uri for the object.
encryption_key: expected CMEK key.
"""
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
stdout = self.RunGsUtil(['stat', object_uri_str], return_stdout=True)
self.assertRegexpMatches(stdout, r'KMS key:\s+%s' % encryption_key)
def AssertObjectUnencrypted(self, object_uri_str):
"""Checks that no CSEK or CMEK attributes appear in `stat` output.
This check forces use of the JSON API, as encryption information is not
returned in object metadata via the XML API.
Args:
object_uri_str: uri for the object.
"""
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
stdout = self.RunGsUtil(['stat', object_uri_str], return_stdout=True)
self.assertNotIn('Encryption key SHA256', stdout)
self.assertNotIn('KMS key', stdout)
def CreateBucketWithRetentionPolicy(self,
retention_period_in_seconds,
is_locked=None,
bucket_name=None):
"""Creates a test bucket with Retention Policy.
The bucket and all of its contents will be deleted after the test.
Args:
retention_period_in_seconds: Retention duration in seconds
is_locked: Indicates whether Retention Policy should be locked
on the bucket or not.
bucket_name: Create the bucket with this name. If not provided, a
temporary test bucket name is constructed.
Returns:
StorageUri for the created bucket.
"""
# Creating bucket with Retention Policy.
retention_policy = (apitools_messages.Bucket.RetentionPolicyValue(
retentionPeriod=retention_period_in_seconds))
bucket_uri = self.CreateBucket(bucket_name=bucket_name,
retention_policy=retention_policy,
prefer_json_api=True)
if is_locked:
# Locking Retention Policy
self.RunGsUtil(['retention', 'lock', suri(bucket_uri)], stdin='y')
# Verifying Retention Policy on the bucket.
self.VerifyRetentionPolicy(
bucket_uri,
expected_retention_period_in_seconds=retention_period_in_seconds,
expected_is_locked=is_locked)
return bucket_uri
def VerifyRetentionPolicy(self,
bucket_uri,
expected_retention_period_in_seconds=None,
expected_is_locked=None):
"""Verifies the Retention Policy on a bucket.
Args:
bucket_uri: Specifies the bucket.
expected_retention_period_in_seconds: Specifies the expected Retention
Period of the Retention Policy on
the bucket. Setting this field to
None, implies that no Retention
Policy should be present.
expected_is_locked: Indicates whether the Retention Policy should be
locked or not.
"""
actual_retention_policy = self.json_api.GetBucket(
bucket_uri.bucket_name, fields=['retentionPolicy']).retentionPolicy
if expected_retention_period_in_seconds is None:
self.assertEqual(actual_retention_policy, None)
else:
self.assertEqual(actual_retention_policy.retentionPeriod,
expected_retention_period_in_seconds)
self.assertEqual(actual_retention_policy.isLocked, expected_is_locked)
      # Verifying the effectiveTime of the Retention Policy: since this is an
      # integration test and we don't know the server's exact time, we just
      # verify that the effective time is a timestamp within the last minute.
effective_time_in_seconds = self.DateTimeToSeconds(
actual_retention_policy.effectiveTime)
current_time_in_seconds = self.DateTimeToSeconds(datetime.datetime.now())
self.assertGreater(effective_time_in_seconds,
current_time_in_seconds - 60)
def DateTimeToSeconds(self, datetime_obj):
return int(time.mktime(datetime_obj.timetuple()))
def CreateBucket(self,
bucket_name=None,
test_objects=0,
storage_class=None,
retention_policy=None,
provider=None,
prefer_json_api=False,
versioning_enabled=False,
bucket_policy_only=False,
bucket_name_prefix='',
bucket_name_suffix='',
location=None):
"""Creates a test bucket.
The bucket and all of its contents will be deleted after the test.
Args:
bucket_name: Create the bucket with this name. If not provided, a
temporary test bucket name is constructed.
test_objects: The number of objects that should be placed in the bucket.
Defaults to 0.
      storage_class: Storage class to use. If not provided, we use standard.
retention_policy: Retention policy to be used on the bucket.
provider: Provider to use - either "gs" (the default) or "s3".
prefer_json_api: If True, use the JSON creation functions where possible.
versioning_enabled: If True, set the bucket's versioning attribute to
True.
bucket_policy_only: If True, set the bucket's iamConfiguration's
bucketPolicyOnly attribute to True.
bucket_name_prefix: Unicode string to be prepended to bucket_name
bucket_name_suffix: Unicode string to be appended to bucket_name
location: The location/region in which the bucket should be created.
Returns:
StorageUri for the created bucket.
"""
if not provider:
provider = self.default_provider
# Location is controlled by the -b test flag.
if location is None:
if self.multiregional_buckets or provider == 's3':
location = None
else:
# We default to the "us-central1" location for regional buckets,
# but allow overriding this value in the Boto config.
location = boto.config.get('GSUtil',
'test_cmd_regional_bucket_location',
'us-central1')
bucket_name_prefix = six.ensure_text(bucket_name_prefix)
bucket_name_suffix = six.ensure_text(bucket_name_suffix)
if bucket_name:
bucket_name = ''.join(
[bucket_name_prefix, bucket_name, bucket_name_suffix])
bucket_name = util.MakeBucketNameValid(bucket_name)
else:
bucket_name = self.MakeTempName('bucket',
prefix=bucket_name_prefix,
suffix=bucket_name_suffix)
if prefer_json_api and provider == 'gs':
json_bucket = self.CreateBucketJson(bucket_name=bucket_name,
test_objects=test_objects,
storage_class=storage_class,
location=location,
versioning_enabled=versioning_enabled,
retention_policy=retention_policy,
bucket_policy_only=bucket_policy_only)
bucket_uri = boto.storage_uri('gs://%s' % json_bucket.name.lower(),
suppress_consec_slashes=False)
return bucket_uri
bucket_uri = boto.storage_uri('%s://%s' % (provider, bucket_name.lower()),
suppress_consec_slashes=False)
if provider == 'gs':
# Apply API version and project ID headers if necessary.
headers = {
'x-goog-api-version': self.api_version,
GOOG_PROJ_ID_HDR: PopulateProjectId()
}
else:
headers = {}
#
@Retry(StorageResponseError, tries=7, timeout_secs=1)
def _CreateBucketWithExponentialBackoff():
"""Creates a bucket, retrying with exponential backoff on error.
Parallel tests can easily run into bucket creation quotas.
Retry with exponential backoff so that we create them as fast as we
reasonably can.
Returns:
StorageUri for the created bucket
"""
try:
bucket_uri.create_bucket(storage_class=storage_class,
location=location or '',
headers=headers)
except StorageResponseError as e:
# If the service returns a transient error or a connection breaks,
# it's possible the request succeeded. If that happens, the service
# will return 409s for all future calls even though our intent
# succeeded. If the error message says we already own the bucket,
        # assume success to reduce test flakiness. This depends on the
        # randomness of test bucket naming to prevent name collisions for test
        # buckets created concurrently in the same project, which is acceptable
        # because such collisions are far less likely than service errors.
if e.status == 409 and e.body and 'already own' in e.body:
pass
else:
raise
_CreateBucketWithExponentialBackoff()
self.bucket_uris.append(bucket_uri)
if versioning_enabled:
bucket_uri.configure_versioning(True)
for i in range(test_objects):
self.CreateObject(bucket_uri=bucket_uri,
object_name=self.MakeTempName('obj'),
contents='test {:d}'.format(i).encode('ascii'))
return bucket_uri
def CreateVersionedBucket(self, bucket_name=None, test_objects=0):
"""Creates a versioned test bucket.
The bucket and all of its contents will be deleted after the test.
Args:
bucket_name: Create the bucket with this name. If not provided, a
temporary test bucket name is constructed.
test_objects: The number of objects that should be placed in the bucket.
Defaults to 0.
Returns:
StorageUri for the created bucket with versioning enabled.
"""
# Note that we prefer the JSON API so that we don't require two separate
# steps to create and then set versioning on the bucket (as versioning
# propagation on an existing bucket is subject to eventual consistency).
bucket_uri = self.CreateBucket(bucket_name=bucket_name,
test_objects=test_objects,
prefer_json_api=True,
versioning_enabled=True)
return bucket_uri
def CreateObject(self,
bucket_uri=None,
object_name=None,
contents=None,
prefer_json_api=False,
encryption_key=None,
mode=None,
mtime=None,
uid=None,
gid=None,
storage_class=None,
gs_idempotent_generation=0,
kms_key_name=None):
"""Creates a test object.
Args:
bucket_uri: The URI of the bucket to place the object in. If not
specified, a new temporary bucket is created.
object_name: The name to use for the object. If not specified, a temporary
test object name is constructed.
contents: The contents to write to the object. If not specified, the key
is not written to, which means that it isn't actually created
yet on the server.
prefer_json_api: If true, use the JSON creation functions where possible.
encryption_key: AES256 encryption key to use when creating the object,
if any.
mode: The POSIX mode for the object. Must be a base-8 3-digit integer
represented as a string.
mtime: The modification time of the file in POSIX time (seconds since
UTC 1970-01-01). If not specified, this defaults to the current
system time.
uid: A POSIX user ID.
gid: A POSIX group ID.
storage_class: String representing the storage class to use for the
object.
gs_idempotent_generation: For use when overwriting an object for which
you know the previously uploaded generation. Create GCS object
idempotently by supplying this generation number as a precondition
and assuming the current object is correct on precondition failure.
Defaults to 0 (new object); to disable, set to None.
kms_key_name: Fully-qualified name of the KMS key that should be used to
encrypt the object. Note that this is currently only valid for 'gs'
objects.
Returns:
A StorageUri for the created object.
"""
bucket_uri = bucket_uri or self.CreateBucket()
# checking for valid types - None or unicode/binary text
if contents is not None:
if not isinstance(contents, (six.binary_type, six.text_type)):
raise TypeError('contents must be either none or bytes, not {}'.format(
type(contents)))
contents = six.ensure_binary(contents)
if (contents and bucket_uri.scheme == 'gs' and
(prefer_json_api or encryption_key or kms_key_name)):
object_name = object_name or self.MakeTempName('obj')
json_object = self.CreateObjectJson(
contents=contents,
bucket_name=bucket_uri.bucket_name,
object_name=object_name,
encryption_key=encryption_key,
mtime=mtime,
storage_class=storage_class,
gs_idempotent_generation=gs_idempotent_generation,
kms_key_name=kms_key_name)
object_uri = bucket_uri.clone_replace_name(object_name)
# pylint: disable=protected-access
# Need to update the StorageUri with the correct values while
# avoiding creating a versioned string.
md5 = (Base64ToHexHash(json_object.md5Hash),
json_object.md5Hash.strip('\n"\''))
object_uri._update_from_values(None,
json_object.generation,
True,
md5=md5)
# pylint: enable=protected-access
return object_uri
bucket_uri = bucket_uri or self.CreateBucket()
object_name = object_name or self.MakeTempName('obj')
key_uri = bucket_uri.clone_replace_name(object_name)
if contents is not None:
if bucket_uri.scheme == 'gs' and gs_idempotent_generation is not None:
try:
key_uri.set_contents_from_string(contents,
headers={
'x-goog-if-generation-match':
str(gs_idempotent_generation)
})
except StorageResponseError as e:
if e.status == 412:
pass
else:
raise
else:
key_uri.set_contents_from_string(contents)
custom_metadata_present = (mode is not None or mtime is not None or
uid is not None or gid is not None)
if custom_metadata_present:
self.SetPOSIXMetadata(bucket_uri.scheme,
bucket_uri.bucket_name,
object_name,
atime=None,
mtime=mtime,
uid=uid,
gid=gid,
mode=mode)
return key_uri
def CreateBucketJson(self,
bucket_name=None,
test_objects=0,
storage_class=None,
location=None,
versioning_enabled=False,
retention_policy=None,
bucket_policy_only=False):
"""Creates a test bucket using the JSON API.
The bucket and all of its contents will be deleted after the test.
Args:
bucket_name: Create the bucket with this name. If not provided, a
temporary test bucket name is constructed.
test_objects: The number of objects that should be placed in the bucket.
Defaults to 0.
storage_class: Storage class to use. If not provided we use standard.
location: Location to use.
versioning_enabled: If True, set the bucket's versioning attribute to
True.
retention_policy: Retention policy to be used on the bucket.
bucket_policy_only: If True, set the bucket's iamConfiguration's
bucketPolicyOnly attribute to True.
Returns:
Apitools Bucket for the created bucket.
"""
bucket_name = util.MakeBucketNameValid(bucket_name or
self.MakeTempName('bucket'))
bucket_metadata = apitools_messages.Bucket(name=bucket_name.lower())
if storage_class:
bucket_metadata.storageClass = storage_class
if location:
bucket_metadata.location = location
if versioning_enabled:
bucket_metadata.versioning = (apitools_messages.Bucket.VersioningValue(
enabled=True))
if retention_policy:
bucket_metadata.retentionPolicy = retention_policy
if bucket_policy_only:
iam_config = apitools_messages.Bucket.IamConfigurationValue()
iam_config.bucketPolicyOnly = iam_config.BucketPolicyOnlyValue()
iam_config.bucketPolicyOnly.enabled = True
bucket_metadata.iamConfiguration = iam_config
# TODO: Add retry and exponential backoff.
bucket = self.json_api.CreateBucket(bucket_name, metadata=bucket_metadata)
# Add bucket to list of buckets to be cleaned up.
# TODO: Clean up JSON buckets using JSON API.
self.bucket_uris.append(
boto.storage_uri('gs://%s' % bucket_name,
suppress_consec_slashes=False))
for i in range(test_objects):
self.CreateObjectJson(bucket_name=bucket_name,
object_name=self.MakeTempName('obj'),
contents='test {:d}'.format(i).encode('ascii'))
return bucket
def CreateObjectJson(self,
contents,
bucket_name=None,
object_name=None,
encryption_key=None,
mtime=None,
storage_class=None,
gs_idempotent_generation=None,
kms_key_name=None):
"""Creates a test object (GCS provider only) using the JSON API.
Args:
contents: The contents to write to the object.
bucket_name: Name of bucket to place the object in. If not specified,
a new temporary bucket is created. Assumes the given bucket name is
valid.
object_name: The name to use for the object. If not specified, a temporary
test object name is constructed.
encryption_key: AES256 encryption key to use when creating the object,
if any.
mtime: The modification time of the file in POSIX time (seconds since
UTC 1970-01-01). If not specified, this defaults to the current
system time.
storage_class: String representing the storage class to use for the
object.
gs_idempotent_generation: For use when overwriting an object for which
you know the previously uploaded generation. Create GCS object
idempotently by supplying this generation number as a precondition
and assuming the current object is correct on precondition failure.
          Defaults to None (disabled); pass a generation number (e.g. 0 for a
          new object) to enable idempotent creation.
kms_key_name: Fully-qualified name of the KMS key that should be used to
encrypt the object. Note that this is currently only valid for 'gs'
objects.
Returns:
An apitools Object for the created object.
"""
bucket_name = bucket_name or self.CreateBucketJson().name
object_name = object_name or self.MakeTempName('obj')
preconditions = Preconditions(gen_match=gs_idempotent_generation)
custom_metadata = apitools_messages.Object.MetadataValue(
additionalProperties=[])
if mtime is not None:
CreateCustomMetadata({MTIME_ATTR: mtime}, custom_metadata)
object_metadata = apitools_messages.Object(
name=object_name,
metadata=custom_metadata,
bucket=bucket_name,
contentType='application/octet-stream',
storageClass=storage_class,
kmsKeyName=kms_key_name)
encryption_keywrapper = CryptoKeyWrapperFromKey(encryption_key)
try:
return self.json_api.UploadObject(six.BytesIO(contents),
object_metadata,
provider='gs',
encryption_tuple=encryption_keywrapper,
preconditions=preconditions)
except PreconditionException:
if gs_idempotent_generation is None:
raise
with SetBotoConfigForTest([('GSUtil', 'decryption_key1', encryption_key)
]):
return self.json_api.GetObjectMetadata(bucket_name, object_name)
def VerifyObjectCustomAttribute(self,
bucket_name,
object_name,
attr_name,
expected_value,
expected_present=True):
"""Retrieves and verifies an object's custom metadata attribute.
Args:
bucket_name: The name of the bucket the object is in.
object_name: The name of the object itself.
attr_name: The name of the custom metadata attribute.
expected_value: The expected retrieved value for the attribute.
expected_present: True if the attribute must be present in the
object metadata, False if it must not be present.
Returns:
None
"""
gsutil_api = (self.json_api
if self.default_provider == 'gs' else self.xml_api)
metadata = gsutil_api.GetObjectMetadata(bucket_name,
object_name,
provider=self.default_provider,
fields=['metadata/%s' % attr_name])
attr_present, value = GetValueFromObjectCustomMetadata(
metadata, attr_name, default_value=expected_value)
self.assertEqual(expected_present, attr_present)
self.assertEqual(expected_value, value)
def RunGsUtil(self,
cmd,
return_status=False,
return_stdout=False,
return_stderr=False,
expected_status=0,
stdin=None,
env_vars=None):
"""Runs the gsutil command.
Args:
cmd: The command to run, as a list, e.g. ['cp', 'foo', 'bar']
return_status: If True, the exit status code is returned.
return_stdout: If True, the standard output of the command is returned.
return_stderr: If True, the standard error of the command is returned.
expected_status: The expected return code. If not specified, defaults to
0. If the return code is a different value, an exception
is raised.
stdin: A string of data to pipe to the process as standard input.
env_vars: A dictionary of variables to extend the subprocess's os.environ
with.
Returns:
If multiple return_* values were specified, this method returns a tuple
containing the desired return values specified by the return_* arguments
(in the order those parameters are specified in the method definition).
If only one return_* value was specified, that value is returned directly
rather than being returned within a 1-tuple.
"""
cmd = [
gslib.GSUTIL_PATH, '--testexceptiontraces', '-o',
'GSUtil:default_project_id=' + PopulateProjectId()
] + cmd
if stdin is not None:
if six.PY3:
if not isinstance(stdin, bytes):
stdin = stdin.encode(UTF8)
else:
stdin = stdin.encode(UTF8)
# checking to see if test was invoked from a par file (bundled archive)
# if not, add python executable path to ensure correct version of python
# is used for testing
cmd = [str(sys.executable)] + cmd if not InvokedFromParFile() else cmd
env = os.environ.copy()
if env_vars:
env.update(env_vars)
# Ensuring correct text types
envstr = dict()
for k, v in six.iteritems(env):
envstr[six.ensure_str(k)] = six.ensure_str(v)
cmd = [six.ensure_str(part) for part in cmd]
# executing command - the setsid allows us to kill the process group below
# if the execution times out. With python 2.7, there's no other way to
# stop the execution (p.kill() doesn't work). Since setsid is not available
# on Windows, we just deal with the occasional timeouts on Windows.
preexec_fn = os.setsid if hasattr(os, 'setsid') else None
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
env=envstr,
preexec_fn=preexec_fn)
comm_kwargs = {'input': stdin}
def Kill():
os.killpg(os.getpgid(p.pid), signal.SIGKILL)
if six.PY3:
# TODO(b/135936279): Make this number configurable in .boto
comm_kwargs['timeout'] = 180
else:
timer = threading.Timer(180, Kill)
timer.start()
c_out = p.communicate(**comm_kwargs)
if not six.PY3:
timer.cancel()
try:
c_out = [six.ensure_text(output) for output in c_out]
except UnicodeDecodeError:
c_out = [
six.ensure_text(output, locale.getpreferredencoding(False))
for output in c_out
]
stdout = c_out[0].replace(os.linesep, '\n')
stderr = c_out[1].replace(os.linesep, '\n')
status = p.returncode
if expected_status is not None:
cmd = map(six.ensure_text, cmd)
self.assertEqual(
int(status),
int(expected_status),
msg='Expected status {}, got {}.\nCommand:\n{}\n\nstderr:\n{}'.format(
expected_status, status, ' '.join(cmd), stderr))
toreturn = []
if return_status:
toreturn.append(status)
if return_stdout:
toreturn.append(stdout)
if return_stderr:
toreturn.append(stderr)
if len(toreturn) == 1:
return toreturn[0]
elif toreturn:
return tuple(toreturn)
def RunGsUtilTabCompletion(self, cmd, expected_results=None):
"""Runs the gsutil command in tab completion mode.
Args:
cmd: The command to run, as a list, e.g. ['cp', 'foo', 'bar']
expected_results: The expected tab completion results for the given input.
"""
cmd = [gslib.GSUTIL_PATH] + ['--testexceptiontraces'] + cmd
if InvokedFromParFile():
argcomplete_start_idx = 1
else:
argcomplete_start_idx = 2
# Prepend the interpreter path; ensures we use the same interpreter that
# was used to invoke the integration tests. In practice, this only differs
# when you're running the tests using a different interpreter than
# whatever `/usr/bin/env python` resolves to.
cmd = [str(sys.executable)] + cmd
cmd_str = ' '.join(cmd)
@Retry(AssertionError, tries=5, timeout_secs=1)
def _RunTabCompletion():
"""Runs the tab completion operation with retries."""
# Set this to True if the argcomplete tests start failing and you want to
# see any output you can get. I've had to do this so many times that I'm
# just going to leave this in the code for convenience ¯\_(ツ)_/¯
#
# If set, this will print out extra info from the argcomplete subprocess.
# You'll probably want to find one test that's failing and run it
# individually, e.g.:
# python3 ./gsutil test tabcomplete.TestTabComplete.test_single_object
# so that only one subprocess is run, thus routing the output to your
# local terminal rather than swallowing it.
hacky_debugging = False
results_string = None
with tempfile.NamedTemporaryFile(
delete=False) as tab_complete_result_file:
if hacky_debugging:
          # These redirections are valuable for debugging purposes. 1 and 2 are,
# obviously, stdout and stderr of the subprocess. 9 is the fd for
# argparse debug stream.
cmd_str_with_result_redirect = (
'{cs} 1>{fn} 2>{fn} 8>{fn} 9>{fn}'.format(
cs=cmd_str, fn=tab_complete_result_file.name))
else:
# argcomplete returns results via the '8' file descriptor, so we
# redirect to a file so we can capture the completion results.
cmd_str_with_result_redirect = '{cs} 8>{fn}'.format(
cs=cmd_str, fn=tab_complete_result_file.name)
env = os.environ.copy()
env['_ARGCOMPLETE'] = str(argcomplete_start_idx)
# Use a sane default for COMP_WORDBREAKS.
env['_ARGCOMPLETE_COMP_WORDBREAKS'] = '''"'@><=;|&(:'''
if 'COMP_WORDBREAKS' in env:
env['_ARGCOMPLETE_COMP_WORDBREAKS'] = env['COMP_WORDBREAKS']
env['COMP_LINE'] = cmd_str
env['COMP_POINT'] = str(len(cmd_str))
subprocess.call(cmd_str_with_result_redirect, env=env, shell=True)
results_string = tab_complete_result_file.read().decode(
locale.getpreferredencoding())
if results_string:
if hacky_debugging:
print('---------------------------------------')
print(results_string)
print('---------------------------------------')
results = results_string.split('\013')
else:
results = []
self.assertEqual(results, expected_results)
# When tests are run in parallel, tab completion could take a long time,
# so choose a long timeout value.
with SetBotoConfigForTest([('GSUtil', 'tab_completion_timeout', '120')]):
_RunTabCompletion()
@contextlib.contextmanager
def SetAnonymousBotoCreds(self):
# Tell gsutil not to override the real error message with a warning about
# anonymous access if no credentials are provided in the config file.
boto_config_for_test = [('Tests', 'bypass_anonymous_access_warning', 'True')
]
# Also, maintain any custom host/port/API configuration, since we'll need
# to contact the same host when operating in a development environment.
for creds_config_key in ('gs_host', 'gs_json_host', 'gs_json_host_header',
                             'gs_port', 'gs_json_port'):
boto_config_for_test.append(('Credentials', creds_config_key,
boto.config.get('Credentials',
creds_config_key, None)))
boto_config_for_test.append(
('Boto', 'https_validate_certificates',
boto.config.get('Boto', 'https_validate_certificates', None)))
for api_config_key in ('json_api_version', 'prefer_api'):
boto_config_for_test.append(('GSUtil', api_config_key,
boto.config.get('GSUtil', api_config_key,
None)))
with SetBotoConfigForTest(boto_config_for_test, use_existing_config=False):
# Make sure to reset Developer Shell credential port so that the child
      # gsutil process is really anonymous. Also, prevent Boto from falling back
# on credentials from other files like ~/.aws/credentials or environment
# variables.
with SetEnvironmentForTest({
'DEVSHELL_CLIENT_PORT': None,
'AWS_SECRET_ACCESS_KEY': '_'
}):
yield
def _VerifyLocalMode(self, path, expected_mode):
"""Verifies the mode of the file specified at path.
Args:
path: The path of the file on the local file system.
expected_mode: The expected mode as a 3-digit base-8 number.
Returns:
None
"""
self.assertEqual(expected_mode, int(oct(os.stat(path).st_mode)[-3:], 8))
def _VerifyLocalUid(self, path, expected_uid):
"""Verifies the uid of the file specified at path.
Args:
path: The path of the file on the local file system.
expected_uid: The expected uid of the file.
Returns:
None
"""
self.assertEqual(expected_uid, os.stat(path).st_uid)
def _VerifyLocalGid(self, path, expected_gid):
"""Verifies the gid of the file specified at path.
Args:
path: The path of the file on the local file system.
expected_gid: The expected gid of the file.
Returns:
None
"""
self.assertEqual(expected_gid, os.stat(path).st_gid)
def VerifyLocalPOSIXPermissions(self, path, gid=None, uid=None, mode=None):
"""Verifies the uid, gid, and mode of the file specified at path.
Will only check the attribute if the corresponding method parameter is not
None.
Args:
path: The path of the file on the local file system.
gid: The expected gid of the file.
uid: The expected uid of the file.
mode: The expected mode of the file.
Returns:
None
"""
if gid is not None:
self._VerifyLocalGid(path, gid)
if uid is not None:
self._VerifyLocalUid(path, uid)
if mode is not None:
self._VerifyLocalMode(path, mode)
def FlatListDir(self, directory):
"""Perform a flat listing over directory.
Args:
directory: The directory to list
Returns:
Listings with path separators canonicalized to '/', to make assertions
easier for Linux vs Windows.
"""
result = []
for dirpath, _, filenames in os.walk(directory):
for f in filenames:
result.append(os.path.join(dirpath, f))
return '\n'.join(result).replace('\\', '/')
def FlatListBucket(self, bucket_url_string):
"""Perform a flat listing over bucket_url_string."""
return self.RunGsUtil(['ls', suri(bucket_url_string, '**')],
return_stdout=True)
def StorageUriCloneReplaceKey(self, storage_uri, key):
"""Wrapper for StorageUri.clone_replace_key().
Args:
storage_uri: URI representing the object to be cloned
key: key for the new StorageUri to represent
"""
return storage_uri.clone_replace_key(key)
def StorageUriCloneReplaceName(self, storage_uri, name):
"""Wrapper for StorageUri.clone_replace_name().
Args:
storage_uri: URI representing the object to be cloned
      name: new object name
"""
return storage_uri.clone_replace_name(name)
def StorageUriSetContentsFromString(self, storage_uri, contents):
"""Wrapper for StorageUri.set_contents_from_string().
Args:
storage_uri: URI representing the object
contents: String of the new contents of the object
"""
return storage_uri.set_contents_from_string(contents)
class KmsTestingResources(object):
"""Constants for KMS resource names to be used in integration testing."""
KEYRING_LOCATION = 'us-central1'
# Since KeyRings and their child resources cannot be deleted, we minimize the
# number of resources created by using a hard-coded keyRing name.
KEYRING_NAME = 'keyring-for-gsutil-integration-tests'
# Used by tests where we don't need to alter the state of a cryptoKey and/or
# its IAM policy bindings once it's initialized the first time.
CONSTANT_KEY_NAME = 'key-for-gsutil-integration-tests'
CONSTANT_KEY_NAME2 = 'key-for-gsutil-integration-tests2'
# Pattern used for keys that should only be operated on by one tester at a
# time. Because multiple integration test invocations can run at the same
# time, we want to minimize the risk of them operating on each other's key,
# while also not creating too many one-time-use keys (as they cannot be
# deleted). Tests should fill in the %d entries with a digit between 0 and 9.
MUTABLE_KEY_NAME_TEMPLATE = 'cryptokey-for-gsutil-integration-tests-%d%d%d'
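# --- Illustrative usage sketch (not part of the original file) ---------------
# A minimal example of how a test built on GsUtilIntegrationTestCase might use
# the helpers defined above; the test class name and object contents here are
# hypothetical and only meant to show the intended call pattern.
#
#   class SketchIntegrationTest(GsUtilIntegrationTestCase):
#
#     def test_object_listing(self):
#       bucket_uri = self.CreateBucket()
#       object_uri = self.CreateObject(bucket_uri=bucket_uri,
#                                      contents=b'hello world')
#       stdout = self.RunGsUtil(['ls', suri(bucket_uri)], return_stdout=True)
#       self.assertIn(suri(object_uri), stdout)
#       self.AssertNObjectsInBucket(bucket_uri, 1)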
|
[
"afif.zakaria1997@gmail.com"
] |
afif.zakaria1997@gmail.com
|
162d2cbb02462b15ffcb61197211d4d535f9ee79
|
2df257136428898816895dc303d3b9b3e4b9b853
|
/blog/blog/urls.py
|
97917052f02a52114169ff401780150906260513
|
[] |
no_license
|
Arkaim/Endterm
|
d4dd5afea0efdcbbab2bf465bdd5d27665181b9e
|
96f1a72838943b2f829a3412431b6e1fe74cbdc3
|
refs/heads/master
| 2020-03-11T23:02:07.989841
| 2018-04-20T06:50:42
| 2018-04-20T06:50:42
| 130,310,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 812
|
py
|
"""blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
urlpatterns = [
path('admin/', admin.site.urls),
    path('api/blog/', include('blogApp.urls')),
]
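# Illustrative sketch (not in this snapshot): for include('blogApp.urls') above
# to resolve, the blogApp package needs its own urls.py along these lines.
# The view names used here are hypothetical placeholders.
#
#   # blogApp/urls.py
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.post_list, name='post-list'),
#       path('<int:pk>/', views.post_detail, name='post-detail'),
#   ]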
|
[
"kaimbayev@gmail.com"
] |
kaimbayev@gmail.com
|
486d1b61f79a31a74f2983f06dff9745c4743031
|
4899940597a48d28c01fdffbe66e098543ee43f8
|
/experiment_v2/train_daegc.py
|
a31b2c8132a0ad05e82701e3ff875c164d8026f2
|
[] |
no_license
|
BigPig-LittleTail/clean_dp
|
d7a0f8af6ea0808c252580a8e0c7239fa66535d7
|
47113cd2393b800fe15390b81d99f1098212913a
|
refs/heads/master
| 2023-05-03T17:04:02.619923
| 2021-06-02T11:08:27
| 2021-06-02T11:08:27
| 372,802,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,661
|
py
|
import sys
import os
import argparse
import torch
import numpy as np
import torch.nn.functional as F
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from sklearn.cluster import KMeans
from torch.optim import Adam
from torch_geometric.nn import GAE
from torch_geometric.utils import to_undirected
from model.model import DAEGCEncoder
from model.utils import construct_edge_index_direction, eva, target_distribution
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="train",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--x_path", type=str, default="")
parser.add_argument("--y_path", type=str, default="")
parser.add_argument("--graph_x_path", type=str, default="")
parser.add_argument("--pre_path", type=str, default="")
parser.add_argument("--lr", type=float, default=0.0001)
parser.add_argument("--train_epoch", type=int, default=200)
parser.add_argument("--input_dim", type=int, default=2000)
parser.add_argument("--n_z", type=int, default=10)
parser.add_argument("--n_cluster", type=int, default=30)
parser.add_argument("--seed", type=int, default=None)
parser.add_argument("--beta", type=float, default=10)
args = parser.parse_args()
lr = args.lr
train_epoch = args.train_epoch
input_dim = args.input_dim
n_z = args.n_z
n_cluster = args.n_cluster
seed = args.seed
beta = args.beta
if seed is not None:
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
x_path = args.x_path
y_path = args.y_path
pre_path = args.pre_path
graph_x_path = args.graph_x_path
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
feature = np.loadtxt(x_path, dtype=np.float32)
y = np.loadtxt(y_path, dtype=np.int32)
graph_x = np.loadtxt(graph_x_path, dtype=np.float32)
encoder = DAEGCEncoder(4, 1, 500, input_dim, n_z, n_cluster,
att_dropout=0.0, fdd_dropout=0.0)
x = torch.from_numpy(feature)
graph_x = torch.from_numpy(graph_x)
edge_index_direction = torch.from_numpy(construct_edge_index_direction(graph_x)).t().contiguous()
edge_index = to_undirected(edge_index_direction, num_nodes=graph_x.shape[0])
model = GAE(encoder)
model.load_state_dict(torch.load(pre_path))
model = model.to(device)
x = x.to(device)
edge_index = edge_index.to(device)
optimizer = Adam(model.parameters(), lr=lr)
with torch.no_grad():
z, q = model.encode(x, edge_index)
kmeans = KMeans(n_clusters=n_cluster, n_init=20, random_state=seed)
k_predict = kmeans.fit_predict(z.data.cpu().numpy())
encoder.cluster_layer.data = torch.tensor(kmeans.cluster_centers_).to(device)
eva(y, k_predict, "pretrain")
for epoch in range(train_epoch):
z, q = model.encode(x, edge_index)
out_edge = model.decode(z, edge_index)
if epoch % 1 == 0:
p = target_distribution(q.data)
res1 = q.data.cpu().numpy().argmax(1) # Q
res2 = p.data.cpu().numpy().argmax(1) # P
eva(y, res1, str(epoch) + 'Q')
eva(y, res2, str(epoch) + 'P')
kl_loss = F.kl_div(q.log(), p, reduction='batchmean')
re_loss = F.mse_loss(out_edge, torch.ones_like(out_edge))
loss = re_loss + beta * kl_loss
print("kl_loss{}, re_loss{}".format(kl_loss, re_loss))
optimizer.zero_grad()
loss.backward()
optimizer.step()
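# Illustrative sketch (not part of this file): target_distribution() is
# imported from model.utils above and is not shown in this snapshot. In
# DEC/DAEGC-style training it is conventionally the sharpened assignment
# p_ij = (q_ij**2 / f_j) / sum_k (q_ik**2 / f_k) with f_j = sum_i q_ij;
# a minimal version under that assumption would be:
#
#   def target_distribution(q):
#       weight = q ** 2 / q.sum(dim=0)               # q_ij**2 / f_j
#       return (weight.t() / weight.sum(dim=1)).t()  # row-normalize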
|
[
"1070437907@qq.com"
] |
1070437907@qq.com
|
236c1bb3ab27151734793ded3b60b40ee401d7e3
|
41df5552f879a8ed4f4ec7f3e2f5b50958bac4af
|
/lib/python3.4/site-packages/parsel/selector.py
|
282f2e130ec876b06b50a03903f3f7e79390bc51
|
[] |
no_license
|
pengye91/py3_spider
|
cc1d3fb526b0d9b031e77dc65c386b0aae486c72
|
9e4a519aebaf1707aed0707e1ff925459d4105be
|
refs/heads/master
| 2016-09-13T03:34:16.110251
| 2016-04-24T08:52:05
| 2016-04-24T08:52:05
| 56,961,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,837
|
py
|
"""
XPath selectors based on lxml
"""
import six
from lxml import etree
from .utils import flatten, iflatten, extract_regex
from .csstranslator import HTMLTranslator, GenericTranslator
class SafeXMLParser(etree.XMLParser):
def __init__(self, *args, **kwargs):
kwargs.setdefault('resolve_entities', False)
super(SafeXMLParser, self).__init__(*args, **kwargs)
_ctgroup = {
'html': {'_parser': etree.HTMLParser,
'_csstranslator': HTMLTranslator(),
'_tostring_method': 'html'},
'xml': {'_parser': SafeXMLParser,
'_csstranslator': GenericTranslator(),
'_tostring_method': 'xml'},
}
def _st(st):
if st is None:
return 'html'
elif st in _ctgroup:
return st
else:
raise ValueError('Invalid type: %s' % st)
def create_root_node(text, parser_cls, base_url=None):
"""Create root node for text using given parser class.
"""
body = text.strip().encode('utf8') or b'<html/>'
parser = parser_cls(recover=True, encoding='utf8')
return etree.fromstring(body, parser=parser, base_url=base_url)
class SelectorList(list):
"""
The :class:`SelectorList` class is a subclass of the builtin ``list``
class, which provides a few additional methods.
"""
# __getslice__ is deprecated but `list` builtin implements it only in Py2
def __getslice__(self, i, j):
o = super(SelectorList, self).__getslice__(i, j)
return self.__class__(o)
def __getitem__(self, pos):
o = super(SelectorList, self).__getitem__(pos)
return self.__class__(o) if isinstance(pos, slice) else o
def xpath(self, xpath):
"""
Call the ``.xpath()`` method for each element in this list and return
their results flattened as another :class:`SelectorList`.
``query`` is the same argument as the one in :meth:`Selector.xpath`
"""
return self.__class__(flatten([x.xpath(xpath) for x in self]))
def css(self, xpath):
"""
Call the ``.css()`` method for each element in this list and return
their results flattened as another :class:`SelectorList`.
``query`` is the same argument as the one in :meth:`Selector.css`
"""
return self.__class__(flatten([x.css(xpath) for x in self]))
def re(self, regex):
"""
        Call the ``.re()`` method for each element in this list and return
their results flattened, as a list of unicode strings.
"""
return flatten([x.re(regex) for x in self])
def re_first(self, regex):
for el in iflatten(x.re(regex) for x in self):
return el
def extract(self):
"""
        Call the ``.extract()`` method for each element in this list and return
their results flattened, as a list of unicode strings.
"""
return [x.extract() for x in self]
def extract_first(self, default=None):
for x in self:
return x.extract()
else:
return default
class Selector(object):
"""
:class:`Selector` allows you to select parts of an XML or HTML text using CSS
or XPath expressions and extract data from it.
``text`` is a ``unicode`` object in Python 2 or a ``str`` object in Python 3
``type`` defines the selector type, it can be ``"html"``, ``"xml"`` or ``None`` (default).
If ``type`` is ``None``, the selector defaults to ``"html"``.
"""
__slots__ = ['text', 'namespaces', 'type', '_expr', 'root',
'__weakref__', '_parser', '_csstranslator', '_tostring_method']
_default_type = None
_default_namespaces = {
"re": "http://exslt.org/regular-expressions",
# supported in libxslt:
# set:difference
# set:has-same-node
# set:intersection
# set:leading
# set:trailing
"set": "http://exslt.org/sets"
}
_lxml_smart_strings = False
selectorlist_cls = SelectorList
def __init__(self, text=None, type=None, namespaces=None, root=None,
base_url=None, _expr=None):
self.type = st = _st(type or self._default_type)
self._parser = _ctgroup[st]['_parser']
self._csstranslator = _ctgroup[st]['_csstranslator']
self._tostring_method = _ctgroup[st]['_tostring_method']
if text is not None:
if not isinstance(text, six.text_type):
raise TypeError("text argument should be of type %s" % six.text_type)
root = self._get_root(text, base_url)
elif root is None:
raise ValueError("Selector needs either text or root argument")
self.namespaces = dict(self._default_namespaces)
if namespaces is not None:
self.namespaces.update(namespaces)
self.root = root
self._expr = _expr
def _get_root(self, text, base_url=None):
return create_root_node(text, self._parser, base_url=base_url)
def xpath(self, query):
"""
Find nodes matching the xpath ``query`` and return the result as a
:class:`SelectorList` instance with all elements flattened. List
elements implement :class:`Selector` interface too.
``query`` is a string containing the XPATH query to apply.
"""
try:
xpathev = self.root.xpath
except AttributeError:
return self.selectorlist_cls([])
try:
result = xpathev(query, namespaces=self.namespaces,
smart_strings=self._lxml_smart_strings)
except etree.XPathError:
msg = u"Invalid XPath: %s" % query
raise ValueError(msg if six.PY3 else msg.encode("unicode_escape"))
if type(result) is not list:
result = [result]
result = [self.__class__(root=x, _expr=query,
namespaces=self.namespaces,
type=self.type)
for x in result]
return self.selectorlist_cls(result)
def css(self, query):
"""
Apply the given CSS selector and return a :class:`SelectorList` instance.
``query`` is a string containing the CSS selector to apply.
In the background, CSS queries are translated into XPath queries using
`cssselect`_ library and run ``.xpath()`` method.
"""
return self.xpath(self._css2xpath(query))
def _css2xpath(self, query):
return self._csstranslator.css_to_xpath(query)
def re(self, regex):
"""
Apply the given regex and return a list of unicode strings with the
matches.
``regex`` can be either a compiled regular expression or a string which
will be compiled to a regular expression using ``re.compile(regex)``
"""
return extract_regex(regex, self.extract())
def extract(self):
"""
Serialize and return the matched nodes as a list of unicode strings.
Percent encoded content is unquoted.
"""
try:
return etree.tostring(self.root,
method=self._tostring_method,
encoding='unicode',
with_tail=False)
except (AttributeError, TypeError):
if self.root is True:
return u'1'
elif self.root is False:
return u'0'
else:
return six.text_type(self.root)
def register_namespace(self, prefix, uri):
"""
Register the given namespace to be used in this :class:`Selector`.
Without registering namespaces you can't select or extract data from
non-standard namespaces. See :ref:`selector-examples-xml`.
"""
self.namespaces[prefix] = uri
def remove_namespaces(self):
"""
Remove all namespaces, allowing to traverse the document using
namespace-less xpaths. See :ref:`removing-namespaces`.
"""
for el in self.root.iter('*'):
if el.tag.startswith('{'):
el.tag = el.tag.split('}', 1)[1]
# loop on element attributes also
for an in el.attrib.keys():
if an.startswith('{'):
el.attrib[an.split('}', 1)[1]] = el.attrib.pop(an)
def __bool__(self):
"""
Return ``True`` if there is any real content selected or ``False``
otherwise. In other words, the boolean value of a :class:`Selector` is
given by the contents it selects.
"""
return bool(self.extract())
__nonzero__ = __bool__
def __str__(self):
data = repr(self.extract()[:40])
return "<%s xpath=%r data=%s>" % (type(self).__name__, self._expr, data)
__repr__ = __str__
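# Illustrative usage sketch (not part of the library source): typical use of
# the Selector / SelectorList API defined above.
#
#   sel = Selector(text=u'<html><body><a href="/a">first</a></body></html>')
#   sel.xpath('//a/@href').extract()        # -> [u'/a']
#   sel.css('a::text').extract_first()      # -> u'first'
#   sel.xpath('//a/text()').re(r'\w+')      # -> [u'first']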
|
[
"pengye91@hotmail.com"
] |
pengye91@hotmail.com
|
119415bc326dbc3ad52fff959dd118ea2bbd69bd
|
5ddc0cc9a788cc51cebe5407d1ea3a16203835ca
|
/sunshine_conversations_client/model/quoted_message_message.py
|
fec16d4e0408e9e25574a1016559d532e47c3b38
|
[
"Apache-2.0"
] |
permissive
|
pizanao/sunshine-conversations-python
|
117f18d48ecd772114853a1b1551883f7f2d1d76
|
28c710c36344d50ffa26c953c6e9ee6815f2b211
|
refs/heads/master
| 2023-08-13T22:09:35.470568
| 2021-09-28T17:18:04
| 2021-09-28T17:18:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,214
|
py
|
# coding: utf-8
"""
Sunshine Conversations API
The version of the OpenAPI document: 9.4.5
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from sunshine_conversations_client.configuration import Configuration
from sunshine_conversations_client.undefined import Undefined
class QuotedMessageMessage(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'message': 'Message'
}
attribute_map = {
'type': 'type',
'message': 'message'
}
nulls = set()
def __init__(self, type='message', message=None, local_vars_configuration=None): # noqa: E501
"""QuotedMessageMessage - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self._message = None
self.discriminator = None
if type is not None:
self.type = type
if message is not None:
self.message = message
@property
def type(self):
"""Gets the type of this QuotedMessageMessage. # noqa: E501
The type of quotedMessage - a complete Sunshine Conversations message is provided. # noqa: E501
:return: The type of this QuotedMessageMessage. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this QuotedMessageMessage.
The type of quotedMessage - a complete Sunshine Conversations message is provided. # noqa: E501
:param type: The type of this QuotedMessageMessage. # noqa: E501
:type: str
"""
self._type = type
@property
def message(self):
"""Gets the message of this QuotedMessageMessage. # noqa: E501
:return: The message of this QuotedMessageMessage. # noqa: E501
:rtype: Message
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this QuotedMessageMessage.
:param message: The message of this QuotedMessageMessage. # noqa: E501
:type: Message
"""
self._message = message
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, QuotedMessageMessage):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, QuotedMessageMessage):
return True
return self.to_dict() != other.to_dict()
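# Illustrative sketch (not part of the generated module): constructing and
# serializing a QuotedMessageMessage; `some_message` stands for a Message
# instance from sunshine_conversations_client and is hypothetical here.
#
#   quoted = QuotedMessageMessage(type='message', message=some_message)
#   quoted.to_dict()   # -> {'type': 'message', 'message': {...}}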
|
[
"zendesk-ops-ci@zendesk.com"
] |
zendesk-ops-ci@zendesk.com
|
a498d8e90a7422320e43f47cf9b9ea43862a41fe
|
d371b8e4c6d19a4ca7c75145b642940221072b5c
|
/prac1.py
|
a92a59615c92a86b91bbd242c79169e984b4b628
|
[] |
no_license
|
jaypal1046/IotPrac
|
77357e25686064d2b0043ab3575c21d5e53c665b
|
e2b47b975fa92fd3a282019325b1cb017e5ac3bd
|
refs/heads/master
| 2022-12-25T09:42:13.906547
| 2020-09-14T06:48:41
| 2020-09-14T06:48:41
| 295,312,254
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
import telepot
token = '1212487782:AAFiqI9MymcNubUOoo9TzGCVE0OyCOsa_r4'
TelegramBot = telepot.Bot(token)
print(TelegramBot.getMe())
def handle(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
print(content_type, chat_type, chat_id)
if content_type == "text":
TelegramBot.sendMessage(chat_id, "You said '{}'".format(msg["text"]))
TelegramBot.message_loop(handle)
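# Added sketch (not in the original script): telepot's message_loop() polls in
# a background thread and returns immediately, so the main thread has to be
# kept alive for the handler above to keep receiving updates.
import time
while True:
    time.sleep(10)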
|
[
"noreply@github.com"
] |
jaypal1046.noreply@github.com
|
0e165c9f0d837468b9a0c6dcaa7e10a3fe342087
|
76df8b5fbbafaafeba585db013f29fb05427df44
|
/LIST_TUPLE.py
|
f4081f88485ccb69a1673f350084e0c6deb95d79
|
[] |
no_license
|
rfsip/learnpython
|
f911750719e8314d4440380890b023c5e04bee9b
|
ffecdb2ca1d169741948b9eeab6a38ecbfaa3687
|
refs/heads/master
| 2021-01-20T12:22:59.619955
| 2017-06-02T12:51:26
| 2017-06-02T12:51:26
| 90,357,486
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,909
|
py
|
#coding:UTF-8
'''
A list is an ordered collection whose elements can be modified. len() returns the number of elements.
l[n] is indexed from 0 to n-1; the index can be negative, with -1 meaning the last element (n-1).
list functions:
l.append(x): append x to the end of the list.
l.insert(i, x): insert x at index i; the element at that index and everything after it shift back by one.
l.pop(i): remove and return the element at the given position; defaults to the last one.
l.sort(reverse=0/1): sort in place; the argument is optional and defaults to ascending order.
**For any iterable, use the global sorted() function instead.
l.index(x): return the index of the first occurrence of x.
l.count(x): return how many times x appears in the list.
l.remove(x): remove the first occurrence of x.
'''
'''
A tuple is an ordered sequence whose elements cannot be changed.
A tuple's elements are fixed at definition time; a one-element tuple needs a trailing ',', e.g. tl = (1,)
Apart from anything that modifies elements, its functions are the same as list's.
'''
'''
**************join and split*************
str.split('i'): split the string str into a list, using 'i' as the delimiter (defaults to whitespace).
'i'.join(iterable): join an iterable into one string separated by 'i' (every element must be a str).
>>> '1 2 3 4 5 6 7 8 9'.split()
['1', '2', '3', '4', '5', '6', '7', '8', '9']
>>> l = ['1','2','3']
>>> '-'.join(l)
'1-2-3'
'''
# List comprehension
a = [i for i in range(10)]
print(a)
print('\n')
# Generators: produce the items of an iterable on demand
'''
Change the [] of a list comprehension to (), or use yield inside a function.
Execution returns at each yield and resumes right after that yield on the next call.
Use next() to request the next value, or iterate over the generator with a loop.
**A generator's return value must be read from the value of the StopIteration it raises.
'''
aa = (i1 for i1 in range(10))
print(aa)
print(next(aa))
print('\n')
for i2 in aa:
print(i2)
print('\n')
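# Added example (not in the original notes): reading a generator's return
# value from StopIteration.value, as described above (Python 3).
def gen_with_return():
    yield 1
    yield 2
    return 'done'  # carried by the StopIteration that ends the generator
g2 = gen_with_return()
while True:
    try:
        print(next(g2))
    except StopIteration as e:
        print('return value:', e.value)  # -> return value: done
        break
print('\n')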
# Indexing practice
L = [
['Apple','Google','Microsoft'],
['Java','Python','Ruby','PHP'],
('Bob','Lisa','icefog')
]
# Print 'Google'
print(L[0][1])
# Print 'Python'
print(L[1][1])
# Print 'icefog'
print(L[2][2])
|
[
"rfsip@outlook.com"
] |
rfsip@outlook.com
|
1ded337a5ffdfb6cfa5b77d979c6e05e6b0f2bce
|
12420f2f97277e3dda4bfba9b101448616bbfc76
|
/Second_attempt_Exercise7.1.py
|
38a7b5e20b9bfdc67d086f5a9231bb935b534982
|
[] |
no_license
|
Environmental-Informatics/building-more-complex-programs-with-python-Gautam6-asmita
|
171585d8b038d0c56e608bdf8cf27365281918e0
|
fbd319dd09fda955dfbda47beb7b8718755c7183
|
refs/heads/master
| 2020-12-19T21:03:57.253160
| 2020-03-08T19:22:44
| 2020-03-08T19:22:44
| 235,852,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,724
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Created on 2020-01-24 by Asmita Gautam
Assignment 01: Python - Learning the Basics
Think Python Chapter 7: Exercises 7.1
##To make the table as given in 7.1
Modified for resubmission on 2020-03-04
"""
"""
Importing the required module math, which allows for doing mathematical operations
"""
import math
"""
This function 'mysqrt' takes a parameter a
and approximates the square root of a using Newton's method
"""
def mysqrt (a): #Defines a function mysqrt which takes a variable a
x=2
a=int(a)
    while True: #loop until successive estimates converge (break when y == x)
y=(x+a/x)/2
if y==x:
break
x=y
return y
"""
This function 'diff' takes a parameter a
and returns the difference between math.sqrt(a) and mysqrt(a)
"""
def diff(a): #Define the function diff,
d=math.sqrt(a)- mysqrt(a)
return d
"""
This function 'test_square_root' does not take any argument;
it prints the comparison table shown in Exercise 7.1
"""
def test_square_root(): #Defines a function test_square_root
header=['a','mysqrt(a)','math.sqrt(a)','diff'] #For header of table
print('{:<6s} {:<20s} {:<20s} {:<20s}'.format(header[0],header[1],header[2],header[3])) #Print the header
    print('{:<6s} {:<20s} {:<20s} {:<20s}'.format('-','------','--------','------')) # Print the separator dashes under each header
for a in range(1,10):
lst=[a,mysqrt(a),math.sqrt(a),diff(a)] #[ ] includes the list of variables and functions
print('{:.1f} {:<20.11f} {:<20.11f} {:<20.11f}'.format(lst[0],lst[1],lst[2],lst[3]))
test_square_root() #Displays the table created
|
[
"noreply@github.com"
] |
Environmental-Informatics.noreply@github.com
|
78c4802a1bfdcd6878a1375099b9dce3165985b3
|
0153b5eae4fa4ee1b04dbab75c1bd71c799f773f
|
/bucles_while_6_VII.py
|
4bc60e489389c7d37bce6ee82d97dd59859cc6b1
|
[] |
no_license
|
dasafo/Python
|
deab015777792f55c85c2758cf529c4486efdb13
|
00839b819abd60af981bfed6446d5b154d98ed40
|
refs/heads/master
| 2022-01-07T17:33:22.699096
| 2019-07-25T09:09:51
| 2019-07-25T09:09:51
| 155,708,564
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
import math
print("Programa de cálculo de raiz cuadrada")
numero=int(input("Introduce un numero ostias: "))
intentos=0
while numero<0:
print("No se puede hallar la raiz de un numero negrativo")
if intentos==2:
print("demasiados intentos bye bye")
        break
numero=int(input("Introduce un numero ostias: "))
if numero<0:
intentos=intentos+1
if intentos<2:
solucion=math.sqrt(numero)
print("La raiz cuadrada de " +str(numero) + " es " + str(solucion))
import sys
print(sys.version)
|
[
"dasafo1985@gmail.com"
] |
dasafo1985@gmail.com
|
f7fff813f7ca53db6b298e1ecfd99d606882e9c2
|
768727ae72be16afd27c54ac22227e160fa457bd
|
/angr_ctf/scaffold06.py
|
cb68005dc05028264494a427c1ffc5566404ad21
|
[] |
no_license
|
Reijaff/CTF_reverse
|
3279798b9e6b06315834d19c8f7700521301ecd6
|
2201297203bd480b73c4baf716f0bf57cf929c3d
|
refs/heads/master
| 2020-06-24T17:36:28.539068
| 2019-12-01T16:42:32
| 2019-12-01T16:42:32
| 199,032,380
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,611
|
py
|
import angr
import claripy
import sys
def main(argv):
path_to_binary = argv[1]
project = angr.Project(path_to_binary)
start_address = 0x8048699
initial_state = project.factory.blank_state(addr=start_address)
# The binary is calling scanf("%8s %8s").
# (!)
password0 = claripy.BVS('password0', 64)
password1 = claripy.BVS('password1', 64)
# Instead of telling the binary to write to the address of the memory
# allocated with malloc, we can simply fake an address to any unused block of
# memory and overwrite the pointer to the data. This will point the pointer
# with the address of pointer_to_malloc_memory_address0 to fake_heap_address.
# Be aware, there is more than one pointer! Analyze the binary to determine
# global location of each pointer.
# Note: by default, Angr stores integers in memory with big-endianness. To
# specify to use the endianness of your architecture, use the parameter
# endness=project.arch.memory_endness. On x86, this is little-endian.
# (!)
fake_heap_address0 = 0x4444440
fake_heap_address1 = 0x4444448
pointer_to_malloc_memory_address0 = 0xABCC8A4
pointer_to_malloc_memory_address1 = 0xABCC8AC
initial_state.memory.store(pointer_to_malloc_memory_address0,
fake_heap_address0, endness=project.arch.memory_endness)
initial_state.memory.store(pointer_to_malloc_memory_address1,
fake_heap_address1, endness=project.arch.memory_endness)
# Store our symbolic values at our fake_heap_address. Look at the binary to
# determine the offsets from the fake_heap_address where scanf writes.
# (!)
initial_state.memory.store(fake_heap_address0, password0)
initial_state.memory.store(fake_heap_address1, password1)
simulation = project.factory.simgr(initial_state)
def is_successful(state):
stdout_output = state.posix.dumps(sys.stdout.fileno())
return "Good Job." in stdout_output
def should_abort(state):
stdout_output = state.posix.dumps(sys.stdout.fileno())
return "Try again."in stdout_output
simulation.explore(find=is_successful, avoid=should_abort)
if simulation.found:
solution_state = simulation.found[0]
solution0 = solution_state.se.eval(password0, cast_to=str)
solution1 = solution_state.se.eval(password1, cast_to=str)
solution = " ".join([solution0, solution1])
print solution
else:
raise Exception('Could not find the solution')
if __name__ == '__main__':
main(sys.argv)
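# Editor's note: a hypothetical refinement, not in the original scaffold. Inside main(),
# right after the two claripy.BVS(...) calls, each symbolic byte could be constrained to
# printable ASCII, which often shrinks the search space:
#
#   for pw in (password0, password1):
#       for byte in pw.chop(8):
#           initial_state.add_constraints(byte >= 0x20, byte <= 0x7e)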
|
[
"interlord406@gmail.com"
] |
interlord406@gmail.com
|
7d93d4dcf5fdb754a691c73ec36aa5a0525aed3b
|
54f406f134a000c4b92edebc6b888e343c39a105
|
/3_aggregate_count.py
|
1111dd414a80964c8e1ef18643c5537210d40eef
|
[] |
no_license
|
wattlebird/KaggleTalkingData
|
6f44ef2398d2b9f61690d8f3a836f3b8a0ffd792
|
bc5c4fdadf61c269405a7fb90f8f4ed6c40d102e
|
refs/heads/master
| 2020-03-08T21:13:09.688937
| 2018-05-05T00:38:31
| 2018-05-05T00:38:31
| 128,402,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,103
|
py
|
import pandas as pd
import numpy as np
from setting import *
import gc
gc.enable()
dtype={
'ip' : 'uint32',
'app' : 'uint16',
'device' : 'uint16',
'os' : 'uint16',
'channel' : 'uint16',
'hour' : 'uint8',
'click_id' : 'uint32',
'ip_app_nclick' : 'uint32',
'ip_app_os_nclick': 'uint16',
'ip_app_os_device_nclick': 'uint16',
'ip_channel_nclick': 'uint32',
'ip_hour_day_nclick': 'uint16',
'ip_day_nuniqhour': 'uint8',
'ip_device_os_nuniqapp': 'uint8',
'ip_nuniqapp': 'uint8',
'ip_nuniqchannel': 'uint8',
'ip_nuniqdevice': 'uint16',
'app_nuniqchannel': 'uint8',
'ip_app_channel_varhour': 'float32',
'ip_app_os_varhour': 'float32',
'ip_day_channel_varhour': 'float32',
'ip_app_channel_avghour': 'float32'
}
train = pd.read_csv(DATA + 'train/train.csv', dtype=dtype,
usecols=['ip', 'app', 'device', 'os', 'channel', 'is_attributed', 'hour', 'day'], engine='c')
c1 = pd.read_csv(FDATA+'feature/train/ip_app_nclick.csv', dtype=dtype)
train = train.merge(c1, how='left', on=['ip', 'app'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_app_os_nclick.csv', dtype=dtype)
train = train.merge(c1, how='left', on=['ip', 'app', 'os'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_app_os_device_nclick.csv', dtype=dtype)
train = train.merge(c1, how='left', on=['ip', 'app', 'os', 'device'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_channel_nclick.csv', dtype=dtype)
train = train.merge(c1, how='left', on=['ip', 'channel'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_hour_day_nclick.csv', dtype=dtype)
train = train.merge(c1, how='left', on=['ip', 'hour', 'day'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_day_nuniqhour.csv', dtype=dtype)
train = train.merge(c1, how='left', on=['ip', 'day'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_device_os_nuniqapp.csv', dtype=dtype)
train = train.merge(c1, how='left', on=['ip', 'device', 'os'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_nuniqapp.csv', dtype=dtype)
train = train.merge(c1, how='left', on=['ip'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_nuniqchannel.csv', dtype=dtype)
train = train.merge(c1, how='left', on=['ip'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_nuniqdevice.csv', dtype=dtype)
train = train.merge(c1, how='left', on=['ip'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/app_nuniqchannel.csv', dtype=dtype)
train = train.merge(c1, how='left', on=['app'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_app_channel_varhour.csv', dtype=dtype)
train = train.merge(c1, how='left', on=['ip', 'app', 'channel'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_app_os_varhour.csv', dtype=dtype)
train = train.merge(c1, how='left', on=['ip', 'app', 'os'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_day_channel_varhour.csv', dtype=dtype)
train = train.merge(c1, how='left', on=['ip', 'day', 'channel'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_app_channel_avghour.csv', dtype=dtype)
train = train.merge(c1, how='left', on=['ip', 'app', 'channel'])
del c1
train.to_csv(FDATA+'feature/train/train_withcount.csv', index=False)
del train
test = pd.read_csv(DATA + 'train/test.csv', dtype=dtype,
usecols=['ip', 'app', 'device', 'os', 'channel', 'is_attributed', 'hour', 'day'], engine='c')
c1 = pd.read_csv(FDATA+'feature/train/ip_app_nclick.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'app'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_app_os_nclick.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'app', 'os'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_app_os_device_nclick.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'app', 'os', 'device'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_channel_nclick.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'channel'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_hour_day_nclick.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'hour', 'day'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_day_nuniqhour.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'day'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_device_os_nuniqapp.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'device', 'os'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_nuniqapp.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_nuniqchannel.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_nuniqdevice.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/app_nuniqchannel.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['app'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_app_channel_varhour.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'app', 'channel'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_app_os_varhour.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'app', 'os'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_day_channel_varhour.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'day', 'channel'])
del c1
c1 = pd.read_csv(FDATA+'feature/train/ip_app_channel_avghour.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'app', 'channel'])
del c1
test.to_csv(FDATA+'feature/train/test_withcount.csv', index=False)
del test
test = pd.read_csv(DATA + 'test/train.csv', dtype=dtype,
usecols=['ip', 'app', 'device', 'os', 'channel', 'is_attributed', 'hour', 'day'], engine='c')
c1 = pd.read_csv(FDATA+'feature/test/ip_app_nclick.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'app'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_app_os_nclick.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'app', 'os'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_app_os_device_nclick.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'app', 'os', 'device'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_channel_nclick.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'channel'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_hour_day_nclick.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'hour', 'day'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_day_nuniqhour.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'day'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_device_os_nuniqapp.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'device', 'os'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_nuniqapp.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_nuniqchannel.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_nuniqdevice.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/app_nuniqchannel.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['app'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_app_channel_varhour.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'app', 'channel'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_app_os_varhour.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'app', 'os'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_day_channel_varhour.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'day', 'channel'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_app_channel_avghour.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'app', 'channel'])
del c1
test.to_csv(FDATA+'feature/test/train_withcount.csv', index=False)
test = pd.read_csv(DATA + 'test/test.csv', dtype=dtype,
usecols=['ip', 'app', 'device', 'os', 'channel', 'click_id', 'hour', 'day'], engine='c')
c1 = pd.read_csv(FDATA+'feature/test/ip_app_nclick.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'app'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_app_os_nclick.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'app', 'os'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_app_os_device_nclick.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'app', 'os', 'device'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_channel_nclick.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'channel'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_hour_day_nclick.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'hour', 'day'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_day_nuniqhour.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'day'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_device_os_nuniqapp.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'device', 'os'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_nuniqapp.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_nuniqchannel.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_nuniqdevice.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/app_nuniqchannel.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['app'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_app_channel_varhour.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'app', 'channel'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_app_os_varhour.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'app', 'os'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_day_channel_varhour.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'day', 'channel'])
del c1
c1 = pd.read_csv(FDATA+'feature/test/ip_app_channel_avghour.csv', dtype=dtype)
test = test.merge(c1, how='left', on=['ip', 'app', 'channel'])
del c1
test.to_csv(FDATA+'feature/test/test_withcount.csv', index=False)
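# Editor's note: the merge blocks above all follow one pattern (read a feature CSV,
# left-merge on its key columns, free the frame). A sketch of an equivalent loop, assuming
# the same feature-name -> key-columns mapping, would be:
#
#   feature_keys = {
#       'ip_app_nclick': ['ip', 'app'],
#       'ip_app_os_nclick': ['ip', 'app', 'os'],
#       # ... remaining features ...
#   }
#   for name, keys in feature_keys.items():
#       c1 = pd.read_csv(FDATA + 'feature/test/' + name + '.csv', dtype=dtype)
#       test = test.merge(c1, how='left', on=keys)
#       del c1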
|
[
"geniusxiaoguai@gmail.com"
] |
geniusxiaoguai@gmail.com
|
db8b1e3552c89679bea007f10213550429f8d25f
|
1869948448dd20a4abb309d2c35b58c6ede816d6
|
/binary/_mac/pandoc/pandoc.py
|
3d00f1104d2a6d7c87cd53aa4385b474634b9100
|
[
"BSD-2-Clause"
] |
permissive
|
sklnet/craft-blueprints-kde
|
52a5637adab75b30a90a79bbfcf31626267da5d9
|
b07b8736b2b6686b447956fa09e4b9ade7284401
|
refs/heads/master
| 2022-12-30T01:32:59.744929
| 2020-10-18T07:20:54
| 2020-10-18T07:20:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 974
|
py
|
# -*- coding: utf-8 -*-
import info
class subinfo(info.infoclass):
def setTargets(self):
versions = ["2.9.2.1"]
for ver in versions:
self.targets[ver] = f"https://github.com/jgm/pandoc/releases/download/{ver}/pandoc-{ver}-macOS.zip"
self.targetDigestsX64["2.9.2.1"] = (['c4847f7a6e6a02a7d1b8dc17505896d8a6e4c2ee9e8b325e47a0468036675307'], CraftHash.HashAlgorithm.SHA256)
self.defaultTarget = "2.9.2.1"
from Package.BinaryPackageBase import *
class Package(BinaryPackageBase):
def __init__(self):
BinaryPackageBase.__init__(self)
def install(self):
        # On Mac, the files are neatly sorted into /bin and /share in the upstream zip, already. We just need to copy the whole thing, stripping the outer folder.
dirs = os.listdir(self.workDir())
if len(dirs) != 1:
return False
utils.copyDir(os.path.join(self.workDir(), dirs[0]), self.installDir())
return True
|
[
"thomas.friedrichsmeier@kdemail.net"
] |
thomas.friedrichsmeier@kdemail.net
|
1682abe6e823855c668cc36f52c5e2edd2c0f47c
|
d08de24371daf54419ebd430e232ae0e92c6b262
|
/api/migrations/0004_auto_20201001_1820.py
|
872e6f4ae6deb7994595520b03d0a89d5d309cb9
|
[] |
no_license
|
iTh1nk/server-portfolio
|
3baad63e78f00344e95178ec9483ccda89ff41c2
|
d00a46dcec16ce1f6aee7755fa65361e0cf01b6e
|
refs/heads/master
| 2022-12-22T21:24:54.271556
| 2020-10-05T05:37:10
| 2020-10-05T05:37:10
| 299,807,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,207
|
py
|
# Generated by Django 3.1.1 on 2020-10-02 01:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0003_auto_20201001_1751'),
]
operations = [
migrations.AlterField(
model_name='message',
name='created_at',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='message',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='post',
name='created_at',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='post',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='project',
name='created_at',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='project',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
]
|
[
"fnchao@hotmail.com"
] |
fnchao@hotmail.com
|
2916662efe193673fd0888fc719d215581ecaee1
|
c6c45b4f3f06a8f7a23235d396acbe66b599a303
|
/graphgen.py
|
198beabbc90784be06870b1bb0e6ec0962953945
|
[] |
no_license
|
ivanpostolski/sed2017
|
77033485f747fd29a0f0d18d49ce197918875531
|
9f5856ec2941235ab90d96fd7799b7fb79dbd624
|
refs/heads/master
| 2021-07-04T20:32:47.609296
| 2017-09-25T20:12:34
| 2017-09-25T20:12:34
| 104,384,446
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,522
|
py
|
import random as rnd
import networkx as nx
from networkx.utils import uniform_sequence, create_degree_sequence
import sys
def printma(CM,infect=[]):
#prelude
print "[top]"
#components
components = "components: "
nodes = CM.nodes()
for n in nodes:
components += "node%d@persona%d " % (n,CM.degree(n) if CM.degree(n) > 0 else 1)
print components
print "in: infect"
for i in infect:
print "link: infect infect@node%d" % i
ports = [0 for i in range(0,len(nodes))]
for edge in CM.edges():
origin = edge[0]
target = edge[1]
print "link: out%d@node%d in%d@node%d" % (ports[origin],origin,ports[target],target)
print "link: out%d@node%d in%d@node%d" % (ports[target],target,ports[origin],origin)
ports[origin] += 1
ports[target] += 1
if (len(sys.argv) < 2):
print "arg[0] is population size, arg[1] is gamma alpha (default 10), arg[2] is beta (default 1) and arg[3] is infected size (default 1%)"
sys.exit(0);
n = int(sys.argv[1])
seq=create_degree_sequence(n,lambda x: [rnd.gammavariate(alpha=10.0 if len(sys.argv) < 3 else float(sys.argv[2]) ,beta=1.0 if len(sys.argv) < 4 else float(sys.argv[3])) for i in range(x)])
G=nx.configuration_model(seq)
G.remove_edges_from(G.selfloop_edges())
G=nx.Graph(G)
degree_sequence=sorted(nx.degree(G).values(),reverse=True) # degree sequence
print "%Degree sequence is ", degree_sequence
dmax=max(degree_sequence)
print "%%Max degree is: %d"%dmax
printma(G,infect=[rnd.randint(0,n-1) for i in range(n/100 if len(sys.argv) < 5 else int(sys.argv[4]))])
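# Editor's note: an illustrative invocation, not part of the original script. Per the usage
# message above (argv[1]=population size, argv[2]=gamma alpha, argv[3]=beta, argv[4]=infected
# count), a 1000-node graph with gamma(alpha=10, beta=1) degrees and 10 infected nodes:
#
#   python graphgen.py 1000 10 1 10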
|
[
"ivan.postolski@gmail.com"
] |
ivan.postolski@gmail.com
|
346e11a0107d50cc89d7540ea87bb1c421c330ac
|
8a95359a35ee57bf87f64bb49b2552adfbdac9fd
|
/2015/day02.py
|
d72721dae6f489ea41b99f038d4c0e7706109cd7
|
[] |
no_license
|
ekuns/AdventOfCode
|
f0461b60d77cdb08e4a877bf13a7bd847168e043
|
fa17fcca93e5ff4684a37b75997383f9a883cf83
|
refs/heads/master
| 2020-08-08T04:36:13.314391
| 2019-10-11T03:16:52
| 2019-10-11T03:16:52
| 213,715,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
f = open("input02.txt", "r")
lines = f.readlines()
f.close()
print('Number of lines: ' + str(len(lines)))
paper = 0
ribbon = 0
for line in lines:
items = [int(i) for i in line.split('x') if 'x' in line]
if len(items) < 3: break
items.sort()
l, w, h = items
sides = (l*w, w*h, l*h)
paper += sides[0]*2 + sides[1]*2 + sides[2]*2 + min(sides)
ribbon += l*2 + w*2 + l*w*h
print('Total paper: ' + str(paper))
print('Total ribbon: ' + str(ribbon))
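# Editor's note: a worked example, not in the original script. For a single 2x3x4 box the
# sorted dims give l=2, w=3, h=4 and sides = (6, 12, 8), so:
#   paper  = 2*6 + 2*12 + 2*8 + min(6, 12, 8) = 52 + 6 = 58
#   ribbon = 2*2 + 2*3 + 2*3*4 = 10 + 24 = 34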
|
[
"eddie.kuns@gmail.com"
] |
eddie.kuns@gmail.com
|
2b1bedb198e506521c38280ccfe2c01b71ccd450
|
2d5470377cf53854eb7e899c2cb0c3aecd9505b8
|
/constants.py
|
7a7cac32d7bd31fc360828e98797b278f63a2fa2
|
[] |
no_license
|
ammarinjtk/Neural-Relation-Extraction
|
ed565b5b56b38345d53fb5b5b26eda5392d95380
|
4fbf77e728bb21c0cf5ddb39c1e9fd33acead2cc
|
refs/heads/master
| 2020-04-09T19:52:43.925779
| 2019-12-12T03:39:22
| 2019-12-12T03:39:22
| 160,556,429
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 954
|
py
|
WORD_EMBEDDING_DIM = 200
POS_EMBEDDING_DIM = 100
DISTANCE_EMBEDDING_DIM = 300
DEPENDENCY_EMBEDDING_DIM = 300
UNKNOWN_WORD = '_UNK_'
PADDING_WORD = '_PAD_'
ELMO_EMBEDDING_DIM = 400
BERT_FEATURE_DIM = 768
# SEED = 239387
SEED_LIST = [345, 166977, 239387, 240825, 250906, 290780, 312510, 439708, 489995,
545617, 585845, 614636, 675969, 820719, 920316, 1187617, 1484458, 3835082,
5064379, 5647183, 5694250, 6333898, 6797546, 7144728, 7461780, 7696045, 8468732,
8730848, 8842617, 9975400]
DATA_DIR_PATH = "./data"
W2V_MODEL_PATH = "/home/mind/ammarinjtk/wikipedia-pubmed-and-PMC-w2v.bin"
ELMO_MODEL_PATH = "/home/mind/ammarinjtk/Neural-Relation-Extraction/entitykeyword_finetune_weights.hdf5"
ELMO_OPTIONS_PATH = "/home/mind/ammarinjtk/Neural-Relation-Extraction/options.json"
BERT_FEATURES_PATH = "/home/mind/ammarinjtk/Neural-Relation-Extraction/finetuned_bert.jsonl"
OUTPUT_DIR_PATH = "./our_predictions"
|
[
"ammarinjtk@outlook.com"
] |
ammarinjtk@outlook.com
|
900ce326d55c5c3faef62bdd31921280baddee7a
|
380a47268c5975473a2e7c38c747bc3bdbd981b1
|
/benchmark/third_party/transformers/tests/repo_utils/test_tests_fetcher.py
|
0541b72d9581f1fd8b9c1808637c7dcc534b90d8
|
[
"Apache-2.0"
] |
permissive
|
FMInference/FlexGen
|
07aa9b1918c19b02077e13ad07e76840843810dd
|
d34f7b4b43ed87a374f394b0535ed685af66197b
|
refs/heads/main
| 2023-07-24T02:29:51.179817
| 2023-07-21T22:38:31
| 2023-07-21T22:38:31
| 602,270,517
| 6,821
| 411
|
Apache-2.0
| 2023-07-07T22:59:24
| 2023-02-15T21:18:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,759
|
py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import unittest
from git import Repo
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
transformers_path = os.path.join(git_repo_path, "src", "transformers")
# Tests are run against this specific commit for reproducibility
# https://github.com/huggingface/transformers/tree/07f6690206e39ed7a4d9dbc58824314f7089bb38
GIT_TEST_SHA = "07f6690206e39ed7a4d9dbc58824314f7089bb38"
from tests_fetcher import checkout_commit, clean_code, get_module_dependencies # noqa: E402
class CheckDummiesTester(unittest.TestCase):
def test_clean_code(self):
# Clean code removes all strings in triple quotes
self.assertEqual(clean_code('"""\nDocstring\n"""\ncode\n"""Long string"""\ncode\n'), "code\ncode")
self.assertEqual(clean_code("'''\nDocstring\n'''\ncode\n'''Long string'''\ncode\n'''"), "code\ncode")
# Clean code removes all comments
self.assertEqual(clean_code("code\n# Comment\ncode"), "code\ncode")
self.assertEqual(clean_code("code # inline comment\ncode"), "code \ncode")
def test_checkout_commit(self):
repo = Repo(git_repo_path)
self.assertNotEqual(repo.head.commit.hexsha, GIT_TEST_SHA)
with checkout_commit(repo, GIT_TEST_SHA):
self.assertEqual(repo.head.commit.hexsha, GIT_TEST_SHA)
self.assertNotEqual(repo.head.commit.hexsha, GIT_TEST_SHA)
def test_get_module_dependencies(self):
bert_module = os.path.join(transformers_path, "models", "bert", "modeling_bert.py")
expected_deps = [
"activations.py",
"modeling_outputs.py",
"modeling_utils.py",
"pytorch_utils.py",
"models/bert/configuration_bert.py",
]
expected_deps = set(os.path.join(transformers_path, f) for f in expected_deps)
repo = Repo(git_repo_path)
with checkout_commit(repo, GIT_TEST_SHA):
deps = get_module_dependencies(bert_module)
deps = set(os.path.expanduser(f) for f in deps)
self.assertEqual(deps, expected_deps)
|
[
"sqy1415@gmail.com"
] |
sqy1415@gmail.com
|
b02300f7f4d44d5f678fb6c496e47722a709a37a
|
9ddf25b50d7ac91a6e17566e5dcfd477c7d1ad2d
|
/catkin_ws/devel/lib/python3/dist-packages/shoulderexo/msg/_Torque.py
|
065551410c9a3687b9e48c3b87a3b8bdc8686442
|
[] |
no_license
|
yuihjk7412/ROS_FRAME_FOR_EXO
|
3679ee436430f14dffdcbb479ec4eb2cb088ac71
|
e119e8a0969ddfbb10bb354ff57974b241d48ed4
|
refs/heads/master
| 2021-07-08T00:31:09.844979
| 2020-10-22T09:27:30
| 2020-10-22T09:27:30
| 199,652,574
| 0
| 0
| null | 2019-09-05T03:34:23
| 2019-07-30T13:00:50
|
Makefile
|
UTF-8
|
Python
| false
| false
| 3,932
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from shoulderexo/Torque.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Torque(genpy.Message):
_md5sum = "67e3a742abd7e4b159e23cb00d1df783"
_type = "shoulderexo/Torque"
_has_header = False #flag to mark the presence of a Header object
_full_text = """float32 torque1
float32 torque2
int32 port_num
"""
__slots__ = ['torque1','torque2','port_num']
_slot_types = ['float32','float32','int32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
torque1,torque2,port_num
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Torque, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.torque1 is None:
self.torque1 = 0.
if self.torque2 is None:
self.torque2 = 0.
if self.port_num is None:
self.port_num = 0
else:
self.torque1 = 0.
self.torque2 = 0.
self.port_num = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_2fi().pack(_x.torque1, _x.torque2, _x.port_num))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 12
(_x.torque1, _x.torque2, _x.port_num,) = _get_struct_2fi().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_2fi().pack(_x.torque1, _x.torque2, _x.port_num))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 12
(_x.torque1, _x.torque2, _x.port_num,) = _get_struct_2fi().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2fi = None
def _get_struct_2fi():
global _struct_2fi
if _struct_2fi is None:
_struct_2fi = struct.Struct("<2fi")
return _struct_2fi
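# Editor's note: a minimal usage sketch, not part of the generated file; the topic name and
# values below are illustrative only:
#
#   import rospy
#   rospy.init_node('torque_sender')
#   pub = rospy.Publisher('shoulderexo/torque', Torque, queue_size=10)
#   pub.publish(Torque(torque1=0.5, torque2=-0.25, port_num=1))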
|
[
"514522970@qq.com"
] |
514522970@qq.com
|
99d35785724da17460e3fe45148f72499f12b4aa
|
b483c598fa375e9af02348960f210b9f482bd655
|
/cursoemvideo/desafios/Desafio049.py
|
f63ec756f03b607ef01a6eddcbbbbe52a49e72ab
|
[
"MIT"
] |
permissive
|
brunofonsousa/python
|
6f766d08bf193180ea9a4903cb93ffd167db588d
|
8f2f26c77015c0baaa76174e004406b4115272c7
|
refs/heads/master
| 2022-09-30T14:58:01.080749
| 2020-06-08T09:55:35
| 2020-06-08T09:55:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
'''
Python Exercise 049: Redo DESAFIO 009, showing the multiplication table of a number chosen by the user, but this time using a for loop.
'''
n = int(input('Digite um número para sua tabuada: '))
for i in range(1,11):
print('{} x {} = {}'.format(n, i, n*i))
|
[
"brunofonsousa@gmail.com"
] |
brunofonsousa@gmail.com
|
a25b814d42997df433f9921542984d2788f3e6b0
|
a74fb42803de854f321b40bf55a07007580321b8
|
/test_file.py
|
e855cda8d95cdb35848d138e44dacf43e5af8541
|
[] |
no_license
|
pritishyuvraj/learning_git
|
7b756e87d394c93ce6149498be6fb472f79e3e3e
|
90453079672af676db26ecbf83f912f66ef85905
|
refs/heads/master
| 2020-03-22T00:08:31.271774
| 2018-07-09T03:38:41
| 2018-07-09T03:38:41
| 139,226,380
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51
|
py
|
a = 2
b = 4
c = a + b
print (c)
d = c + 1
e = d + 1
|
[
"pyuvraj@fb.com"
] |
pyuvraj@fb.com
|
1ea24b35a310c4d10a022d73322a3be2200f596f
|
e4ff7740492da84d4bfda5ffc51887921a52c865
|
/ear/models/resnet.py
|
7cf1b16a6d3fd6fa29df701ed81f7c38f22a5b2e
|
[] |
no_license
|
daiguoxi/Demo_ear
|
a8bc33ee513f6663b49059e3e7da19104ababb67
|
3c0d031733c9428d96c5e26a6c50ad50f6f372e3
|
refs/heads/master
| 2023-05-07T02:03:27.416169
| 2021-05-28T12:28:25
| 2021-05-28T12:28:25
| 371,668,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,530
|
py
|
# coding = utf-8
# @File : resnet.py
# @Cont : ResNet network structure
import torch
import torch.nn as nn
# Import the helper that loads network weights from a URL
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
# Names of the available networks
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101','resnet152']
# URLs of the pretrained weights
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth'
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""
    Define a 3x3 convolution
    :param in_planes: number of input channels
    :param out_planes: number of output channels
    :param stride: stride
    :param groups: controls the connections between inputs and outputs
    :param dilation: dilation rate of the dilated (atrous) convolution
:return:
"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""
    Define a 1x1 convolution
    :param in_planes: number of input channels
    :param out_planes: number of output channels
    :param stride: stride
:return:
"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
# Basic residual block
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ['downsample']
    # Parameter definitions
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
    # Forward pass
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
        # Residual connection
out += identity
out = self.relu(out)
return out
# Bottleneck block
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ['downsample']
    # Parameter definitions
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
    # Forward pass
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
# Network architecture
class ResNet(nn.Module):
    # Parameter definitions
def __init__(self, block, layers, num_classes=8, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
        # Weight initialization
        # Option 1
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
        # Option 2
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
    # Build one stage of layers
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
    # Forward pass
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
# Load pretrained weights
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
|
[
"daiguoxi@gmail.com"
] |
daiguoxi@gmail.com
|
2342cdb977881b769c979d66492d5f5164debdee
|
1fbb4bd8aefa855c3c9181c3bc490f5131151a71
|
/test_snippets/parentFolder.py
|
692f49c7c84cf29e4de97499fe93ea4b182b2589
|
[] |
no_license
|
thesraid/pyvmomi
|
e600a24a79b2bcc94e2f9057e0f6680437acb50d
|
250d076c2cc4904707d64bcc485aaf120c6c55f9
|
refs/heads/master
| 2021-06-10T11:40:47.867044
| 2020-07-09T09:40:33
| 2020-07-09T09:40:33
| 90,965,042
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,357
|
py
|
#!/usr/bin/env python
"""
Program to authenticate and print VM details
"""
import atexit
import argparse
import getpass
# Ignore SSL warnings
# http://pyvmomi.2338814.n4.nabble.com/Connect-to-esxi-host-with-self-Signed-certificate-td21.html
import requests
requests.packages.urllib3.disable_warnings()
import ssl
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
from pyVim import connect
from pyVmomi import vmodl
from pyVmomi import vim
def get_args():
"""Get command line args from the user.
"""
parser = argparse.ArgumentParser(
description='Standard Arguments for talking to vCenter')
# because -h is reserved for 'help' we use -s for service
parser.add_argument('-s', '--host',
required=True,
action='store',
help='vSphere service to connect to')
# because we want -p for password, we use -o for port
parser.add_argument('-o', '--port',
type=int,
default=443,
action='store',
help='Port to connect on')
parser.add_argument('-u', '--user',
required=True,
action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password',
required=False,
action='store',
help='Password to use when connecting to host')
parser.add_argument('-f', '--folder',
required=True,
action='store',
help='Folder to list')
args = parser.parse_args()
if not args.password:
args.password = getpass.getpass(
prompt='Enter password for host %s and user %s: ' %
(args.host, args.user))
return args
def get_obj(content, vimtype, name):
"""
Return an object by name, if name is None the
first found object is returned
"""
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
for c in container.view:
if name:
if c.name == name:
obj = c
break
else:
obj = c
break
return obj
def main():
"""
    Simple command-line program for listing the VMs in a folder.
"""
args = get_args()
try:
service_instance = connect.SmartConnect(host=args.host,
user=args.user,
pwd=args.password,
port=int(args.port))
atexit.register(connect.Disconnect, service_instance)
print " "
session_name = service_instance.content.sessionManager.currentSession.fullName
print "Hello {}".format(session_name)
print "You have successfully logged into {}".format(args.host)
# NOTE (hartsock): only a successfully authenticated session has a
# session key aka session id.
print " "
print " "
# Print list of VMs in the specified folder
FOLDER = args.folder
# This dumps all of the vCenter data into an object
content = service_instance.RetrieveContent()
        # get_obj is defined above. It searches through the content for the specified folder
# It returns a folder object
folder = get_obj(content, [vim.Folder], FOLDER)
        # If the folder was found, list its contents
if folder is not None:
depth = 5
while folder.name != "vm":
if depth != 0:
print folder.name
folder = folder.parent
depth = depth-1
else:
print "Error: Specified folder is nested in too many subdirectories (max: 5)"
return -1
else:
print "Folder", FOLDER, "not found"
except vmodl.MethodFault as error:
#print "Caught vmodl fault : " + error.msg
print error.msg
return -1
return 0
# Start program
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
thesraid.noreply@github.com
|
c443aeb9fd5b230fe86f42e359139ce8be319587
|
6f9533673f3349801141c3899fe5d090dd55c7bb
|
/user/Matt/changeSpeedOfficial.py
|
3dafbc55c1459810cc0ee3d9849a65916418971c
|
[] |
no_license
|
henokalem/teamge
|
60ece1636aed22932bd65c5c3ff6e04730a67a9d
|
0e54c058afc2d746d5fd70f0fedcd425eda19b9d
|
refs/heads/master
| 2021-01-11T15:45:50.796048
| 2019-01-09T19:14:10
| 2019-01-09T19:14:10
| 79,923,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 107
|
py
|
import changeSpeed
import sys
test = changeSpeed.speedChanger()
test.changeSpeed(sys.argv[1],sys.argv[2])
|
[
"msopata59@gmail.com"
] |
msopata59@gmail.com
|
11b025858233eaea3f653c54b4de638aa117c62a
|
8acd3e44316a0ff955f0d21470f3e60ae9ab8095
|
/BFS.py
|
350b0acfddfc6ae2c78f2c0791ce7093744231dd
|
[] |
no_license
|
hchs710623/test-py
|
d4aac24ae85f2f64034d0cf79856b9842c60a338
|
40a2f4aec77b5675923c3e2b63acc58f0500143b
|
refs/heads/master
| 2020-03-24T09:57:44.545403
| 2018-09-19T16:45:51
| 2018-09-19T16:45:51
| 142,643,382
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,153
|
py
|
import collections
import itertools
class Solution(object):
def shortestPathAllKeys(self, grid):
R, C = len(grid), len(grid[0])
# location['a'] = the coordinates of 'a' on the grid, etc.
location = {v: (r, c)
for r, row in enumerate(grid)
for c, v in enumerate(row)
if v not in '.#'}
def neighbors(r, c):
for cr, cc in ((r-1, c), (r, c-1), (r+1, c), (r, c+1)):
if 0 <= cr < R and 0 <= cc < C:
yield cr, cc
def bfs(source, target, keys = ()):
sr, sc = location[source]
tr, tc = location[target]
seen = [[False] * C for _ in range(R)]
seen[sr][sc] = True
queue = collections.deque([(sr, sc, 0)])
while queue:
r, c, d = queue.popleft()
if r == tr and c == tc: return d
for cr, cc in neighbors(r, c):
if not seen[cr][cc] and grid[cr][cc] != '#':
if grid[cr][cc].isupper() and grid[cr][cc].lower() not in keys:
continue
queue.append((cr,cc,d+1))
seen[cr][cc] = True
return float('inf')
ans = float('inf')
keys = "".join(chr(ord('a') + i) for i in range(len(location) // 2))
for cand in itertools.permutations(keys):
# bns : the built candidate answer, consisting of the sum
# of distances of the segments from '@' to cand[0] to cand[1] etc.
bns = 0
for i, target in enumerate(cand):
source = cand[i-1] if i > 0 else '@'
d = bfs(source, target, cand[:i])
bns += d
if bns >= ans: break
else:
ans = bns
return ans if ans < float('inf') else -1
if __name__=="__main__":
grid = ["@.a.#","###.#","b.A.B"]
grid2 = ["@..aA","..B#.","....b"]
a = Solution()
# result = a.shortestPathAllKeys(grid)
result = a.shortestPathAllKeys(grid2)
print(result)
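# Editor's note: for reference, these two grids appear to be the LeetCode 864 ("Shortest
# Path to Get All Keys") examples; the expected answers are 8 for grid and 6 for grid2.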
|
[
"mylin.cs04g@nctu.edu.tw"
] |
mylin.cs04g@nctu.edu.tw
|
bf069a215b11a3007349f39443a33af4924f4ac9
|
fa4aaaa682b9f947260699cef2cccd936bdbd185
|
/pro_minimized.py
|
9269377845980249da6feb133b143caff4d98d34
|
[] |
no_license
|
ravikiranrao/metamath
|
b294ee85c58e793892408bb606deba7bfbd18162
|
e85250fc81dcd8d5495f7f8ee644d30cf9351674
|
refs/heads/master
| 2021-09-27T18:46:05.175817
| 2021-09-27T15:18:53
| 2021-09-27T15:18:53
| 157,056,750
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,232
|
py
|
import networkx as nx
import re
import matplotlib.pyplot as plt
import PyPDF2
from networkx.algorithms import community
from collections import Counter
# Defining variables
G, G1, G2 = nx.DiGraph(), nx.DiGraph(), nx.DiGraph()
pdfFileObj = open('WebPage.pdf', 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
pages = pdfReader.numPages
a, b, c, d, e = [], [], [], [], []
chapter_pages = [0, 48, 68, 108, 128, 154, 192, 226, 252, 280, 422, 470, 504];
# Generating Nodes for the Graph takes parameter as chapter number
color_map = []
def generate_nodes(chapter_number):
text = "" # Contains text of input chapter
if (chapter_number == 100):
for x in range(1, 14):
generate_nodes(x);
else:
page_start = chapter_pages[chapter_number - 1] # starting page of the chapter
page_end = chapter_pages[chapter_number] - 1 # ending page of the chapter
# looping over all the pages
while page_start < page_end:
pageObj = pdfReader.getPage(page_start) # reading text from current page
text += pageObj.extractText().replace('\n', '') # append extracted text to variable text
pattern = re.findall(
"(?:Proposition\d{1,3}|\[Prop\.\d{1,2}\.\d{1,3}|\[Post\.\d|\[Def\.\d{1,2}\.\d{1,3}|\[C\.N\.\d)",
text) # regex pattern to find in the book
# looping through the pattern to distinguish among Proposition, Postulates, Definitions and Common Notions
i = 0 # loop counter
while i < len(pattern):
if "Proposition" in pattern[i]:
e = pattern[i].replace('Proposition', '')
m = str(chapter_number) + "." + e
a.append(m) # append Proposition to list a
elif "[Prop." in pattern[i]:
t = pattern[i].replace('[Prop.', '')
p = t.replace(']', '')
b.append((p, m)) # append above used proposition in b
elif "[Post." in pattern[i]:
y = pattern[i].replace('[Post.', 'p')
x = y.replace(']', '')
b.append((x, m)) # append above used postulates in b
elif "[Def." in pattern[i]:
y = pattern[i].replace('[Def.', 'd')
x = y.replace(']', '')
b.append((x, m)) # append above used definitions in b
elif "[C.N." in pattern[i]:
y = pattern[i].replace('[C.N.', 'c')
x = y.replace(']', '')
b.append((x, m)) # append above used common notions in b
i += 1 # increment the loop counter
page_start += 1 # go to next page
G.add_nodes_from(a) # append list a in Graph G
G.add_edges_from(b) # append list b in Graph1 G
# Colouring nodes
for node in G:
if ("d" in node):
color_map.append('blue')
elif ("p" in node):
color_map.append('green')
else:
color_map.append('red')
num = input("Enter chapter number: (1-13) and 100 for complete book :- ")
generate_nodes(int(num))
nx.draw(G, node_color=color_map, with_labels=True)
# nx.draw(G,with_labels = True, node_color = 'r')
plt.show()
in_degrees = list(G.in_degree(list(G.nodes)))
out_degrees = list(G.out_degree(list(G.nodes)))
print("in degrees of all the nodes:", in_degrees)
print('--------------------------------------------------------------------')
print("out degrees of all the nodes:", out_degrees)
print('--------------------------------------------------------------------')
print("communities:")
communities_generator = community.girvan_newman(G)
top_level_communities = next(communities_generator)
next_level_communities = next(communities_generator)
coms = sorted(map(sorted, next_level_communities))
for i in coms:
print(i)
print('--------------------------------------------------------------------')
def lin_eq(z):
if G.in_degree(z) == 0:
d.append(z)
else:
for i in list(G.predecessors(z)):
c.append((i, z))
lin_eq(i)
num1 = input("Enter the node:")
lin_eq(num1)
G1.add_edges_from(c)
nx.draw(G1, with_labels=True, node_color='r')
plt.show()
print("linear combinations for 1.33 is:", Counter(d))
pdfFileObj.close()
|
[
"2014csb1027@iitrpr.ac.in"
] |
2014csb1027@iitrpr.ac.in
|
3d272deaf1fb9734514e093b81f139fef41c0ff6
|
08c382c2cf8cace902ade2a3d33b3abab3629e9b
|
/fb_api.py
|
da8712cd2ff361045352f744da703fa2ec6f82df
|
[
"BSD-3-Clause"
] |
permissive
|
wing3s/shop_bot
|
4d1966fef32a38f6128f82bf70bbc515deda577f
|
4c6a34538ac8de9999edae190f6269bc6a63c2cf
|
refs/heads/master
| 2021-01-10T02:20:02.953645
| 2016-04-11T01:06:04
| 2016-04-11T01:06:04
| 55,930,711
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,142
|
py
|
import os
import requests
import time
import ConfigParser
import logging
import logging.config
from requests.exceptions import RequestException
from helper import get_logger, base_path
config = ConfigParser.ConfigParser()
config.read(os.path.join(base_path, 'config.ini'))
logger = get_logger('fb_api', __file__)
__author__ = "Wen-Hao Lee"
__email__ = "wing3s@gmail.com"
__copyright__ = "Copyright 2014, Numnum"
class FBBot(object):
graph_url = "https://graph.facebook.com"
cooldown = 120 # sec
search_radius = 500 # m
def search_restaurant(self, lat, lon):
restaurants = self._search_place('restaurant', lat, lon)
steakhouses = self._search_place('steakhouse', lat, lon)
bars = self._search_place('bar', lat, lon)
return restaurants + steakhouses + bars
def _search_place(self, query, lat, lon):
params = {
'q': query,
'type': 'place',
'center': '%s,%s' % (lat, lon),
'distance': self.search_radius,
'limit': 500,
'offset': 0
}
return self.search(params)
def search(self, params):
params['access_token'] = "{app_id}|{app_key}".format(
app_key=config.get('fbAPI', 'key'),
app_id=config.get('fbAPI', 'id'))
try:
r = requests.get(
"%s/%s" % (self.graph_url, 'search'),
params=params)
resp = r.json()
if r.status_code != 200:
resp_err = resp.get('error')
err_code = resp_err.get('code')
if err_code == 4:
logger.warning(
'Reach limit, cooldown %ds' % self.cooldown)
time.sleep(self.cooldown)
return self.search(params)
else:
logger.error(resp)
return None
return resp['data']
except RequestException as err:
logger.error(err)
def fetch(self, fbid):
try:
r = requests.get("%s/%s" % (self.graph_url, fbid))
resp = r.json()
if r.status_code != 200:
resp_err = resp.get('error')
err_code = resp_err.get('code')
if err_code == 4:
logger.warning(
'Reach limit, cooldown %ds' % self.cooldown)
time.sleep(self.cooldown)
return self.fetch(fbid)
elif err_code == 21:
err_msg = resp_err.get('message')
new_fbid_pt = 'page ID'
new_fbid = err_msg[
err_msg.index(new_fbid_pt)+len(new_fbid_pt)+1:
err_msg.index('.')]
logger.warning(
'Get new fbid %s for %s' % (new_fbid, fbid))
return self.fetch(new_fbid)
else:
logger.error([resp, r.url])
return None
return resp
except RequestException as err:
logger.error(err)
|
[
"wing3s@gmail.com"
] |
wing3s@gmail.com
|
bfe5bc53944a45e3c846724c381e265f586251a1
|
74577eb1ba5f1e01d5f651880b28f7872e2ee422
|
/lib/datasets/decoder.py
|
61942440dee614f9bf01216b369f6c62f6924982
|
[
"Apache-2.0"
] |
permissive
|
CV-IP/mgma
|
5b06ec149bfb6f31a0e638676d633e1f454e790b
|
4007b9b0a86b3b926bb7de6a017679b58ea319cf
|
refs/heads/main
| 2023-06-19T09:25:23.464106
| 2021-07-18T17:00:47
| 2021-07-18T17:00:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,468
|
py
|
# Code adapted from:
# https://github.com/facebookresearch/SlowFast
import math
import numpy as np
import random
import torch
import torchvision.io as io
def temporal_sampling(frames, start_idx, end_idx, num_samples):
"""
Given the start and end frame index, sample num_samples frames between
the start and end with equal interval.
Args:
frames (tensor): a tensor of video frames, dimension is
`num video frames` x `channel` x `height` x `width`.
start_idx (int): the index of the start frame.
end_idx (int): the index of the end frame.
num_samples (int): number of frames to sample.
Returns:
frames (tersor): a tensor of temporal sampled video frames, dimension is
`num clip frames` x `channel` x `height` x `width`.
"""
index = torch.linspace(start_idx, end_idx, num_samples)
index = torch.clamp(index, 0, frames.shape[0] - 1).long()
frames = torch.index_select(frames, 0, index)
return frames
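# Editor's note: a worked example, not in the original file. With start_idx=0, end_idx=63 and
# num_samples=8, torch.linspace(0, 63, 8) gives the indices [0, 9, 18, 27, 36, 45, 54, 63],
# i.e. frames evenly spaced across the clip.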
def get_start_end_idx(video_size, clip_size, clip_idx, num_clips):
"""
Sample a clip of size clip_size from a video of size video_size and
return the indices of the first and last frame of the clip. If clip_idx is
-1, the clip is randomly sampled, otherwise uniformly split the video to
num_clips clips, and select the start and end index of clip_idx-th video
clip.
Args:
video_size (int): number of overall frames.
clip_size (int): size of the clip to sample from the frames.
clip_idx (int): if clip_idx is -1, perform random jitter sampling. If
clip_idx is larger than -1, uniformly split the video to num_clips
clips, and select the start and end index of the clip_idx-th video
clip.
num_clips (int): overall number of clips to uniformly sample from the
given video for testing.
Returns:
start_idx (int): the start frame index.
end_idx (int): the end frame index.
"""
delta = max(video_size - clip_size, 0)
if clip_idx == -1:
# Random temporal sampling.
start_idx = random.uniform(0, delta)
else:
# Uniformly sample the clip with the given index.
start_idx = delta * clip_idx / num_clips
end_idx = start_idx + clip_size - 1
return start_idx, end_idx
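# Editor's note: a worked example, not in the original file. With video_size=300,
# clip_size=64 and num_clips=10, delta = 300 - 64 = 236, so clip_idx=0 gives (0.0, 63.0)
# and clip_idx=5 gives (118.0, 181.0), since start_idx = 236 * 5 / 10 = 118.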
def pyav_decode_stream(
container, start_pts, end_pts, stream, stream_name, buffer_size=0
):
"""
Decode the video with PyAV decoder.
Args:
container (container): PyAV container.
start_pts (int): the starting Presentation TimeStamp to fetch the
video frames.
end_pts (int): the ending Presentation TimeStamp of the decoded frames.
stream (stream): PyAV stream.
stream_name (dict): a dictionary of streams. For example, {"video": 0}
means video stream at stream index 0.
buffer_size (int): number of additional frames to decode beyond end_pts.
Returns:
result (list): list of frames decoded.
max_pts (int): max Presentation TimeStamp of the video sequence.
"""
    # Seeking in the stream is imprecise. Thus, seek to an earlier PTS by a
# margin pts.
margin = 1024
seek_offset = max(start_pts - margin, 0)
container.seek(seek_offset, any_frame=False, backward=True, stream=stream)
frames = {}
buffer_count = 0
max_pts = 0
for frame in container.decode(**stream_name):
max_pts = max(max_pts, frame.pts)
if frame.pts < start_pts:
continue
if frame.pts <= end_pts:
frames[frame.pts] = frame
else:
buffer_count += 1
frames[frame.pts] = frame
if buffer_count >= buffer_size:
break
result = [frames[pts] for pts in sorted(frames)]
return result, max_pts
def torchvision_decode(
video_handle,
sampling_rate,
num_frames,
clip_idx,
video_meta,
num_clips=10,
target_fps=30,
modalities=("visual",),
max_spatial_scale=0,
):
"""
If video_meta is not empty, perform temporal selective decoding to sample a
clip from the video with TorchVision decoder. If video_meta is empty, decode
the entire video and update the video_meta.
Args:
video_handle (bytes): raw bytes of the video file.
sampling_rate (int): frame sampling rate (interval between two sampled
frames).
num_frames (int): number of frames to sample.
clip_idx (int): if clip_idx is -1, perform random temporal
sampling. If clip_idx is larger than -1, uniformly split the
video to num_clips clips, and select the clip_idx-th video clip.
video_meta (dict): a dict contains VideoMetaData. Details can be found
at `pytorch/vision/torchvision/io/_video_opt.py`.
num_clips (int): overall number of clips to uniformly sample from the
given video.
        target_fps (int): the input video may have a different fps; convert it to
the target video fps.
modalities (tuple): tuple of modalities to decode. Currently only
support `visual`, planning to support `acoustic` soon.
max_spatial_scale (int): the maximal resolution of the spatial shorter
edge size during decoding.
Returns:
frames (tensor): decoded frames from the video.
fps (float): the number of frames per second of the video.
decode_all_video (bool): if True, the entire video was decoded.
"""
# Convert the bytes to a tensor.
video_tensor = torch.from_numpy(np.frombuffer(video_handle, dtype=np.uint8))
decode_all_video = True
video_start_pts, video_end_pts = 0, -1
# The video_meta is empty, fetch the meta data from the raw video.
if len(video_meta) == 0:
# Tracking the meta info for selective decoding in the future.
meta = io._probe_video_from_memory(video_tensor)
# Using the information from video_meta to perform selective decoding.
video_meta["video_timebase"] = meta.video_timebase
video_meta["video_numerator"] = meta.video_timebase.numerator
video_meta["video_denominator"] = meta.video_timebase.denominator
video_meta["has_video"] = meta.has_video
video_meta["video_duration"] = meta.video_duration
video_meta["video_fps"] = meta.video_fps
video_meta["audio_timebas"] = meta.audio_timebase
video_meta["audio_numerator"] = meta.audio_timebase.numerator
video_meta["audio_denominator"] = meta.audio_timebase.denominator
video_meta["has_audio"] = meta.has_audio
video_meta["audio_duration"] = meta.audio_duration
video_meta["audio_sample_rate"] = meta.audio_sample_rate
fps = video_meta["video_fps"]
if (
video_meta["has_video"]
and video_meta["video_denominator"] > 0
and video_meta["video_duration"] > 0
):
# try selective decoding.
decode_all_video = False
clip_size = sampling_rate * num_frames / target_fps * fps
start_idx, end_idx = get_start_end_idx(
fps * video_meta["video_duration"], clip_size, clip_idx, num_clips
)
# Convert frame index to pts.
pts_per_frame = video_meta["video_denominator"] / fps
video_start_pts = int(start_idx * pts_per_frame)
video_end_pts = int(end_idx * pts_per_frame)
# Decode the raw video with the tv decoder.
v_frames, _ = io._read_video_from_memory(
video_tensor,
seek_frame_margin=1.0,
read_video_stream="visual" in modalities,
video_width=0,
video_height=0,
video_min_dimension=max_spatial_scale,
video_pts_range=(video_start_pts, video_end_pts),
video_timebase_numerator=video_meta["video_numerator"],
video_timebase_denominator=video_meta["video_denominator"],
)
if v_frames.shape == torch.Size([0]):
# failed selective decoding
decode_all_video = True
video_start_pts, video_end_pts = 0, -1
v_frames, _ = io._read_video_from_memory(
video_tensor,
seek_frame_margin=1.0,
read_video_stream="visual" in modalities,
video_width=0,
video_height=0,
video_min_dimension=max_spatial_scale,
video_pts_range=(video_start_pts, video_end_pts),
video_timebase_numerator=video_meta["video_numerator"],
video_timebase_denominator=video_meta["video_denominator"],
)
return v_frames, fps, decode_all_video
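# Editor's sketch (not in the original file): a worked example of the
# frame-index -> pts conversion used above for selective decoding; all numbers
# are made up for illustration.
def _example_frame_index_to_pts():
    fps = 30.0
    video_denominator = 90000  # e.g. a 1/90000 video timebase
    start_idx, end_idx = 60.0, 123.0  # frame indices from get_start_end_idx
    pts_per_frame = video_denominator / fps  # 3000 pts per frame here
    video_start_pts = int(start_idx * pts_per_frame)  # 180000
    video_end_pts = int(end_idx * pts_per_frame)  # 369000
    return video_start_pts, video_end_pts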
def pyav_decode(
container, sampling_rate, num_frames, clip_idx, num_clips=10, target_fps=30
):
"""
Convert the video from its original fps to the target_fps. If the video
supports selective decoding (i.e. decoding information is available in the
video header), perform temporal selective decoding and sample a clip from
the video with the PyAV decoder. If the video does not support selective
decoding, decode the entire video.
Args:
container (container): pyav container.
sampling_rate (int): frame sampling rate (interval between two sampled
frames).
num_frames (int): number of frames to sample.
clip_idx (int): if clip_idx is -1, perform random temporal sampling. If
clip_idx is larger than -1, uniformly split the video to num_clips
clips, and select the clip_idx-th video clip.
num_clips (int): overall number of clips to uniformly sample from the
given video.
target_fps (int): the input video may have a different fps; convert it to
the target video fps before frame sampling.
Returns:
frames (tensor): decoded frames from the video. Return None if no
video stream was found.
fps (float): the number of frames per second of the video.
decode_all_video (bool): If True, the entire video was decoded.
"""
# Try to fetch the decoding information from the video header. Some videos
# do not support fetching the decoding information; in that case the
# duration will be None.
fps = float(container.streams.video[0].average_rate)
frames_length = container.streams.video[0].frames
duration = container.streams.video[0].duration
if duration is None:
# If failed to fetch the decoding information, decode the entire video.
decode_all_video = True
video_start_pts, video_end_pts = 0, math.inf
else:
# Perform selective decoding.
decode_all_video = False
start_idx, end_idx = get_start_end_idx(
frames_length,
sampling_rate * num_frames / target_fps * fps,
clip_idx,
num_clips,
)
timebase = duration / frames_length
video_start_pts = int(start_idx * timebase)
video_end_pts = int(end_idx * timebase)
frames = None
# If video stream was found, fetch video frames from the video.
if container.streams.video:
video_frames, max_pts = pyav_decode_stream(
container,
video_start_pts,
video_end_pts,
container.streams.video[0],
{"video": 0},
)
container.close()
frames = [frame.to_rgb().to_ndarray() for frame in video_frames]
frames = torch.as_tensor(np.stack(frames))
return frames, fps, decode_all_video
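# Editor's sketch (not in the original file): the expected call pattern for
# pyav_decode. The path "example.mp4" and the local `import av` are
# illustrative assumptions.
def _example_pyav_decode_usage():
    import av
    container = av.open("example.mp4")
    # Randomly sample one clip of 8 frames taken 2 frames apart (at ~30 fps).
    frames, fps, decode_all_video = pyav_decode(
        container, sampling_rate=2, num_frames=8, clip_idx=-1, num_clips=1
    )
    # frames is None when the file has no video stream.
    return frames, fps, decode_all_video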
def decode(
container,
sampling_rate,
num_frames,
clip_idx=-1,
num_clips=10,
video_meta=None,
target_fps=30,
backend="pyav",
max_spatial_scale=0,
):
"""
Decode the video and perform temporal sampling.
Args:
container (container): pyav container.
sampling_rate (int): frame sampling rate (interval between two sampled
frames).
num_frames (int): number of frames to sample.
clip_idx (int): if clip_idx is -1, perform random temporal
sampling. If clip_idx is larger than -1, uniformly split the
video to num_clips clips, and select the
clip_idx-th video clip.
num_clips (int): overall number of clips to uniformly
sample from the given video.
video_meta (dict): a dict containing VideoMetaData. Details can be found
at `pytorch/vision/torchvision/io/_video_opt.py`.
target_fps (int): the input video may have a different fps; convert it to
the target video fps before frame sampling.
backend (str): decoding backend includes `pyav` and `torchvision`. The
default one is `pyav`.
max_spatial_scale (int): keep the aspect ratio and resize the frame so
that the shorter edge size is max_spatial_scale. Only used in
`torchvision` backend.
Returns:
frames (tensor): decoded frames from the video.
"""
# Currently supports two decoders: 1) PyAV, and 2) TorchVision.
assert clip_idx >= -1, "Not valid clip_idx {}".format(clip_idx)
try:
if backend == "pyav":
frames, fps, decode_all_video = pyav_decode(
container,
sampling_rate,
num_frames,
clip_idx,
num_clips,
target_fps,
)
elif backend == "torchvision":
frames, fps, decode_all_video = torchvision_decode(
container,
sampling_rate,
num_frames,
clip_idx,
video_meta,
num_clips,
target_fps,
("visual",),
max_spatial_scale,
)
else:
raise NotImplementedError(
"Unknown decoding backend {}".format(backend)
)
except Exception as e:
print("Failed to decode by {} with exception: {}".format(backend, e))
return None
# Return None if the frames were not decoded successfully.
if frames is None or frames.size(0) == 0:
return None
clip_sz = sampling_rate * num_frames / target_fps * fps
start_idx, end_idx = get_start_end_idx(
frames.shape[0],
clip_sz,
clip_idx if decode_all_video else 0,
num_clips if decode_all_video else 1,
)
# Perform temporal sampling from the decoded video.
frames = temporal_sampling(frames, start_idx, end_idx, num_frames)
return frames
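# Editor's sketch (not in the original file): end-to-end decoding and temporal
# sampling with the PyAV backend. The path "example.mp4" and the local
# `import av` are illustrative assumptions.
def _example_decode_usage():
    import av
    container = av.open("example.mp4")
    frames = decode(
        container,
        sampling_rate=2,
        num_frames=8,
        clip_idx=0,
        num_clips=10,
        video_meta={},
        target_fps=30,
        backend="pyav",
    )
    # frames is typically a (num_frames, H, W, 3) uint8 tensor, or None on failure.
    return frames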
|
[
"zhenshengshi@gmail.com"
] |
zhenshengshi@gmail.com
|
2241a4a393031facd3ede7fd7fd26b94149db3f8
|
46ce7046c5b3d33c12abda3f82002145b433884e
|
/macro.py
|
f19380873974c8ee001e2252503b8d40028f9197
|
[] |
no_license
|
htsrjdrouse/labbot_pcr
|
f5386bfef8891007b4d57b11a1e1da97d8a84c0a
|
ec54996e0f60b0c593835e36f016c8e449d719f2
|
refs/heads/master
| 2023-08-21T21:39:31.565620
| 2020-09-10T03:14:11
| 2020-09-10T03:14:11
| 285,470,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,765
|
py
|
# Standard-library modules used throughout this file; helper functions such as
# upublisher, microfluidic, getposition, jogcoordparser and readnxjson are
# assumed to be defined elsewhere in this project.
import re
import time
import json
import operator
#runeachmacrocmd(i,dser,aser)
def runeachmacrocmd(cmd,dser,aser):
sim = 0
if len(cmd)>0:
if re.match("^G1",cmd):
gss = re.split("_", cmd)
gcodecmd = gss[0]
tme = gss[1]
if float(tme) < 0.2:
tme = str(0.2)
if sim == 0:
#sersmoothie.readlines()
dser.write(gcodecmd+"\r\n")
time.sleep(float(tme))
gg =dser.readlines()
upublisher(cmd)
else:
print(gcodecmd)
print(tme)
if re.match("^G28",cmd):
gss = re.split("_", cmd)
gcodecmd = gss[0]
tme = gss[1]
print("sim is"+str(sim))
if sim == 0:
#sersmoothie.readlines()
dser.write(gcodecmd+"\r\n")
time.sleep(float(tme))
gg =dser.readlines()
upublisher(cmd)
else:
print(gcodecmd)
print(tme)
if re.match("^M114",cmd):
upublisher(cmd)
if sim == 0:
gg = getposition(dser)
upublisher(gg)
time.sleep(0.2)
else:
print(cmd)
print("1")
if re.match("^[wash|waste|pcv].*", cmd):
microfluidic(cmd,aser)
if re.match("^//",cmd):
upublisher(cmd)
def putmacrolinestogether(reformatmacro):
macrorunready = []
for i in reformatmacro:
if isinstance(i, list):
for j in i:
macrorunready.append(j)
else:
macrorunready.append(i)
return macrorunready
def tmecalc(gcodebatch):
mesg = readnxjson()
coordlog = readschedularjson()
try:
X = coordlog['X'][len(coordlog['X'])-1]
except:
X = mesg['currcoord']['X']
try:
Y = coordlog['Y'][len(coordlog['Y'])-1]
except:
Y = mesg['currcoord']['Y']
try:
Z = coordlog['Z'][len(coordlog['Z'])-1]
except:
Z = mesg['currcoord']['Z']
try:
E = coordlog['E'][len(coordlog['E'])-1]
except:
E = mesg['currcoord']['E']
tmln = []
b = []
tim = 0
poscmds = []
ct = 0
for i in gcodebatch:
i = re.sub("\n|\r", "", i)
dt = {}
#G1 F1800.000 E1.00000
#here I need to have a conditional if to separate non gcodes from gcodes
if re.match('^G1', i):
if re.match("^.*_", i):
cc = re.split("_", i)
ci = cc[0]
tt = cc[1]
else:
ci = i
tt = 0
i = ci
if re.match('^.*F.*', i):
df = re.match('^.*F(.*)$', i)
abf = re.sub('[ |X|x|Y|y|Z|z|E|e].*', '', df.group(1))
pf = float(abf)
if pf > 0:
F = pf
if re.match('^.*[Z|X|Y|E]', i):
dt['F'] = F
ct = ct + 1
dt['ct'] = ct
pe = 0
px = 0
py = 0
pz = 0
if re.match('^.*E', i):
d = re.match('^.*E(.*)', i)
abe = re.sub('[ |X|x|Y|y|Z|z|F|f].*', '', d.group(1))
pe = float(abe)
dt['diffe'] = abs(E-pe)
E = pe
dt['E'] = pe
if re.match('^.*X', i):
dx = re.match('^.*X(.*)', i)
abx = re.sub('[ |E|e|Y|y|Z|z|F|f].*', '', dx.group(1))
px = float(abx)
dt['diffx'] = abs(X-px)
X = px
dt['X'] = px
if re.match('^.*Y', i):
dy = re.match('^.*Y(.*)', i)
aby = re.sub('[ |E|e|X|x|Z|z|F|f].*', '', dy.group(1))
py = float(aby)
dt['diffy'] = abs(Y-py)
Y = py
dt['Y'] = py
if re.match('^.*Z', i):
dz = re.match('^.*Z(.*)', i)
abz = re.sub('[ |E|e|X|x|Y|y|F|f].*', '', dz.group(1))
pz = float(abz)
dt['diffz'] = abs(Z-pz)
Z = pz
dt['Z'] = pz
dt['cmd'] = i
comp = {}
try:
comp['diffx'] = dt['diffx']
except:
pass
try:
comp['diffy'] = dt['diffy']
except:
pass
try:
comp['diffz'] = dt['diffz']
except:
pass
try:
comp['diffe'] = dt['diffe']
except:
pass
sorted_comp = sorted(comp.items(), key=operator.itemgetter(1))
dt['maxdiff'] = sorted_comp[int(len(comp)-1)][1]
if dt['F'] > 0:
dt['time'] = (dt['maxdiff'] / dt['F']) * 60
else:
dt['time'] = 0
if float(tt) > 0:
dt['time'] = float(tt)
tmln.append(i+"_"+str(dt['time']))
tim = tim + dt['time']
poscmds.append(dt)
else:
tmln.append(i)
delaytme = int(tim)+1
return tmln
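# Editor's sketch (not in the original file): a worked example of the timing
# rule used in tmecalc. For "G1 F1800.000 X10.0" starting from X=0, the largest
# axis displacement is 10 mm and the feed rate is 1800 mm/min, so the move is
# annotated with (10 / 1800) * 60 = 0.333... seconds.
def _example_move_time():
    maxdiff = 10.0  # largest axis displacement, mm
    feed = 1800.0  # F word, mm per minute
    return (maxdiff / feed) * 60  # seconds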
def gcodesplitter(gcr):
coordlog = readschedularjson()
gtba = []
ba = []
bba = []
tba = []
fl = 0
for i in gcr:
if re.match('^G', i):
try:
cc = re.split('_', i)
ci = cc[0]
ti = cc[1]
except:
ti = 0
coord = jogcoordparser(ci)
if 'X' in coord:
coordlog['X'].append(coord['X'])
if 'Y' in coord:
coordlog['Y'].append(coord['Y'])
if 'Z' in coord:
coordlog['Z'].append(coord['Z'])
if 'E' in coord:
coordlog['E'].append(coord['E'])
writeschedularjson(coordlog)
gtba.append(i)
fl = 1
else:
fl = 0
if fl == 1:
bba.append(i)
if fl == 0:
if len(bba)>0:
tmln = tmecalc(bba)
bba = []
tba.append(tmln)
tba.append(i)
if i == gcr[len(gcr)-1]:
if len(bba)>0 and re.match('^G', i):
tmln = tmecalc(bba)
tba.append(tmln)
reformatmacro = tba
return reformatmacro
def readtaskjobjson():
pcv = open('labbot.programtorun.json')
pcvdata = json.load(pcv)
pcv.close()
return pcvdata
def writeschedularjson(dat):
pcvdatar = json.dumps(dat)
pcv = open('schedular.json','w')
pcv.write(pcvdatar)
pcv.close()
def runmacro(dser,aser):
coordlog = {}
coordlog['X'] =[]
coordlog['Y'] =[]
coordlog['Z'] =[]
coordlog['E'] =[]
# resets the scheduler (schedular.json)
writeschedularjson(coordlog)
taskjob = readtaskjobjson()
#reformatmacro = gcodesplitter(taskjob['data'][str(taskjob['track'])])
reformatmacro = gcodesplitter(taskjob['program'])
macroready = putmacrolinestogether(reformatmacro)
for i in macroready:
runeachmacrocmd(i,dser,aser)
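# Editor's sketch (not in the original file): runmacro expects two open serial
# handles; judging from the G-code traffic, dser talks to the motion controller
# (Smoothie) and aser to the fluidics board. Port names and baud rates below
# are assumptions, not values from this repository.
def _example_runmacro_usage():
    import serial
    dser = serial.Serial('/dev/ttyACM0', 115200, timeout=1)
    aser = serial.Serial('/dev/ttyACM1', 9600, timeout=1)
    runmacro(dser, aser)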
def readschedularjson():
pcv = open('schedular.json')
pcvdata = json.load(pcv)
pcv.close()
return pcvdata
|
[
"rjdrouse@htsresources.com"
] |
rjdrouse@htsresources.com
|
1ed28b7939bf6d05618bd755d4ec4e877c264f2e
|
99e20672172fb118c2529d85f277134049d433c0
|
/ex23.py
|
c998a43bc2ce5b3a6814700b3ce75d78b54d7070
|
[] |
no_license
|
jamesmacak/learn-python
|
7b6e7fa50b9723261f6081baa50658cc10c01900
|
b19902de84e0bbd63162623e57336b62878074e1
|
refs/heads/master
| 2020-04-13T12:14:32.836765
| 2019-02-08T02:37:47
| 2019-02-08T02:37:47
| 163,196,677
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
import sys
script, input_encoding, error = sys.argv
def main(language_file, encoding, errors):
line = language_file.readline()
if line:
print_line(line, encoding, errors)
return main(language_file, encoding, errors)
def print_line(line, encoding, errors):
next_lang = line.strip()
raw_bytes = next_lang.encode(encoding, errors=errors)
cooked_string = raw_bytes.decode(encoding, errors=errors)
print(raw_bytes, "<===>", cooked_string)
languages = open("languages.txt", encoding="utf-8")
main(languages, input_encoding, error)
|
[
"26549968+jamesmacak@users.noreply.github.com"
] |
26549968+jamesmacak@users.noreply.github.com
|
f7577561d9f52a1764bc894694c3c2e5d6eeffb2
|
c910f446a630c2d8a6f20c98b0e40c59edd35453
|
/model/processing/utils/plot_WRF.py
|
946069585359948e46cb264dbf1047b8469e1741
|
[] |
no_license
|
percylink/wind-thesis
|
efd94f92cc776e9e55f877eb6a3dc7d2fb5c3bfe
|
d05c75e5cfb46324d591707bdd034105ed1df7d5
|
refs/heads/master
| 2020-05-19T22:27:20.032299
| 2015-01-04T20:21:25
| 2015-01-04T20:21:25
| 22,615,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,807
|
py
|
'''
PAL 5/8/2013 mod Aug 2014
make WRF plots in map perspective
'''
import cPickle
import datetime
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.basemap import Basemap
import numpy as np
import os
from scipy.interpolate import interp2d
from scipy.io import netcdf as nc
import shutil
import sys
class Plotter:
def __init__(self, file_in=None, file2=None, plot_interval=1, run_name=None, reg_diff='reg', domain='d01'):
# instance parameters
self.plot_interval = plot_interval
self.domain = domain
if run_name is None:
raise Exception('run_name is a required parameter')
self.run_name = run_name
if (reg_diff != 'reg') and (reg_diff != 'diff'):
raise Exception('reg_diff must be "reg" or "diff"')
self.reg_diff = reg_diff # whether netcdf file is regular model output, or diff of 2 runs
self.root_dir = os.path.join(os.environ['SCRATCH'], 'WRF', 'output', self.run_name)
if run_name[:2] == 'CA':
self.hr_diff = 8
elif run_name[:2] == 'KS':
self.hr_diff = 7
elif run_name[:2] == 'DK':
self.hr_diff = -1
print "file:", file_in, ", plot_interval:", self.plot_interval, ", run_name:", self.run_name, ", reg_diff:", reg_diff, ", domain:", self.domain
# physical parameters
self.latent_heat_evap = 2.27e6 # J/kg
# open netcdf file
if file_in is None:
raise Exception('file_in is a required parameter')
self.f = nc.netcdf_file(os.path.join(self.root_dir, file_in), 'r')
if self.reg_diff == 'diff':
self.f_abs = nc.netcdf_file(os.path.join(self.root_dir, file2), 'r') # file with absolute magnitudes for diff case
# get min/max lat and lon
self.minlon = np.floor(np.min(self.f.variables['XLONG'][0,:,:]/10.))*10.
self.maxlon = np.ceil(np.max(self.f.variables['XLONG'][0,:,:]/10.))*10.
self.minlat = np.floor(np.min(self.f.variables['XLAT'][0,:,:]/10.))*10.
self.maxlat = np.ceil(np.max(self.f.variables['XLAT'][0,:,:]/10.))*10.
# list of string times
self.times = [''.join(self.f.variables['Times'][ii,:]) \
for ii in xrange(self.f.variables['Times'].shape[0])]
# subsample masks for winds
self.subx = np.arange(0,getattr(self.f,'WEST-EAST_GRID_DIMENSION')-1,3)
self.suby = np.arange(0,getattr(self.f,'SOUTH-NORTH_GRID_DIMENSION')-1,3)
self.get_pressure_levels()
# get landmask
if self.reg_diff == 'reg':
self.landmask = self.f.variables['LANDMASK'][0, :, :]
elif self.reg_diff == 'diff':
self.landmask = self.f_abs.variables['LANDMASK'][0, :, :]
def get_pressure_levels(self):
if self.reg_diff == 'reg':
self.pressures = self.f.variables['PB'][0,:,0,0] # background pressure profile
elif self.reg_diff == 'diff':
self.pressures = self.f_abs.variables['PB'][0,:,0,0] # background pressure profile
self.maskP = {}
self.P = {}
# get index for ~950 hPa, ~850 hPa, ~500 hPa on unstaggered grid
dif = abs(self.pressures-95000.)
self.maskP['950'] = dif.argmin() # index of minimum difference
self.P['950'] = self.pressures[self.maskP['950']]/100. # actual pressure near 950, in hPa
dif = abs(self.pressures-85000.)
self.maskP['850'] = dif.argmin() # index of minimum difference
self.P['850'] = self.pressures[self.maskP['850']]/100. # actual pressure near 850, in hPa
dif = abs(self.pressures-50000.)
self.maskP['500'] = dif.argmin() # index of minimum difference
self.P['500'] = self.pressures[self.maskP['500']]/100. # actual pressure near 500, in hPa
# get index for ~950 hPa, ~850 hPa, ~500 hPa on vertically staggered grid
if self.reg_diff == 'reg':
self.eta = self.f.variables['ZNW'][0,:]
elif self.reg_diff == 'diff':
self.eta = self.f_abs.variables['ZNW'][0,:]
dif = abs(self.eta-0.95)
self.maskP['950s'] = dif.argmin() # index of minimum difference
self.P['950s'] = self.eta[self.maskP['950s']]*1000. # actual pressure near 950, in hPa
dif = abs(self.eta-0.85)
self.maskP['850s'] = dif.argmin() # index of minimum difference
self.P['850s'] = self.eta[self.maskP['850s']]*1000. # actual pressure near 850, in hPa
dif = abs(self.eta-0.5)
self.maskP['500s'] = dif.argmin() # index of minimum difference
self.P['500s'] = self.eta[self.maskP['500s']]*1000. # actual pressure near 500, in hPa
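# Editor's sketch (not in the original file): the nearest-level lookup above
# reduces to an argmin over absolute differences, e.g.
#   pressures = np.array([100000., 97500., 95000., 92500., 90000.])  # Pa
#   idx = np.abs(pressures - 95000.).argmin()   # -> 2
#   level_hPa = pressures[idx] / 100.           # -> 950.0 hPa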
def setup_map(self, subplot_index=None, fig=None):
f = self.f
a = fig.add_subplot(subplot_index)
m = Basemap(width=f.DX*1.2*getattr(f,'WEST-EAST_GRID_DIMENSION'),\
height=f.DY*1.2*getattr(f,'SOUTH-NORTH_GRID_DIMENSION'),resolution='l',\
projection='lcc',lat_1=f.TRUELAT1,lat_2=f.TRUELAT2,lat_0=f.CEN_LAT,lon_0=f.CEN_LON)
return a, m
def decorate_map(self, m=None, a=None, h=None, fig=None):
m.drawparallels(np.arange(self.minlat, self.maxlat, 5))
m.drawmeridians(np.arange(self.minlon, self.maxlon, 5))
m.drawcoastlines()
m.drawstates()
fig.colorbar(h, ax=a)
def make_filename(self, ii=None, variable=None):
if variable == 'smois':
fname = variable+'.png'
else:
fname = variable+'_'+self.times[ii]+'.png'
if self.reg_diff == 'reg':
file_out = os.path.join(self.plot_dir, variable, fname)
elif self.reg_diff == 'diff':
file_out = os.path.join(self.plot_dir, variable, 'diffCTRL_'+fname)
return file_out
def plot_PH(self, fig=None, subplot_index=None, pressure_level=None, contour_int=None, ii=None):
a, m = self.setup_map(subplot_index=subplot_index, fig=fig)
f = self.f
if type(pressure_level) is not str:
pressure_level = str(int(pressure_level))
x,y = m(f.variables['XLONG'][0, :, :],f.variables['XLAT'][0, :, :])
tmp = f.variables['PH'][ii, self.maskP[pressure_level], :, :] + f.variables['PHB'][ii, self.maskP[pressure_level], :, :]
#if contour_int is not None:
# h = m.pcolormesh(x, y, tmp) #, contour_int)
#else:
h = m.pcolormesh(x, y, tmp)
a.set_ylabel(str(round(self.P[pressure_level]))+' hPa')
a.set_title('geopotential')
self.decorate_map(m=m, a=a, h=h, fig=fig)
def plot_wind(self, fig=None, subplot_index=None, pressure_level=None, contour_int=None, ii=None):
a, m = self.setup_map(subplot_index=subplot_index, fig=fig)
f = self.f
if type(pressure_level) is not str:
pressure_level = str(int(pressure_level))
x,y = m(f.variables['XLONG'][0,:,:],f.variables['XLAT'][0,:,:])
xu,yu = m(f.variables['XLONG_U'][0,:,:],f.variables['XLAT_U'][0,:,:])
xv,yv = m(f.variables['XLONG_V'][0,:,:],f.variables['XLAT_V'][0,:,:])
ui = np.mean([f.variables['U'][ii, self.maskP[pressure_level], :, :-1],
f.variables['U'][ii, self.maskP[pressure_level], :, 1:]], axis=0)
vi = np.mean([f.variables['V'][ii, self.maskP[pressure_level], :-1, :],
f.variables['V'][ii, self.maskP[pressure_level], 1:, :]], axis=0)
#wspeed = (ui**2+vi**2)**0.5
#h = m.contourf(x, y, wspeed, contour_int)
#limits = [np.min(ui)]+range(-8, 9)+[np.max(ui)]
h = m.pcolormesh(x, y, ui) #, limits)
ur,vr = m.rotate_vector(ui, vi, f.variables['XLONG'][0,:,:], f.variables['XLAT'][0,:,:])
m.quiver(x[self.suby, :][:, self.subx],y[self.suby, :][:, self.subx],
ur[self.suby, :][:, self.subx], vr[self.suby, :][:, self.subx])
a.set_title('wind, m/s '+str(round(self.P[pressure_level]))+' hPa')
self.decorate_map(m=m, a=a, h=h, fig=fig)
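# Editor's sketch (not in the original file): U lives on a west-east staggered
# grid, so the np.mean above shifts it onto the mass (cell-centre) points,
# equivalent to u_center = 0.5 * (U[..., :-1] + U[..., 1:]); V is treated the
# same way along the south-north dimension.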
def plot_T(self, fig=None, subplot_index=None, pressure_level=None, contour_int=None, ii=None):
a, m = self.setup_map(subplot_index=subplot_index, fig=fig)
f = self.f
if type(pressure_level) is not str:
pressure_level = str(int(pressure_level))
x,y = m(f.variables['XLONG'][0,:,:],f.variables['XLAT'][0,:,:])
tmp = f.variables['T'][ii, self.maskP[pressure_level], :, :] + f.variables['T00'][ii]
h = m.pcolormesh(x,y,tmp)
a.set_title('pot. temp. at '+str(round(self.P[pressure_level]))+' hPa')
self.decorate_map(m=m, a=a, h=h, fig=fig)
def plot_q(self, fig=None, subplot_index=None, pressure_level=None, contour_int=None, ii=None):
a, m = self.setup_map(subplot_index=subplot_index, fig=fig)
f = self.f
if type(pressure_level) is not str:
pressure_level = str(int(pressure_level))
x,y = m(f.variables['XLONG'][0,:,:],f.variables['XLAT'][0,:,:])
tmp = f.variables['QVAPOR'][ii, self.maskP[pressure_level], :, :]
h = m.pcolormesh(x,y,tmp)
a.set_title('q at '+str(round(self.P[pressure_level]))+' hPa')
self.decorate_map(m=m, a=a, h=h, fig=fig)
def plot_latent(self, fig=None, subplot_index=None, contour_int=None, ii=None):
a, m = self.setup_map(subplot_index=subplot_index, fig=fig)
f = self.f
x,y = m(f.variables['XLONG'][0,:,:],f.variables['XLAT'][0,:,:])
tmp = f.variables['QFX'][ii, :, :] * self.latent_heat_evap
if contour_int is not None:
h = m.pcolormesh(x,y,tmp,contour_int)
else:
h = m.pcolormesh(x,y,tmp)
a.set_title('LH, W/m2')
self.decorate_map(m=m, a=a, h=h, fig=fig)
def plot_sensible(self, fig=None, subplot_index=None, contour_int=None, ii=None):
a, m = self.setup_map(subplot_index=subplot_index, fig=fig)
f = self.f
x,y = m(f.variables['XLONG'][0,:,:],f.variables['XLAT'][0,:,:])
tmp = f.variables['HFX'][ii, :, :]
if contour_int is not None:
h = m.pcolormesh(x,y,tmp,contour_int)
else:
h = m.pcolormesh(x,y,tmp)
a.set_title('SH, W/m2')
self.decorate_map(m=m, a=a, h=h, fig=fig)
def plot_smois_diff(self, fig=None, subplot_index=None, integrated=True, contour_int=None):
a, m = self.setup_map(subplot_index=subplot_index, fig=fig)
f = self.f
x,y = m(f.variables['XLONG'][0,:,:],f.variables['XLAT'][0,:,:])
smois_diff = f.variables['SMOIS'][-1,:,:,:]-f.variables['SMOIS'][0,:,:,:]
if integrated:
tmp = smois_diff.sum(axis=0)
else:
tmp = smois_diff[0, :, :]
if contour_int is not None:
h = m.pcolormesh(x,y,tmp,contour_int)
else:
h = m.pcolormesh(x,y,tmp)
if integrated:
a.set_title('d(SMOIS), column')
else:
a.set_title('d(SMOIS), surface')
self.decorate_map(m=m, a=a, h=h, fig=fig)
def plot_PH_winds(self, ii=None):
fig = plt.figure()
self.plot_PH(fig=fig, subplot_index=321, pressure_level='950', contour_int=20, ii=ii)
self.plot_PH(fig=fig, subplot_index=323, pressure_level='850', contour_int=20, ii=ii)
self.plot_PH(fig=fig, subplot_index=325, pressure_level='500', contour_int=20, ii=ii)
self.plot_wind(fig=fig, subplot_index=322, pressure_level='950', ii=ii)
self.plot_wind(fig=fig, subplot_index=324, pressure_level='850', ii=ii)
self.plot_wind(fig=fig, subplot_index=326, pressure_level='500', ii=ii)
fig.suptitle(self.times[ii])
file_out = self.make_filename(ii=ii, variable='PH_winds')
print file_out
fig.savefig(file_out)
plt.close(fig)
def plot_wind_only(self, ii=None):
fig = plt.figure()
self.plot_wind(fig=fig, subplot_index=131, pressure_level='950', ii=ii)
self.plot_wind(fig=fig, subplot_index=132, pressure_level='850', ii=ii)
self.plot_wind(fig=fig, subplot_index=133, pressure_level='500', ii=ii)
local_hour = int(self.times[ii][11:13])-self.hr_diff
if local_hour < 0:
local_hour += 24
fig.set_size_inches(15,5)
fig.suptitle('color = u-wind '+self.times[ii]+', local hour '+str(local_hour))
file_out = self.make_filename(ii=ii, variable='winds')
print file_out
fig.savefig(file_out)
plt.close(fig)
def plot_wind_surface(self, ii=None):
fig= plt.figure()
ax = []
vmin = 0
if self.reg_diff == 'reg':
vmax = 10
cmap = plt.get_cmap('jet')
elif self.reg_diff == 'diff':
vmax = 4
cmap = plt.get_cmap('GnBu')
for i_lev in xrange(3):
a, m = self.setup_map(subplot_index=131+i_lev, fig=fig)
ax.append(a)
f = self.f
# get x and y coords
x,y = m(f.variables['XLONG'][0,:,:], f.variables['XLAT'][0,:,:])
xu,yu = m(f.variables['XLONG_U'][0,:,:], f.variables['XLAT_U'][0,:,:])
xv,yv = m(f.variables['XLONG_V'][0,:,:], f.variables['XLAT_V'][0,:,:])
# make approximate x and y grids that are regular (ignore slight variations, which are usually < 10 m)
x_row = np.mean(x, axis=0)
x_approx = np.tile(x_row, (x.shape[0], 1))
y_col = np.mean(y, axis=1)
y_approx = np.tile(np.array([y_col]).T, (1, y.shape[1]))
# get u and v at center grid points
ui = np.mean([f.variables['U'][ii, i_lev, :, :-1],
f.variables['U'][ii, i_lev, :, 1:]], axis=0)
vi = np.mean([f.variables['V'][ii, i_lev, :-1, :],
f.variables['V'][ii, i_lev, 1:, :]], axis=0)
# rotate vectors to x-y projection
ur,vr = m.rotate_vector(ui, vi, f.variables['XLONG'][0,:,:], f.variables['XLAT'][0,:,:])
speed = (ur**2 + vr**2)**0.5
# plot
h = m.pcolormesh(x, y, speed, vmin=vmin, vmax=vmax, cmap=cmap)
m.streamplot(x_approx, y_approx, ur, vr, color='purple')
a.set_title('level '+str(i_lev))
self.decorate_map(m=m, a=a, h=h, fig=fig)
a.contour(x, y, self.landmask, [.5], colors='k')
local_hour = int(self.times[ii][11:13])-self.hr_diff
if local_hour < 0:
local_hour += 24
fig.set_size_inches(20,5)
fig.suptitle('wind (m/s), local hour '+str(local_hour))
file_out = self.make_filename(ii=ii, variable='winds_surface')
print file_out
fig.savefig(file_out)
#plt.show()
#1/0
plt.close(fig)
def plot_PH_only(self, ii=None):
fig = plt.figure()
self.plot_PH(fig=fig, subplot_index=131, pressure_level='950', ii=ii)
self.plot_PH(fig=fig, subplot_index=132, pressure_level='850', ii=ii)
self.plot_PH(fig=fig, subplot_index=133, pressure_level='500', ii=ii)
fig.suptitle(self.times[ii])
file_out = self.make_filename(ii=ii, variable='PH')
print file_out
fig.savefig(file_out)
plt.close(fig)
def plot_T_q(self, ii=None):
fig = plt.figure()
self.plot_T(fig=fig, subplot_index=221, pressure_level='950', ii=ii)
self.plot_T(fig=fig, subplot_index=223, pressure_level='850', ii=ii)
self.plot_q(fig=fig, subplot_index=222, pressure_level='950', ii=ii)
self.plot_q(fig=fig, subplot_index=224, pressure_level='850', ii=ii)
fig.suptitle(self.times[ii])
file_out = self.make_filename(ii=ii, variable='T_q')
print file_out
fig.savefig(file_out)
plt.close(fig)
def plot_T_surface(self, ii=None):
fig= plt.figure()
ax = []
for i_lev in xrange(2):
a, m = self.setup_map(subplot_index=121+i_lev, fig=fig)
ax.append(a)
f = self.f
x,y = m(f.variables['XLONG'][0,:,:], f.variables['XLAT'][0,:,:])
if i_lev == 0:
T = f.variables['TSK'][ii, :, :]
level = 'skin'
elif i_lev == 1:
T = f.variables['T2'][ii, :, :]
level = '2 m'
h = m.pcolormesh(x, y, T)
a.set_title(level+' temperature')
self.decorate_map(m=m, a=a, h=h, fig=fig)
#a.contour(x, y, self.landmask, [0.99], colors='k')
local_hour = int(self.times[ii][11:13])-self.hr_diff
if local_hour < 0:
local_hour += 24
fig.set_size_inches(10,5)
fig.suptitle('surface temperature, local hour '+str(local_hour))
file_out = self.make_filename(ii=ii, variable='T_surface')
print file_out
fig.savefig(file_out)
#plt.show()
#1/0
plt.close(fig)
def plot_sfc_flx(self, ii=None):
fig = plt.figure()
self.plot_latent(fig=fig, subplot_index=121, ii=ii)
self.plot_sensible(fig=fig, subplot_index=122, ii=ii)
fig.set_size_inches(15,7)
fig.suptitle(self.times[ii])
file_out = self.make_filename(ii=ii, variable='sfc_flx')
print file_out
fig.savefig(file_out)
plt.close(fig)
def plot_smois_diff_driver(self):
fig = plt.figure()
self.plot_smois_diff(fig=fig, subplot_index=121, integrated=False)
self.plot_smois_diff(fig=fig, subplot_index=122, integrated=True)
fig.set_size_inches(15,7)
file_out = self.make_filename(variable='smois')
print file_out
fig.savefig(file_out)
plt.close(fig)
def plot_smois_init(self):
fig = plt.figure()
a, m = self.setup_map(subplot_index=111, fig=fig)
f = self.f
x,y = m(f.variables['XLONG'][0,:,:],f.variables['XLAT'][0,:,:])
smois_init = f.variables['SMOIS'][0,0,:,:].copy() # surface
smois_masked = np.ma.masked_where(self.landmask == 0, smois_init)
#contours = list(np.arange(0.08, 0.4, 0.04))
#if smois_init.min() < contours[0]:
# contours = [smois_init.min()]+contours
#if smois_init[smois_init < 1].max() > contours[-1]: # don't use 1 as max
# contours = contours+[smois_init.max()]
h = m.pcolormesh(x, y, smois_masked) #, contours)
a.set_title('initial SMOIS, surface')
self.decorate_map(m=m, a=a, h=h, fig=fig)
#1/0
if not os.path.exists(os.path.join(self.plot_dir, 'smois')):
os.mkdir(os.path.join(self.plot_dir, 'smois'))
fig.savefig(os.path.join(self.plot_dir, 'smois', 'initial_smois.png'))
def run(self, plot_list=[]):
if not isinstance(plot_list, list):
plot_list = [plot_list]
# make output directories and call plotting functions
plot_dir = os.path.join(self.root_dir, 'plots', self.domain)
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
self.plot_dir = plot_dir
if self.reg_diff == 'reg':
self.plot_smois_init()
for plot in plot_list:
#if os.path.exists(os.path.join(plot_dir, plot)):
# shutil.rmtree(os.path.join(plot_dir, plot))
#os.mkdir(os.path.join(plot_dir, plot))
if not os.path.exists(os.path.join(plot_dir, plot)):
os.mkdir(os.path.join(plot_dir, plot))
if plot == 'smois':
self.plot_smois_diff_driver()
for ii in range(len(self.times))[0::self.plot_interval]:
print self.times[ii]
if plot == 'PH_winds':
self.plot_PH_winds(ii=ii)
elif plot == 'T_q':
self.plot_T_q(ii=ii)
elif plot == 'T_surface':
self.plot_T_surface(ii=ii)
elif plot == 'winds':
self.plot_wind_only(ii=ii)
elif plot == 'winds_surface':
self.plot_wind_surface(ii=ii)
elif plot == 'PH':
self.plot_PH_only(ii=ii)
elif plot == 'sfc_flx':
self.plot_sfc_flx(ii=ii)
#if ii==3:
# 1/0
if __name__ == "__main__":
if len(sys.argv) >= 2:
file_in = sys.argv[1]
else:
raise Exception("Usage: python plot_WRF.py <wrfout file name>")
if len(sys.argv) >2:
plot_interval = int(sys.argv[2])
else:
plot_interval = 1
p = Plotter(file_in=file_in, plot_interval=plot_interval)
p.run(['winds', 'PH', 'T_q'])
|
[
"percy@Percys-MacBook-Pro.local"
] |
percy@Percys-MacBook-Pro.local
|
d5a26391dd2100e00077808d2ee65ebf6939b9ce
|
d1449d0a1778ecc5eea24fa28e8541407345a740
|
/P02_P04_basics/python_building_functions_01_20200813.py
|
bc7bbfeab0398a4fb343af5553ffc4a6c605c58f
|
[] |
no_license
|
EdvardsAm/RTR108_2020
|
7da518978ee26f4d0ab982f204b519ab987de8f8
|
58309d2fc13211ea2424e8c9098368218f997e8b
|
refs/heads/master
| 2020-12-23T13:42:50.845861
| 2020-08-19T10:57:13
| 2020-08-19T10:57:13
| 237,170,468
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 114
|
py
|
def print_lyrics():
print("Im a lumberjack, and Im okay.")
print('I sleep all night and I work all day.')
|
[
"edvardsamelko@gmail.com"
] |
edvardsamelko@gmail.com
|
ede762b0243f59ef13dba36398b3d9c0e2a4fcbe
|
63e6ed1f76daba3dc299edf4cd4b9714a7863804
|
/HW_8_3.py
|
d292ed5ff404f15ee799134b78668efcbcb8a67c
|
[] |
no_license
|
lukowar/study
|
d0cc216113d78157113f6fa53f8a8e0b7f048ac0
|
6552495cd62786c70538774d23ec7a91b9539dcc
|
refs/heads/master
| 2020-12-26T10:18:48.128644
| 2020-03-12T00:53:23
| 2020-03-12T00:53:23
| 237,479,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 115
|
py
|
def shorten_to_date(long_date):
return long_date.split(',')[0]
print(shorten_to_date("Wed September 1, 3am"))
|
[
"arsen.lukovskyi@gmail.com"
] |
arsen.lukovskyi@gmail.com
|
f9806c659eca0a8dfef49c696328b04316722b3f
|
38677ccd3951547c9eb0ec5e49026ee734fd3eea
|
/gupta_sim/linear_dyna.py
|
658a09413040cd2150bca5e933e7ec3e913f95f2
|
[] |
no_license
|
PaulEcoffet/gupta_replay_model_master1_internship
|
40c449f1c5b18532071fa72e24531c15342e12de
|
2abc735dfa43a54fe50458ca6f7551e832506a91
|
refs/heads/master
| 2021-03-19T16:01:37.706135
| 2016-06-23T13:27:18
| 2016-06-23T13:27:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,844
|
py
|
# -*- coding: utf8 -*-
from __future__ import print_function
import numpy as np
from Queue import PriorityQueue
# From Sutton et al., 2012
#########################################################
# F numerical radius should be < 1 for convergence !!!! #
# sum_inf alpha = inf #
# sum_inf alpha**2 < inf #
# alpha = alpha_0 * N_0 / (N_0 + t)
#########################################################
gamma = 0.8
alpha_0 = 0.07
N_0 = 1000
def softmax(score, tau, straight_bias=False):
"""
Choose which action to take using a softmax, combined with a straight-motion
bias adapted from Minija Tamosiunaite et al. (2008) when `straight_bias` is True.
Note that I no longer use the straight bias, which led to weird behaviours;
I train the agent in a TrainingEnvironment instead, which works much better.
The TrainingEnvironment was also used in Gupta et al. (2010).
"""
exp_score = np.exp(score/tau)
assert (not np.any(exp_score == float("+inf")))
prob1 = exp_score/np.sum(exp_score)
if straight_bias:
prob2 = np.array([0.5, 0.183, 0.183, 0.0665, 0.0665, 0.01]) # Geometric sequence with u0 = 0.5, 2 * sum(u_i, i=1..(len(action)-1)/2) = 0.5
else:
prob2 = prob1
res = np.random.choice(len(score), p=0.5*prob1+0.5*prob2)
return res
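# Editor's sketch (not in the original file): a quick numeric check of the
# softmax weighting with an assumed temperature tau=20, matching the call
# site in ep_lindyn_mg below.
def _example_softmax_probs():
    score = np.array([10., 0., -5., -5., 0., -10.])
    exp_score = np.exp(score / 20.)
    return exp_score / np.sum(exp_score)  # highest-scoring action gets the largest weight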
# TODO : Stop the day / nb_ep_per_day thing and move this logic in the environment
def ep_lindyn_mg(env, theta, F, b, nb_day, nb_ep_per_day, pqueue_in=None, step=0, log=None):
"""
Does a linear dyna variation from Sutton et al. (2012) with replay.
env - The environment to use
theta - The weight vector to compute V(Phi)
F - The transition tables from Phi to Phi', one per action
b - A reward matrix which gives, for each value of phi and each action, the
expected reward. For instance, if the 32nd place cell is at the center of
the environment, and action 8 is "going south", then because it's
forbidden to go south at the center of the environment, b[32][8] will
converge to -10. b is somewhat the Q(s, a) matrix.
nb_day - number of "days" before ending the training. Days can also be
understood as the number of replay sessions.
nb_ep_per_day - number of times to do the task before going into "sleep mode".
The task is done `nb_day` * `nb_ep_per_day` times in total.
replay_max - Number of experienced feature activations to replay before
waking up (in this implementation it is read from env.p rather than passed in).
log - A list in which every place cell activation is recorded, along with
the position of the agent and the position of the goal. While the
agent sleeps, only the feature which is reactivated is logged.
See Sutton et al. 2012 Dyna-Style Planning with Linear Function
Approximation and Prioritized Sweeping for details about the algorithm (it
is Algorithm 3 in the article).
"""
if pqueue_in:
pqueue = pqueue_in
else:
pqueue = PriorityQueue()
for day in range(nb_day):
for episode in range(nb_ep_per_day):
print ("day", day, ", episode", episode)
if log is not None:
log.append("session_begin")
env.reinit()
while not env.end:
step += 1
alpha = alpha_0 * (N_0 + 1)/(N_0 + step)
phi = env.get_features()
#print("theta")
#print(theta)
q = np.array([-np.inf for i in env.action]) # Q of Q-learning
for a in env.possible_actions(): # The impossible actions stay to -inf
q[a] = np.inner(b[a], phi) + gamma * np.inner(theta.T, np.dot(F[a], phi))
a = softmax(q, 20, straight_bias=False)
phi_n, r = env.do_action(a)
delta = r + gamma * np.inner(theta, phi_n) - np.inner(theta, phi)
theta = theta + alpha * delta * phi
F[a] = F[a] + alpha * np.outer((phi_n - np.dot(F[a], phi)), phi)
b[a] = b[a] + alpha * (r - np.inner(b[a], phi)) * phi
for i in range(len(phi)):
if phi[i] != 0:
pqueue.put((-np.abs(delta * phi[i]), i))
has_replayed = False
if log is not None:
log.append([env.get_features(), np.copy(env.pos), np.copy(env.goals[0]), theta])
if env.p > 0:
log.append("sleep")
has_replayed = True
# Replay
p = env.p # Number of replay max
while not pqueue.empty() and p > 0:
unused_prio, i = pqueue.get()
if log is not None:
activation = np.zeros(env.pc.nb_place_cells)
activation[i] = 1
log.append([activation, np.copy(env.pos), np.copy(env.goals[0]), theta])
for j in range(F.shape[2]):
if np.any(F[:, i, j] != 0) or np.any(F[:, j, i] != 0): # Use possible future locations as well as possible past locations; this should not hinder convergence
#raw_input()
delta = - np.inf
for a in range(len(env.action)):
cur = b[a][j] + gamma * np.inner(theta, F[a, j, :]) - theta[j]
if cur > delta:
delta = cur
theta[j] = theta[j] + alpha * delta
pqueue.put((-np.abs(delta), j))
p -= 1
if log is not None and has_replayed:
log.append("end")
return theta, b, F, pqueue, step
|
[
"ecoffet.paul@gmail.com"
] |
ecoffet.paul@gmail.com
|