blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e848dbcd393d04149e44c247f5a4581502207f3c | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_3/chnjea007/question3.py | 0bc5afd1bdca7889b5168b1f137bc1c573eb5d55 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | # Assignment 3 question 3
message = input("Enter the message:\n")
repeat = eval(input("Enter the message repeat count:\n"))
thickness = eval(input("Enter the frame thickness:\n"))
for r in range (thickness):
print("|" * r,"+", "-" * ((len(message) + 2) + (thickness * 2) - 2 - r * 2), "+", "|" * r, sep = "")
for r in range (repeat):
print ("|" * thickness, message, "|" * thickness)
for r in range (thickness - 1, -1, -1):
print("|" * r,"+", "-" * ((len(message) + 2) + (thickness * 2) - 2 - r * 2), "+", "|" * r, sep = "") | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
2b2d0ae0edba4a1583fff16fce9629c63291e3dc | 520b9a66a71e16c77beeaa28d9bc59a03cf77e79 | /shop/models.py | 92ecbc32c3cb702262f8b6657c6672214f313b16 | [] | no_license | gmachielsen/MadebyLoni | 0e40a6cc970a6ef7cc414e6e1f9e640dcbba5076 | e13706e7d61780ac0b1af9f6f8caf7f908d9ace3 | refs/heads/master | 2022-12-10T18:37:25.522641 | 2020-02-04T21:48:55 | 2020-02-04T21:48:55 | 238,231,154 | 0 | 1 | null | 2022-12-08T03:33:22 | 2020-02-04T14:52:57 | JavaScript | UTF-8 | Python | false | false | 2,476 | py | from django.db import models
from django.urls import reverse
from django.conf import settings
# Create your models here.
class Category(models.Model):
name = models.CharField(max_length=250, unique=True)
slug = models.SlugField(max_length=250, unique=True)
description = models.TextField(blank=True)
image = models.ImageField(upload_to='category', blank=True)
class Meta:
ordering = ('name',)
verbose_name = 'category'
verbose_name_plural = 'categories'
def get_url(self):
return reverse('shop:products_by_category', args=[self.slug])
def __str__(self):
return '{}'.format(self.name)
class Product(models.Model):
SIZES = (
('X', 'No size selected'),
('4', 'Size 4'),
('6', 'Size 6'),
('8', 'Size 8'),
('10','Size 10'),
('12', 'Size 12'),
('14', 'Size 14'),
('16', 'Size 16'),
)
TYPES = (
('X', 'Select type'),
('D', 'Dresses'),
('B', 'Bracelets')
)
COLOURS = (
('X', 'Select colour'),
('Black', 'Black'),
('White', 'White'),
('Red', 'Red'),
('Orange', 'Orange'),
('Yellow', 'Yellow'),
('Green', 'Green'),
('Blue', 'Blue'),
('Purple', 'Purple'),
('Pink', 'Pink'),
('Brown', 'Brown'),
('Different', 'Different'),
)
name = models.CharField(max_length=250, unique=True)
slug = models.SlugField(max_length=250, unique=True)
description = models.TextField(blank=True)
category = models.ForeignKey(Category, on_delete=models.CASCADE)
price = models.DecimalField(max_digits=10, decimal_places=2)
image = models.ImageField(upload_to='product', blank=True)
size = models.CharField(blank=True, null=True, max_length=1, default='X', choices=SIZES)
type = models.CharField(blank=True, null=True, max_length=1, default='X', choices=TYPES)
color = models.CharField(blank=True, null=True, max_length=1, default='X', choices=COLOURS)
stock = models.IntegerField()
available = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
views = models.IntegerField(default=0)
class Meta:
ordering = ('name',)
verbose_name = 'product'
verbose_name_plural = 'products'
def get_url(self):
return reverse('shop:ProdCatDetail', args=[self.category.slug, self.slug])
def __str__(self):
return '{}'.format(self.name)
class ProductImages(models.Model):
productpictures = models.ForeignKey(Product, related_name='product', on_delete=models.CASCADE)
images = models.ImageField(upload_to='product_images', blank=True)
| [
"g.machielsen@gmail.com"
] | g.machielsen@gmail.com |
6d2be8063c9baf1a1d101e3bc3a6370ab68af4a9 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_Class941.py | b998894584f82c1b8d88727a51595834cf9c1b86 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,927 | py | # qubit number=5
# total number=37
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[2]) # number=34
prog.cz(input_qubit[3],input_qubit[2]) # number=35
prog.h(input_qubit[2]) # number=36
prog.y(input_qubit[2]) # number=33
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(1):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[3]) # number=30
prog.cz(input_qubit[4],input_qubit[3]) # number=31
prog.h(input_qubit[3]) # number=32
prog.h(input_qubit[2]) # number=29
prog.cx(input_qubit[1],input_qubit[0]) # number=22
prog.cx(input_qubit[3],input_qubit[1]) # number=25
prog.x(input_qubit[0]) # number=23
prog.cx(input_qubit[1],input_qubit[0]) # number=24
prog.x(input_qubit[1]) # number=10
prog.x(input_qubit[2]) # number=11
prog.x(input_qubit[3]) # number=12
prog.x(input_qubit[1]) # number=27
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
# circuit end
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('statevector_simulator')
sample_shot =7924
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class941.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
826f136ed78cf1861b035cddfa114d9947177a7a | 39fa403d46a4456a07c761e1aaa8af2d418c5f87 | /apps/data_taking_scripts/2015-10-jpl-park/sweep_and_stream_at_min_s21_two_groups.py | 8cc78c9c3ad1e76c37429bd861fd77636dfed4a8 | [
"BSD-2-Clause"
] | permissive | vapor36/kid_readout | 72d94d96e964d6a2eef3aa57ed6fc814946cfe46 | 07202090d468669200cab78297122880c1c03e87 | refs/heads/master | 2020-12-12T13:32:47.267337 | 2018-11-11T15:36:40 | 2018-11-11T15:36:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,481 | py | __author__ = 'gjones'
import time
import sys
import numpy as np
from kid_readout.roach import heterodyne
from kid_readout.utils import data_file, sweeps
from kid_readout.equipment import hittite_controller, lockin_controller
hittite = hittite_controller.hittiteController(addr='192.168.0.200')
lockin = lockin_controller.lockinController()
print lockin.get_idn()
ri = heterodyne.RoachHeterodyne(adc_valon='/dev/ttyUSB0')
ri.iq_delay = 0
ri.set_lo(1410.0)
#group_1_lo = 1020.0
#group_2_lo = 1410.0
#all_f0s = np.load('/data/readout/resonances/2016-01-13-jpl-2015-10-park-dark-32-resonances-split-at-1300.npy') -0.5
#group_1_f0 = all_f0s[all_f0s < 1300]
#group_2_f0 = all_f0s[all_f0s > 1300]
"""
all_f0s = np.load('/data/readout/resonances/2016-02-12-jpl-park-100nm-32-resonances.npy')
group_1_f0 = all_f0s[all_f0s<1500]
group_2_f0 = all_f0s[all_f0s>1800]
group_1_lo = 1220.0
group_2_lo = 1810.0
"""
all_f0s = np.load('/data/readout/resonances/2016-02-24-jpl-park-2015-10-40nm-al-niobium-gp-two-groups.npy')
group_1_f0 = all_f0s[all_f0s<1300]
group_2_f0 = all_f0s[all_f0s>1300]
group_1_lo = 1030.0
group_2_lo = 1420.0
#responsive_resonances = np.load('/data/readout/resonances/2015-11-26-jpl-nevins-responsive-resonances.npy')
suffix = "sweep_and_stream"
mmw_source_modulation_freq = ri.set_modulation_output(rate=7)
mmw_source_frequency = -1 #148e9
hittite.set_freq(mmw_source_frequency/12.0)
mmw_atten_turns = (4.5, 4.5)
#print "modulating at: {}".format(mmw_source_modulation_freq),
atonce = 16
df = data_file.DataFile(suffix=suffix)
df.nc.mmw_atten_turns = mmw_atten_turns
for group_num,(lo,f0s) in enumerate(zip([group_1_lo,group_2_lo],[group_1_f0,group_2_f0])):
print "group",group_num,"lo",lo,"min f0",f0s.min()
ri.set_lo(lo)
nsamp = 2**16
step = 1
nstep = 64
f0binned = np.round(f0s * nsamp / 512.0) * 512.0 / nsamp
offset_bins = np.arange(-(nstep + 1), (nstep + 1)) * step
offsets = offset_bins * 512.0 / nsamp
measured_freqs = sweeps.prepare_sweep(ri, f0binned, offsets, nsamp=nsamp)
for atten_index,dac_atten in enumerate([0,20]):
print "at dac atten", dac_atten
ri.set_dac_atten(dac_atten)
ri.set_modulation_output('low')
df.log_hw_state(ri)
df.log_adc_snap(ri)
sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=2)
df.add_sweep(sweep_data)
fmins = []
for k in range(len(f0s)):
fr, s21, errors = sweep_data.select_index(k)
fmins.append(fr[np.abs(s21).argmin()])
fmins.sort()
ri.add_tone_freqs(np.array(fmins))
ri.select_bank(ri.tone_bins.shape[0] - 1)
# ri.set_tone_freqs(responsive_resonances[:32],nsamp=2**15)
ri.select_fft_bins(range(len(f0s)))
ri._sync()
time.sleep(0.5)
print "taking data with source on"
# raw_input("press enter to start")
ri.set_modulation_output('low')
df.log_hw_state(ri)
nsets = len(f0s) / atonce
tsg = None
for iset in range(nsets):
selection = range(len(f0s))[iset::nsets]
ri.select_fft_bins(selection)
ri._sync()
time.sleep(0.4)
t0 = time.time()
dmod, addr = ri.get_data(256) # about 30 seconds of data
# x, y, r, theta = lockin.get_data()
tsg = df.add_timestream_data(dmod, ri, t0, tsg=tsg)
df.sync()
print "taking sweep with source on"
ri.set_modulation_output('high')
df.log_hw_state(ri)
df.log_adc_snap(ri)
sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=2)
df.add_sweep(sweep_data)
fmins = []
for k in range(len(f0s)):
fr, s21, errors = sweep_data.select_index(k)
fmins.append(fr[np.abs(s21).argmin()])
fmins.sort()
ri.add_tone_freqs(np.array(fmins))
ri.select_bank(ri.tone_bins.shape[0] - 1)
# ri.set_tone_freqs(responsive_resonances[:32],nsamp=2**15)
ri.select_fft_bins(range(len(f0s)))
ri._sync()
time.sleep(0.5)
print "taking timestream with source off"
# raw_input("press enter to start")
ri.set_modulation_output('high')
df.log_hw_state(ri)
nsets = len(f0s) / atonce
tsg = None
for iset in range(nsets):
selection = range(len(f0s))[iset::nsets]
ri.select_fft_bins(selection)
ri._sync()
time.sleep(0.4)
t0 = time.time()
dmod, addr = ri.get_data(256) # about 30 seconds of data
# x, y, r, theta = lockin.get_data()
tsg = df.add_timestream_data(dmod, ri, t0, tsg=tsg)
df.sync()
#raw_input("finished")
print "taking data with source modulated"
ri.set_modulation_output(7)
df.log_hw_state(ri)
nsets = len(f0s) / atonce
tsg = None
for iset in range(nsets):
selection = range(len(f0s))[iset::nsets]
ri.select_fft_bins(selection)
ri._sync()
time.sleep(0.4)
t0 = time.time()
dmod, addr = ri.get_data(16) # about 2 seconds of data
x, y, r, theta = lockin.get_data()
tsg = df.add_timestream_data(dmod, ri, t0, tsg=tsg,zbd_voltage=r,mmw_source_freq=mmw_source_frequency)
df.sync()
#ri.set_modulation_output('high')
df.close() | [
"glenn.caltech@gmail.com"
] | glenn.caltech@gmail.com |
6c0a86308a5a8a1586671cc3b900cc1e6306f8d9 | 028d788c0fa48a8cb0cc6990a471e8cd46f6ec50 | /Python-Web/pythons/pythons/pythons_auth/forms.py | 37236c9031fdea32b32534fdb04a213e7b4490ee | [] | no_license | Sheko1/SoftUni | d6b8e79ae545116f4c0e5705ad842f12d77a9c9d | a9fbeec13a30231b6a97c2b22bb35257ac1481c0 | refs/heads/main | 2023-07-13T15:39:48.826925 | 2021-08-21T12:51:02 | 2021-08-21T12:51:02 | 317,266,200 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import AuthenticationForm
from django.core.exceptions import ValidationError
class LoginForm(AuthenticationForm):
user = None
username = forms.CharField(
max_length=150
)
password = forms.CharField(
widget=forms.PasswordInput()
)
def clean(self):
super().clean()
self.user = authenticate(username=self.cleaned_data['username'], password=self.cleaned_data['password'])
if not self.user:
raise ValidationError('Wrong username or password!!')
def save(self):
return self.user
| [
"martinkypar@gmail.com"
] | martinkypar@gmail.com |
92e841ff0bf1be4e124ac812dc415c948c1bca8e | f51c6d0cebb27c377ce9830deec4b727b9b2ee90 | /AI/04_Plot/heatmap/heatmap1.py | a737755c2ee0a26831af3838ed9931f17c38f058 | [] | no_license | dbbudd/Python-Experiments | 1c3c1322583aaaf2016a2f2f3061e6d034c5d1c8 | b6d294bf11a5c92b8578d16aa2f63cc27fc47b07 | refs/heads/master | 2020-04-17T02:21:36.693593 | 2019-01-17T00:18:34 | 2019-01-17T00:18:34 | 166,130,283 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | #!/usr/bin/env python
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.patches as mpatches
import matplotlib.cm
import pylab as pl
data = pl.random((25,25)) # 25x25 matrix of values
fig = plt.figure()
ax = fig.add_subplot(111, xlim = (0,3), ylim = (0,3))
pl.pcolor(data)
pl.colorbar()
pl.show() | [
"dbbudd@gmail.com"
] | dbbudd@gmail.com |
550f9a58d50db74c3fb57272a3a0564aca4205a3 | 3c52eda991b4a37e2b807dd1e05f07139637c758 | /examples/client_server.py | 080e7b03ea846b1f8525127d5609ed971d3c8e54 | [
"Apache-2.0"
] | permissive | pgiri/pycos | ebea05b045f15f505eff5cf175798c0cf2b4a1db | 6594c311a02490ae0701fa741b508c335f305816 | refs/heads/master | 2022-12-25T21:53:15.091319 | 2022-12-18T17:27:05 | 2022-12-18T17:27:05 | 91,977,091 | 52 | 9 | NOASSERTION | 2020-02-19T01:47:09 | 2017-05-21T17:58:23 | Python | UTF-8 | Python | false | false | 888 | py | #!/usr/bin/env python
# client and server tasks communicating with message passing
# (asynchronous concurrent programming);
# see https://pycos.sourceforge.io/pycos.html for details.
import random
import pycos
def server_proc(task=None):
task.set_daemon()
while True:
msg = yield task.receive()
print('Received %s' % (msg))
def client_proc(server, n, task=None):
global msg_id
for i in range(3):
yield task.suspend(random.uniform(0.5, 3))
# although multiple clients execute this method, locking is not
# necessary, as a task not preempted (unlike Python threads) and runs
# till 'yield'
msg_id += 1
server.send('msg_id %d: client %d, msg %d' % (msg_id, n, i))
msg_id = 0
# create server
server = pycos.Task(server_proc)
# create 10 clients
for i in range(10):
pycos.Task(client_proc, server, i)
| [
"pgiri@yahoo.com"
] | pgiri@yahoo.com |
daea595ccbb9c88ee865036583ea376a5607127f | 00e804d17f4882e10c192bccebc6f90d60a78162 | /test/verif.py | 1762cadff3a340214e58ca062096aac3f716d3b7 | [] | no_license | devnandito/dataProcess | 2b57006b5f39c47b292e18293db9bdecdfee0744 | b5da91184bf6d8702f74cabbef46e2b4b25b16ac | refs/heads/master | 2023-02-16T21:03:07.412468 | 2021-01-18T21:32:26 | 2021-01-18T21:32:26 | 324,022,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,499 | py | import pandas as pd, json, csv, re, os, sys
from datetime import datetime, timedelta
if __name__ == '__main__':
now = datetime.now()
ihour = now.hour
iminute = now.minute
isecond = now.second
start = timedelta(hours=ihour, minutes=iminute, seconds=isecond)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
while True:
initial = input('Enter options start/quit:')
initial = initial.lower()
if initial == 'quit':
break
elif initial == 'start':
file_log = os.path.join(BASE_DIR, 'set/vjsonveriflog.txt')
f = open(file_log, "r")
f1 = f.readlines()
list_log = []
for x in f1:
list_log.append(x)
count = int(list_log[0])
fname = list_log[1]
f.close()
file_json = input('Enter file json:')
output_json = input('Enter output file:')
data_frame1 = pd.read_json(file_json).to_dict(orient='records')
data_join = list()
line = 0
counts = dict()
activities = dict()
sources = dict()
for row in data_frame1:
line += 1
ci = row['ci']
if ci not in counts:
counts[ci] = 1
activities[ci] = row['activity']
sources[ci] = row['source']
data_join.append({
'ci': row['ci'],
'ruc': row['ruc'],
'name': row['name'],
'activity': activities[ci],
'status': row['status'],
'salary': row['salary'],
'source': sources[ci],
'count': counts[ci]
})
print('New: {}, Line: {}'.format(ci,line))
else:
counts[ci] = counts[ci] + 1
activities[ci] = activities[ci] + '/' + row['activity'] + '-' + str(counts[ci])
sources[ci] = sources[ci] + '/' + row['source']
for i in range(len(data_join)):
if data_join[i]['ci'] == ci:
data_join[i]['activity'] = activities[ci]
data_join[i]['source'] = sources[ci]
data_join[i]['count'] = counts[ci]
else:
break
print('Duplicated: {}, Line: {}'.format(ci,line))
ofile = os.path.join(BASE_DIR, 'set/results/'+output_json)
with open(ofile, 'w+') as outfile:
json.dump(data_join, outfile, indent=4)
now = datetime.now()
ohour = now.hour
ominute = now.minute
osecond = now.second
end = timedelta(hours=ohour, minutes=ominute, seconds=osecond)
timerun = end - start
message = '''
Time start: {} \n
Runtime: {} \n
Time finish: {} \n
File: {}
'''.format(start, timerun, end, ofile)
print(message)
count += 1
f = open(file_log, 'w')
f.write(str(count)+'\n')
f.write(str(list_log[1]))
f.close()
else:
continue
| [
"fhersa@gmail.com"
] | fhersa@gmail.com |
40c97a61ac24b8475ed30e89689f8f2dea1aff73 | 1aa94863e9c2667ab937ebc23bcbe467c1c17424 | /homeworks/hw_op_1/parentheses.py | 306f3a5a08bf894fce5a98212565cfb7a0f29036 | [] | no_license | cyr1z/learn_python | 9d6648f10a1babd3bcff7cb3e19e63942518953a | 188ae51737f3b47e9acaaebf9a91530b2fa60194 | refs/heads/master | 2022-12-09T05:24:53.494187 | 2020-09-05T20:17:20 | 2020-09-05T20:17:20 | 281,864,043 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | # 3. Write a Python class to find validity of a string of parentheses, '(', ')', '{', '}', '[' and '].'
# ' These brackets must be close in the correct order, for example "()" and "()[]{}" are valid '
# but "[)", "({[)]" and ' '"{{{" are invalid.
class Parentheses:
open_list = ["[", "{", "("]
close_list = ["]", "}", ")"]
def is_parentheses(self, string):
stack = []
for i in string:
if i in self.open_list:
stack.append(i)
if i in self.close_list:
if self.close_list.index(i) == self.open_list.index(stack[-1]):
stack.pop()
else:
return False
if stack:
return False
return True
balanced_string = '123 (14) [2, 3: {16, 9}, (90, a[1])]'
unbalanced_string = '44 (5t6y) [2, 3: {16, 9}, {(90, a[1]))]'
print(Parentheses().is_parentheses(balanced_string)) # True
print(Parentheses().is_parentheses(unbalanced_string)) # False
print(Parentheses().is_parentheses('{}')) # True
print(Parentheses().is_parentheses('{')) # False
| [
"cyr@zolotarev.pp.ua"
] | cyr@zolotarev.pp.ua |
c0a93e368941fa37baddd6151ac2f3deaa92f650 | 1adc05008f0caa9a81cc4fc3a737fcbcebb68995 | /hardhat/recipes/rpkg/nipals.py | 75d104e2c794d340891ef8ba4750caa91d159918 | [
"MIT",
"BSD-3-Clause"
] | permissive | stangelandcl/hardhat | 4aa995518697d19b179c64751108963fa656cfca | 1ad0c5dec16728c0243023acb9594f435ef18f9c | refs/heads/master | 2021-01-11T17:19:41.988477 | 2019-03-22T22:18:44 | 2019-03-22T22:18:52 | 79,742,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | from .base import RPackageBase
from ..base import SourceMixin
class NipalsRecipe(RPackageBase):
def __init__(self, *args, **kwargs):
super(NipalsRecipe, self).__init__(*args, **kwargs)
self.sha256 = 'bae93f8254166ee62ced3ae372c25271' \
'3945f5fc51f8303ba574744264ed3241'
self.name = 'r-nipals'
self.version = '0.4'
self.url = 'https://cran.r-project.org/src/contrib/' \
'nipals_$version.tar.gz'
self.configure_strip_cross_compile()
class NipalsSourceRecipe(SourceMixin, NipalsRecipe):
def __init__(self, *args, **kwargs):
super(NipalsSourceRecipe, self).__init__(*args, **kwargs)
| [
"clayton.stangeland@gmail.com"
] | clayton.stangeland@gmail.com |
0c5af194c9baa72bf561be3829d0c96e8dfe3a76 | a67a987ed078da0a1de2908c8c0e08070dee65b1 | /genice/lattices/Struct57.py | 8ab6ddccad2875c664059d05fa3fe325fbd074fa | [] | no_license | Python3pkg/GenIce | ef1ce7ee2997c10e08dde75ac36050a653cd4fc5 | 1e9458b7bf8e0fd2ad5d0c4f8987cea0ae7ca0b0 | refs/heads/master | 2021-01-21T17:31:51.595858 | 2017-05-21T14:09:32 | 2017-05-21T14:09:32 | 91,962,047 | 0 | 0 | null | 2017-05-21T14:09:28 | 2017-05-21T14:09:28 | null | UTF-8 | Python | false | false | 6,675 | py | """
Data source: Dutour Sikirić, Mathieu, Olaf Delgado-Friedrichs, and Michel Deza. “Space Fullerenes: a Computer Search for New Frank-Kasper Structures” Acta Crystallographica Section A Foundations of Crystallography 66.Pt 5 (2010): 602–615.
Cage composition:
(12,14,15,16) = (12,12,0,4,)
"""
pairs="""
8 79
132 124
126 134
61 149
49 142
48 36
10 159
136 104
110 3
28 18
29 3
41 149
20 140
67 72
28 136
34 68
139 81
152 71
20 12
131 133
104 49
72 14
109 53
152 133
37 73
86 121
13 133
134 145
108 55
106 54
147 127
135 151
32 153
21 45
85 132
8 55
86 137
92 48
41 112
126 14
69 9
159 142
109 47
83 94
86 96
17 58
13 19
102 143
109 159
46 149
28 82
78 50
115 70
108 142
97 120
44 53
24 61
17 158
5 31
115 126
0 105
137 101
60 89
31 53
40 95
48 156
117 76
26 49
35 3
35 73
96 103
84 89
110 11
127 68
121 41
78 36
157 52
144 96
65 148
69 112
20 30
83 46
29 89
16 73
92 102
22 74
100 155
109 148
100 157
63 62
123 47
117 87
108 15
106 21
124 75
1 150
156 143
107 25
22 87
11 0
106 74
52 111
146 111
133 81
25 57
91 140
59 130
125 116
96 87
144 49
26 103
0 129
68 71
2 131
27 34
18 137
148 57
115 153
144 33
67 73
61 39
131 45
125 87
43 45
7 18
23 8
147 24
8 113
62 122
29 127
75 46
141 3
154 25
74 77
1 54
86 139
11 113
34 155
9 104
107 32
22 57
156 50
44 66
60 135
93 101
93 88
6 103
148 77
156 32
100 72
29 130
102 123
10 26
136 78
90 61
107 93
134 32
7 40
26 76
150 56
84 158
154 82
123 129
89 138
125 131
124 40
35 99
27 98
5 159
151 146
55 70
121 39
64 132
134 66
90 98
128 84
52 38
92 4
67 62
94 152
81 39
78 65
93 85
38 79
104 51
120 98
130 24
64 46
80 119
55 51
97 157
88 9
13 95
18 41
7 54
125 139
23 42
74 103
30 79
2 95
27 40
10 77
37 63
12 35
152 43
16 59
33 105
36 77
52 84
147 19
13 68
150 75
114 124
16 94
136 101
7 56
138 111
6 116
145 53
118 70
15 138
155 62
116 76
129 118
16 141
4 47
145 65
9 76
141 71
150 122
48 25
63 75
119 38
97 128
153 51
108 11
64 139
129 151
42 143
141 135
83 81
12 90
27 63
60 59
50 51
110 138
54 82
2 22
132 137
44 146
58 122
17 71
67 140
30 99
4 117
119 14
21 19
144 101
91 79
1 45
107 65
60 118
85 69
23 50
56 149
64 112
106 95
114 154
157 30
142 105
72 99
33 88
58 43
126 113
114 1
114 2
58 128
0 115
4 57
19 39
116 112
59 140
44 123
80 146
98 56
97 34
91 14
117 33
23 145
31 111
15 5
21 6
120 122
128 127
6 121
147 43
66 143
80 158
28 69
47 105
91 70
12 130
90 37
99 113
94 24
31 42
85 154
110 151
37 83
42 38
15 118
20 120
92 10
66 119
135 158
100 80
88 153
17 155
82 36
102 5
"""
waters="""
0.8125 0.80241 0.19017
0.5 0.25631 0.72057
0.8125 0.19759 0.80983
0.0 0.57611 0.125
0.6875 0.97648 0.9375
0.3125 0.82131 0.94195
0.3125 0.17869 0.05805
0.1875 0.24211 0.56695
0.1875 0.75789 0.43305
0.5 0.01882 0.28211
0.3125 0.97648 0.9375
0.0 0.74369 0.22057
0.1875 0.50402 0.3125
0.0 0.33606 0.9033
0.6875 0.67813 0.49612
0.3125 0.75789 0.06695
0.6875 0.50402 0.1875
0.6875 0.49598 0.8125
0.1875 0.17869 0.44195
0.1875 0.32187 0.99612
0.3125 0.53806 0.4375
0.3125 0.24211 0.93305
0.875 0.11065 0.87233
0.1875 0.82131 0.55805
0.375 0.41372 0.15533
0.6875 0.03293 0.6875
0.3125 0.02352 0.0625
0.0 0.37164 0.59468
0.3125 0.10594 0.47228
0.1875 0.53806 0.0625
0.1875 0.60732 0.49879
0.1875 0.77679 0.81517
0.6875 0.89406 0.52773
0.8125 0.96707 0.1875
0.0 0.44091 0.71903
0.0 0.55909 0.28097
0.3125 0.03293 0.6875
0.875 0.41372 0.34468
0.375 0.69233 0.6533
0.125 0.30767 0.1533
0.0 0.27487 0.62678
0.3125 0.22321 0.31517
0.3125 0.77679 0.68483
0.5 0.37164 0.90533
0.8125 0.77679 0.81517
0.5 0.27487 0.87322
0.625 0.30767 0.3467
0.8125 0.89406 0.97228
0.5 0.98118 0.7179
0.1875 0.96707 0.1875
0.3125 0.89406 0.52773
0.375 0.88936 0.37233
0.25 0.63854 0.75
0.0 0.82602 0.783
0.3125 0.19759 0.69017
0.3125 0.80241 0.30983
0.3125 0.32187 0.50388
0.8125 0.03293 0.8125
0.5 0.44091 0.78097
0.5 0.55909 0.21903
0.5 0.62836 0.09468
0.25 0.36147 0.25
0.6875 0.46195 0.5625
0.8125 0.39268 0.50121
0.6875 0.22321 0.31517
0.0 0.95296 0.625
0.6875 0.77679 0.68483
0.6875 0.53806 0.4375
0.0 0.42389 0.875
0.5 0.10124 0.37767
0.5 0.74369 0.27944
0.8125 0.46195 0.9375
0.8125 0.60732 0.49879
0.8125 0.50402 0.3125
0.125 0.11065 0.87233
0.6875 0.32187 0.50388
0.5 0.04705 0.125
0.1875 0.03293 0.8125
0.1875 0.97648 0.5625
0.3125 0.67813 0.49612
0.75 0.63854 0.75
0.875 0.30767 0.1533
0.375 0.11065 0.62767
0.75 0.36147 0.25
0.375 0.58629 0.84468
0.6875 0.10594 0.47228
0.0 0.17399 0.21701
0.8125 0.10594 0.02773
0.6875 0.96707 0.3125
0.3125 0.60732 0.00121
0.125 0.41372 0.34468
0.5 0.66394 0.4033
0.5 0.95296 0.875
0.8125 0.02352 0.4375
0.625 0.41372 0.15533
0.0 0.25631 0.77944
0.0 0.10124 0.12233
0.1875 0.49598 0.6875
0.1875 0.39268 0.50121
0.0 0.62836 0.40533
0.875 0.58629 0.65533
0.0 0.04705 0.375
0.5 0.85198 0.87678
0.1875 0.10594 0.02773
0.3125 0.96707 0.3125
0.875 0.88936 0.12767
0.1875 0.19759 0.80983
0.8125 0.97648 0.5625
0.1875 0.80241 0.19017
0.0 0.89877 0.87767
0.0 0.66394 0.0967
0.125 0.69233 0.8467
0.5 0.17399 0.283
0.0 0.72513 0.37322
0.6875 0.19759 0.69017
0.6875 0.80241 0.30983
0.5 0.14802 0.12322
0.6875 0.02352 0.0625
0.5 0.72513 0.12678
0.625 0.69233 0.6533
0.3125 0.46195 0.5625
0.1875 0.22321 0.18483
0.5 0.42389 0.625
0.6875 0.82131 0.94195
0.8125 0.24211 0.56695
0.6875 0.17869 0.05805
0.8125 0.75789 0.43305
0.1875 0.46195 0.9375
0.3125 0.49598 0.8125
0.6875 0.75789 0.06695
0.3125 0.50402 0.1875
0.6875 0.24211 0.93305
0.8125 0.17869 0.44195
0.8125 0.32187 0.99612
0.8125 0.82131 0.55805
0.6875 0.60732 0.00121
0.1875 0.02352 0.4375
0.0 0.14802 0.37678
0.1875 0.67813 0.00388
0.8125 0.22321 0.18483
0.5 0.57611 0.375
0.8125 0.53806 0.0625
0.125 0.88936 0.12767
0.5 0.82602 0.71701
0.0 0.01882 0.2179
0.0 0.85198 0.62322
0.875 0.69233 0.8467
0.3125 0.39268 0.99879
0.0 0.98118 0.78211
0.375 0.30767 0.3467
0.5 0.33606 0.5967
0.8125 0.67813 0.00388
0.6875 0.39268 0.99879
0.625 0.88936 0.37233
0.625 0.11065 0.62767
0.8125 0.49598 0.6875
0.5 0.89877 0.62233
0.125 0.58629 0.65533
0.625 0.58629 0.84468
0.1875 0.89406 0.97228
"""
coord= "relative"
cages="""
12 0.0 -0.21676 1.01068
14 0.0 0.43193 0.12387
14 -0.25 -0.65222 -0.25
12 0.25 0.09409 0.25
12 0.5 0.21676 0.51068
12 -0.25 -0.09409 -0.25
14 0.25 0.65222 0.25
16 0.5 0.11291 -0.12842
12 0.5 -0.5 1.0
14 0.5 -0.291 0.8787
14 0.25 -0.65222 0.75
14 0.5 0.291 0.1213
16 0.0 0.11291 0.62842
12 0.25 -0.09409 0.75
14 0.5 0.43193 0.37613
12 0.0 0.5 0.5
14 0.0 -0.291 -0.3787
12 0.0 0.21676 -0.01068
16 0.0 -0.11291 -0.62842
12 -0.5 -0.21676 -0.51068
14 -0.25 0.65222 -0.75
12 0.5 0.0 0.5
14 0.0 -0.43193 -0.12387
12 -0.25 0.09409 -0.75
14 0.5 -0.43193 0.62387
16 0.5 -0.11291 1.12842
14 0.0 0.291 0.3787
12 0.0 0.0 0.0
"""
bondlen = 3
celltype = 'rect'
cell = """
13.167286191434481 31.492589961461622 18.629903136229707
"""
density = 0.6190653349484135
| [
"vitroid@gmail.com"
] | vitroid@gmail.com |
0a1d6f058d79fafc887ca3d6f1ded85f8244d634 | 04b1803adb6653ecb7cb827c4f4aa616afacf629 | /tools/md_browser/gitiles_ext_blocks.py | b1a53795e1750d12737e6a32144106c9d9ba5cac | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LGPL-2.1-only",
"APSL-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown",
"MIT",
"Zlib"
] | permissive | Samsung/Castanets | 240d9338e097b75b3f669604315b06f7cf129d64 | 4896f732fc747dfdcfcbac3d442f2d2d42df264a | refs/heads/castanets_76_dev | 2023-08-31T09:01:04.744346 | 2021-07-30T04:56:25 | 2021-08-11T05:45:21 | 125,484,161 | 58 | 49 | BSD-3-Clause | 2022-10-16T19:31:26 | 2018-03-16T08:07:37 | null | UTF-8 | Python | false | false | 2,797 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements Gitiles' notification, aside and promotion blocks.
This extention makes the Markdown parser recognize the Gitiles' extended
blocks notation. The syntax is explained at:
https://gerrit.googlesource.com/gitiles/+/master/Documentation/markdown.md#Notification_aside_promotion-blocks
"""
from markdown.blockprocessors import BlockProcessor
from markdown.extensions import Extension
from markdown.util import etree
import re
class _GitilesExtBlockProcessor(BlockProcessor):
    """Process Gitiles' notification, aside and promotion blocks."""
    # Opening marker: a line of the form "*** note" / "*** aside" / "*** promo".
    RE_START = re.compile(r'^\*\*\* (note|aside|promo) *\n')
    # Closing marker: a trailing line consisting only of "***".
    RE_END = re.compile(r'\n\*\*\* *\n?$')
    def __init__(self, *args, **kwargs):
        # Parent element captured when a block was opened; None while no
        # extended block is currently open.
        self._last_parent = None
        BlockProcessor.__init__(self, *args, **kwargs)
    def test(self, parent, block):
        """Claim any chunk that contains a start or an end marker."""
        return self.RE_START.search(block) or self.RE_END.search(block)
    def run(self, parent, blocks):
        """Translate the markers into a styled <div> wrapping the content."""
        raw_block = blocks.pop(0)
        match_start = self.RE_START.search(raw_block)
        if match_start:
            # Opening a new block.
            rest = raw_block[match_start.end():]
            if self._last_parent:
                # Inconsistent state (nested starting markers). Ignore the marker
                # and keep going.
                blocks.insert(0, rest)
                return
            div = etree.SubElement(parent, 'div')
            # Setting the class name is sufficient, because doc.css already has
            # styles for these classes.
            div.set('class', match_start.group(1))
            self._last_parent = parent
            blocks.insert(0, rest)
            self.parser.parseBlocks(div, blocks)
            return
        match_end = self.RE_END.search(raw_block)
        if match_end:
            # Ending an existing block.
            # Process the text preceding the ending marker in the current context
            # (i.e. within the div block).
            rest = raw_block[:match_end.start()]
            self.parser.parseBlocks(parent, [rest])
            if not self._last_parent:
                # Inconsistent state (the ending marker is found but there is no
                # matching starting marker).
                # Let's continue as if we did not see the ending marker.
                return
            # Resume parsing the remaining chunks in the saved (outer) parent.
            last_parent = self._last_parent
            self._last_parent = None
            self.parser.parseBlocks(last_parent, blocks)
            return
class _GitilesExtBlockExtension(Extension):
    """Markdown extension that registers the Gitiles extended-block parser."""
    def extendMarkdown(self, md, md_globals):
        """Install the block processor ahead of the built-in ones."""
        processor = _GitilesExtBlockProcessor(md.parser)
        md.parser.blockprocessors.add('gitilesextblocks', processor, '_begin')
def makeExtension(*args, **kwargs):
    """Entry point used by ``markdown.Markdown(extensions=[...])``."""
    extension = _GitilesExtBlockExtension(*args, **kwargs)
    return extension
| [
"sunny.nam@samsung.com"
] | sunny.nam@samsung.com |
54a3fad2138ebacedcf8939c0b6196bb4d9b3dbb | edcd74f8f65119bdbe737360c2ca33b4a6da160a | /python/problem-google-code-jam/2018_b.py | 98699ff0f46a03da210a2e0def950bde3e33e108 | [] | no_license | hyunjun/practice | 72e83de6a1d5e04ddcd16526f16110ea2dd00373 | 5376dd48b1cefb4faba9d2ef6a8a497b6b1d6c67 | refs/heads/master | 2023-08-31T07:00:37.320351 | 2023-08-17T07:29:24 | 2023-08-17T07:29:24 | 2,704,126 | 3 | 2 | null | 2022-12-14T20:25:07 | 2011-11-03T18:28:44 | Python | UTF-8 | Python | false | false | 804 | py | # https://blog.naver.com/ndb796/221247631646
class Solution:
    """GCJ 2018 Qualification B -- "Trouble Sort".

    Trouble Sort (repeatedly reversing descending triplets) is equivalent
    to sorting the even-indexed and the odd-indexed elements independently.
    The previous implementation only sifted each element once from right to
    left, which could leave a parity chain unsorted (e.g. [3, 1, 1, 2, 2]
    answered 2 instead of 'OK').
    """

    def troubleSort(self, inp):
        """Simulate Trouble Sort on *inp* (the list is mutated in place).

        Returns [] for None/empty input, the index of the first adjacent
        out-of-order pair after trouble-sorting, or 'OK' when the result
        is fully sorted.
        """
        if inp is None or 0 == len(inp):
            return []
        # Fully sort each parity chain in place.
        inp[0::2] = sorted(inp[0::2])
        inp[1::2] = sorted(inp[1::2])
        # Report the first adjacent inversion, if any.
        for idx in range(len(inp) - 1):
            if inp[idx] > inp[idx + 1]:
                return idx
        return 'OK'
solver = Solution()
cases = [([5, 6, 8, 4, 3], 'OK'), ([8, 9, 7], 1)]
for values, want in cases:
    # troubleSort mutates `values` in place, so the printed input is the
    # trouble-sorted list (same as the original script's behaviour).
    got = solver.troubleSort(values)
    print('inp {}, expected {}, real {}, result {}'.format(values, want, got, want == got))
| [
"morpheus.0@kakaocorp.com"
] | morpheus.0@kakaocorp.com |
ff7a09345bd02f4ebdab16d53b8302d3dce3e7ee | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2453/60870/282722.py | 88542fe2fc3834eef30ed5e16b8c623fc16f52c9 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | array = input().split(',')
# `array` (read on the previous line) holds comma-separated tokens;
# convert them to ints and report whether the probe value is present.
numbers = [int(token) for token in array]
target = int(input())
print('True' if target in numbers else 'False')
"1069583789@qq.com"
] | 1069583789@qq.com |
cb485c661040f1cf200e0437e69f0fea29343ef5 | caa175a933aca08a475c6277e22cdde1654aca7b | /tests/test_version.py | e7294ded840af2ff9d2315172936d70b828d4787 | [
"MIT"
] | permissive | simonsobs/acondbs | 01d68ae40866461b85a6c9fcabdfbea46ef5f920 | d18c7b06474b0dacb1dcf1c6dbd1e743407645e2 | refs/heads/main | 2023-07-07T04:33:40.561273 | 2023-06-28T22:08:00 | 2023-06-28T22:08:00 | 239,022,783 | 0 | 1 | MIT | 2023-06-26T20:36:39 | 2020-02-07T21:07:46 | Python | UTF-8 | Python | false | false | 139 | py | import acondbs
def test_version() -> None:
    """The package must expose a truthy ``__version__`` string."""
    version = acondbs.__version__
    assert version
| [
"tai.sakuma@gmail.com"
] | tai.sakuma@gmail.com |
fa497cff7ed5d5a4a2209a08b49b728ff7acf821 | 855501a4cb8a54e0c977d53e6f5d76d8938f99cb | /Quicksort 2 - Sorting.py | 299b80349cb57d654b3016753c09c626f9a9cdef | [] | no_license | Beowulfdgo/HackerRank | 3d7713f68a595af76d857ac9955ae55565b8391f | e4384253f27eee296e0cad39a402cadf47c90164 | refs/heads/master | 2023-05-31T05:30:21.425792 | 2021-06-29T08:47:11 | 2021-06-29T08:47:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | def quicksort(ar):
if len(ar) < 2:
return ar
lt, eq, rt = [], [], []
for item in ar:
if item < ar[0]:
lt.append(item)
elif item > ar[0]:
rt.append(item)
else:
eq.append(item)
sub = quicksort(lt) + eq + quicksort(rt)
print(' '.join([str(x) for x in sub]))
return(sub)
# The first input line (the array length) is consumed but not needed.
_ = input().strip().split()
values = [int(token) for token in input().strip().split()]
quicksort(values)
| [
"54479676+CormacKrum@users.noreply.github.com"
] | 54479676+CormacKrum@users.noreply.github.com |
a7b4e3f4a3f9ff40a6a3bb1db9cfec26f6dbcff9 | 0e083f405af00029c9ec31849f0f7f81c56844b5 | /demo/python/pipeline.py | f949a910f602bf36b5b2c010c68e3d8b50f4771f | [
"Apache-2.0"
] | permissive | open-mmlab/mmdeploy | 39b9e7b611caab2c76a6142fcb99f0bf1d92ad24 | 5479c8774f5b88d7ed9d399d4e305cb42cc2e73a | refs/heads/main | 2023-09-01T21:29:25.315371 | 2023-08-31T09:59:29 | 2023-08-31T09:59:29 | 441,467,833 | 2,164 | 605 | Apache-2.0 | 2023-09-14T10:39:04 | 2021-12-24T13:04:44 | Python | UTF-8 | Python | false | false | 1,952 | py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import json
import cv2
from mmdeploy_runtime import Context, Device, Model, Pipeline
def parse_args():
    """Parse the four positional command-line arguments for the demo."""
    parser = argparse.ArgumentParser(
        description='Demo of MMDeploy SDK pipeline API')
    for name, help_text in (
            ('device', 'name of device, cuda or cpu'),
            ('det_model_path', 'path of detection model'),
            ('cls_model_path', 'path of classification model'),
            ('image_path', 'path to test image')):
        parser.add_argument(name, help=help_text)
    return parser.parse_args()
def main():
    """Run a two-stage detect-then-classify pipeline on a single image."""
    args = parse_args()
    det_model = Model(args.det_model_path)
    reg_model = Model(args.cls_model_path)

    # Stage 1: detection over the whole image.
    detect_task = dict(
        type='Inference',
        input='img',
        output='dets',
        params=dict(model=det_model))
    # Stage 2a: crop each detected box out of the image.
    crop_task = dict(
        type='Task',
        module='CropBox',
        input=['imgs', 'boxes'],
        output='patches')
    # Stage 2b: classify every cropped patch.
    classify_task = dict(
        type='Inference',
        input='patches',
        output='labels',
        params=dict(model=reg_model))
    per_box_pipeline = dict(
        type='Pipeline',
        # flatten dets ([[a]] -> [a]) and broadcast img
        input=['boxes=*dets', 'imgs=+img'],
        tasks=[crop_task, classify_task],
        # unflatten labels ([a] -> [[a]])
        output='*labels')
    config = dict(
        type='Pipeline',
        input='img',
        tasks=[detect_task, per_box_pipeline],
        output=['dets', 'labels'])

    device = Device(args.device)
    pipeline = Pipeline(config, Context(device))

    image = cv2.imread(args.image_path)
    result = pipeline(dict(ori_img=image))
    print(json.dumps(result, indent=4))
# Script entry point.
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
7dc37255f0bda7cbe80a39dbbbb19e810041493e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03049/s443206428.py | 3f28d62a3383c1b8d8566f4abf3168dd0e4cec49 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | N = int(input())
# Counts needed to maximise "AB" occurrences after concatenating the
# strings (N was read on the previous line).
inner_ab = 0        # "AB" pairs already inside individual strings
starts_b_only = 0   # begins with 'B' but does not end with 'A'
ends_a_only = 0     # ends with 'A' but does not begin with 'B'
both_ends = 0       # begins with 'B' AND ends with 'A'
for _ in range(N):
    word = input()
    inner_ab += word.count('AB')
    if word[0] == 'B' and word[-1] == 'A':
        both_ends += 1
    elif word[0] == 'B':
        starts_b_only += 1
    elif word[-1] == 'A':
        ends_a_only += 1
if both_ends == 0:
    print(inner_ab + min(ends_a_only, starts_b_only))
elif max(ends_a_only, starts_b_only) > 0:
    print(inner_ab + both_ends + min(ends_a_only, starts_b_only))
else:
    print(inner_ab + both_ends - 1)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
7a68101804a74608cbce1f189b3552132caaf3b5 | e95236dd5ca3c39c39586b5cafeacd06d923e20b | /models/btcmodel.py | e516c93d630307aa632512597fdf051e994c708a | [] | no_license | raoden1/Minotaur | 14bc7e56ca2437af3ec4d6af3c85fc1062bade65 | 8787244cc6ac9cd6347e84705a2908026ec78f25 | refs/heads/master | 2020-04-26T11:42:16.314463 | 2019-02-02T19:20:30 | 2019-02-02T19:20:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,455 | py | import datetime
from binance.client import Client
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
# Instantiate the Binance REST client (placeholder credentials; a real
# API key/secret must be supplied before running).
client = Client('API_KEY', 'SECRET_KEY')
# Download one year of 30-minute BTC/USDT candlesticks.
symbol = 'BTCUSDT'
BTC = client.get_historical_klines(symbol=symbol, interval=Client.KLINE_INTERVAL_30MINUTE, start_str="1 year ago UTC")
BTC = pd.DataFrame(BTC, columns=['Open time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Close time', 'Quote asset volume', 'Number of trades', 'Taker buy base asset volume', 'Taker buy quote asset volume', 'Ignore'])
BTC['Open time'] = pd.to_datetime(BTC['Open time'], unit='ms')
BTC.set_index('Open time', inplace=True)
BTC['Close']=BTC['Close'].astype(float)
# Column position 3 is 'Low' per the column list above, although the
# intent appears to be the close price -- TODO confirm iloc column.
data = BTC.iloc[:,3:4].astype(float).values
# Scale prices into [0, 1].  NOTE(review): fitting the scaler on the
# full series leaks test-set information -- confirm this is acceptable.
scaler= MinMaxScaler()
data= scaler.fit_transform(data)
# First 10000 rows train, the rest test; assumes the download returned
# more than 10000 candles -- TODO confirm.
training_set = data[:10000]
test_set = data[10000:]
# Data preprocessing: one-step-ahead pairs (X is the value at t,
# y is the value at t+1) for both splits.
X_train = training_set[0:len(training_set)-1]
y_train = training_set[1:len(training_set)]
X_test = test_set[0:len(test_set)-1]
y_test = test_set[1:len(test_set)]
# LSTM expects (samples, timesteps, features); timesteps is 1 here.
X_train = np.reshape(X_train, (len(X_train), 1, X_train.shape[1]))
X_test = np.reshape(X_test, (len(X_test), 1, X_test.shape[1]))
# Build a two-layer LSTM regressor with a single linear output.
model = Sequential()
model.add(LSTM(256, return_sequences=True, input_shape=(X_train.shape[1], X_train.shape[2])))
model.add(LSTM(256))
model.add(Dense(1))
# Compile and train without shuffling (preserves temporal order).
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(X_train, y_train, epochs=50, batch_size=16, shuffle=False)
# Persist the trained model.
model.save('bitcoin_model.h5')
# Predict on the held-out data and undo the min-max scaling.
predicted_price = model.predict(X_test)
predicted_price = scaler.inverse_transform(predicted_price)
real_price = scaler.inverse_transform(y_test)
# Plot predicted vs. real prices.
plt.figure(figsize=(10,4))
red_patch = mpatches.Patch(color='red', label='Predicted Price of Bitcoin')
blue_patch = mpatches.Patch(color='blue', label='Real Price of Bitcoin')
plt.legend(handles=[blue_patch, red_patch])
plt.plot(predicted_price, color='red', label='Predicted Price of Bitcoin')
plt.plot(real_price, color='blue', label='Real Price of Bitcoin')
plt.title('Predicted vs. Real Price of Bitcoin')
plt.xlabel('Time')
plt.ylabel('Price')
plt.show()
| [
"merwanedr@gmail.com"
] | merwanedr@gmail.com |
1d6acadc9d5be41109f9aa503af6f638f48eb0fd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02963/s382016550.py | b17e4a66ac52bc376bdd76a24fac8abb77cb2820 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | S=int(input())
# Output a lattice triangle (X1,Y1)-(X2,Y2)-(X3,Y3) whose doubled area
# equals S (S was read on the previous line).
if S < 10 ** 18:
    X1, Y1 = 0, 0
    mod = 10 ** 9
    X2, Y2 = mod, 1
    X3 = mod - S % mod
    Y3 = S // mod + 1
else:
    # Maximal case (presumably S == 10**18 under the problem limits).
    X1, Y1, X2, Y3 = 0, 0, 0, 0
    Y2 = 10 ** 9
    X3 = 10 ** 9
print(X1, Y1, X2, Y2, X3, Y3)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1d99f2aa5cd3ff105870bfab76919d26926ef28b | c29b838371729ac04744b40d486f0b55212990b6 | /Spider-Learn/Spider/scrapyuniversal/scrapyuniversal/spiders/china.py | 9e6f76e736b259561061cc615cecc85c5d0e24cc | [] | no_license | Sugarsugarzz/PyPractice | 93c3155a94d162c9eabf0d1a641d28bc6d639c22 | d91b7d6ca996792fe409c08862fa9da5b1dc319b | refs/heads/master | 2023-02-13T01:51:24.909947 | 2021-01-20T02:57:22 | 2021-01-20T02:57:22 | 163,177,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,991 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapyuniversal.items import NewsItem
from scrapyuniversal.loader import ChinaLoader
class ChinaSpider(CrawlSpider):
    """CrawlSpider scraping tech news articles from tech.china.com."""
    name = 'china'
    allowed_domains = ['tech.china.com']
    start_urls = ['http://tech.china.com/articles']
    # The Rules below already handle both opening article detail pages
    # and following the "next page" pagination link.
    rules = (
        Rule(LinkExtractor(allow='article\/.*.\.html', restrict_xpaths='//div[@id="left_side"]//div[@class="con_item"]'),
             callback='parse_item'),
        Rule(LinkExtractor(restrict_xpaths='//div[@id="pageStyle"]//a[contains(., "下一页")]'))
    )
    # Typical (non-ItemLoader) implementation, kept for reference:
    # def parse_item(self, response):
    #     item = NewsItem()
    #     item['title'] = response.xpath('//h1[@id="chan_newsTitle"]/text()').extract_first()
    #     item['url'] = response.url
    #     item['text'] = ''.join(response.xpath('//div[@id="chan_newsDetail"]//text()').extract()).strip()
    #     item['datatime'] = response.xpath('//div[@id="chan_newsInfo"]/text()').re_first('(\d+-\d+-\d+\s\d+:\d+:\d+)')
    #     item['source'] = response.xpath('//div[@id="chan_newsInfo"]/text()').re_first('来源:(.*)').strip()
    #     item['website'] = '中华网'
    #     yield item
    # The version above is untidy; ItemLoader gives configuration-driven extraction instead.
    def parse_item(self, response):
        """Populate a NewsItem via ChinaLoader from an article page."""
        loader = ChinaLoader(item=NewsItem(), response=response)
        loader.add_xpath('title', '//h1[@id="chan_newsTitle"]/text()')
        loader.add_value('url', response.url)
        loader.add_xpath('text', '//div[@id="chan_newsDetail"]//text()')
        loader.add_xpath('datetime', '//div[@id="chan_newsInfo"]/text()', re='(\d+-\d+-\d+\s\d+:\d+:\d+)')
        loader.add_xpath('source', '//div[@id="chan_newsInfo"]/text()', re='来源:(.*)')
        loader.add_value('website', '中华网')
        yield loader.load_item()
| [
"406857586@qq.com"
] | 406857586@qq.com |
0f2ffe38ae9cf44d15befe3270fbe6ca5ee63422 | 3bda0851de1224b524fbcddece1f502a67e9def9 | /test/test_markdown_blank_lines.py | 695aea9b92244d67e2b23590352def8d0c3975e2 | [
"MIT"
] | permissive | jtprince/pymarkdown | 65b3f3b06c88bc0d4652a990bd134ef6996bcc15 | 17304b3ef580ec71678c450ab6c2a1e669b4e90a | refs/heads/main | 2023-04-29T15:51:05.761440 | 2021-05-16T22:55:11 | 2021-05-16T22:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | """
https://github.github.com/gfm/#paragraphs
"""
import pytest
from .utils import act_and_assert
@pytest.mark.gfm
def test_blank_lines_197():
    """
    Test case 197: Blank lines at the beginning and end of the document are also ignored.
    """
    # Arrange
    # "\a" is a visible stand-in for a significant space character; it is
    # substituted back to " " below so editors cannot strip the whitespace.
    source_markdown = """\a\a
aaa
\a\a
# aaa
""".replace(
        "\a", " "
    )
    expected_tokens = [
        "[BLANK(1,1): ]",
        "[BLANK(2,1):]",
        "[para(3,1):]",
        "[text(3,1):aaa:]",
        "[end-para:::True]",
        "[BLANK(4,1): ]",
        "[BLANK(5,1):]",
        "[atx(6,1):1:0:]",
        "[text(6,3):aaa: ]",
        "[end-atx::]",
        "[BLANK(7,1):]",
        "[BLANK(8,1): ]",
    ]
    expected_gfm = """<p>aaa</p>
<h1>aaa</h1>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_blank_lines_197a():
    """
    Test case 197a: Extra blanks to test
    """
    # Arrange
    # "\a" marks a significant space; restored via the replace() below.
    source_markdown = """\a\a
\a
aaa
""".replace(
        "\a", " "
    )
    expected_tokens = [
        "[BLANK(1,1): ]",
        "[BLANK(2,1): ]",
        "[para(3,1):]",
        "[text(3,1):aaa:]",
        "[end-para:::True]",
        "[BLANK(4,1):]",
    ]
    expected_gfm = """<p>aaa</p>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
| [
"jack.de.winter@outlook.com"
] | jack.de.winter@outlook.com |
8b2c1d35e6d1eb3c36cb9ae0333b23f8507f3f4f | 44b3d66dce1b8b87ed7a20b9f2a57d5c40a6c010 | /enso/utils/__init__.py | 22ab49ca975d091327f74789edc96e9100955cdf | [
"BSD-2-Clause"
] | permissive | blackdaemon/enso-launcher-continued | 0b203567c9670d5a6fa95b546d7edf64953ee94c | 346f82811e77caf73560619cdeb16afabfbf1fce | refs/heads/master | 2020-06-03T16:29:31.579370 | 2019-05-22T22:39:32 | 2019-05-22T22:39:32 | 30,513,152 | 7 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,739 | py | import traceback
from contextlib import contextmanager
try:
    from contextlib import suppress
except ImportError:
    @contextmanager
    def suppress(*exceptions):
        """Backport of :func:`contextlib.suppress` for Pythons < 3.4.

        Silences any of the given exception types raised inside the
        ``with`` body instead of requiring an explicit try/except::

            with suppress(FileNotFoundError):
                os.remove(path)

        Args:
            exceptions: one or more exception classes to ignore.
        """
        assert exceptions, "'exceptions' parameter in suppress() can't be empty!"
        try:
            yield
        except exceptions:
            pass


# Deprecated alias, kept for backwards compatibility.
ignored = suppress
def __do_once(ignore_args, func, *args, **kwargs):
    """Execute *func* at most once per calling code location.

    The cache key combines the caller's code location (filename, line
    number, function name), the function itself and -- unless
    *ignore_args* is true -- a textual rendering of the positional AND
    keyword arguments.

    Bug fix: the original format string had only three placeholders
    ({0}|{1}|{2}) for four arguments, so keyword arguments were silently
    dropped from the cache key; it also joined raw ``kwargs.values()``,
    which lost the key names, depended on dict ordering and crashed on
    non-string values.

    Returns the function's return value on the first call and ``None``
    on every subsequent call with the same key.
    """
    global __DO_ONCE_CACHE
    stack = traceback.extract_stack()
    # Discard the frames for __do_once itself and for its public wrapper
    # (do_once / call_once / ...), leaving the real caller on top.
    stack.pop()
    stack.pop()
    code_location = "|".join(str(i) for i in stack.pop()[:-1])
    cache_id = "{0}|{1}|{2}|{3}".format(
        code_location,
        func,
        "|".join(str(arg) for arg in args) if not ignore_args else "",
        # Sort the keys so the id is independent of kwargs ordering.
        "|".join("{0}={1}".format(key, kwargs[key])
                 for key in sorted(kwargs)) if not ignore_args else "",
    )
    try:
        if cache_id in __DO_ONCE_CACHE:
            return
    except NameError:
        # Very first call: the module-level cache does not exist yet.
        __DO_ONCE_CACHE = {}
    try:
        return func(*args, **kwargs)
    finally:
        __DO_ONCE_CACHE[cache_id] = 1


# TODO: Move to decorators module
def call_once(func):
    """Function decorator. Execute the function just once,
    no matter the argument values.
    """
    def func_wrapper(*args, **kwargs):
        return __do_once(True, func, *args, **kwargs)
    return func_wrapper


def do_once(func, *args, **kwargs):
    """Execute the function just once, no matter the argument values."""
    return __do_once(True, func, *args, **kwargs)


# TODO: Move to decorators module
def call_once_for_given_args(func):
    """Function decorator. Execute the function just once per distinct
    argument values; calling with different arguments executes it again.
    """
    def func_wrapper(*args, **kwargs):
        return __do_once(False, func, *args, **kwargs)
    return func_wrapper


def do_once_for_given_args(func, *args, **kwargs):
    """Execute the function just once for the given argument values.
    Using the function with different argument values will execute it again.
    """
    return __do_once(False, func, *args, **kwargs)
| [
"pavelvitis@gmail.com"
] | pavelvitis@gmail.com |
1778e5aef03806c0b569aea2e647ba4dd0beba08 | 786de89be635eb21295070a6a3452f3a7fe6712c | /CalibManager/tags/V00-00-77/src/NotificationDBForCL.py | db374f5242f2d042a61fa4e7d4602826bf1f6928 | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | py | #--------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# Module NotificationDBForCL.py...
#
#------------------------------------------------------------------------
"""
This software was developed for the SIT project. If you use all or
part of it, please give an appropriate acknowledgment.
@see
@version $Id$
@author Mikhail S. Dubrovin
"""
#------------------------------
# Module's version from SVN --
#------------------------------
__version__ = "$Revision$"
# $Source$
#--------------------------------
# Imports of standard modules --
#--------------------------------
from NotificationDB import *
#------------------------------
class NotificationDBForCL (NotificationDB):
    """Notification-DB client bound to the 'calibrun' table.

    Intended for submitting calibration-run notification records to the
    database.
    """
    def __init__(self) :
        # Explicit base-class call (Python 2 era, pre-super() style).
        NotificationDB.__init__(self, table='calibrun')
#------------------------------
# Self-test entry point: exercises the client via main_test().
# NOTE(review): main_test and sys are presumably brought in by the
# star import from NotificationDB above -- confirm.
if __name__ == "__main__" :
    ndb = NotificationDBForCL()
    main_test(ndb)
    ndb.close()
    sys.exit ( 'End of test NotificationDBForCL' )
#------------------------------
| [
"dubrovin@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] | dubrovin@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7 |
a98f0fb1c4d3a3e117fcdbeb3051d91faf064b7e | 368be25e37bafa8cc795f7c9f34e4585e017091f | /.history/app_fav_books/models_20201113162142.py | c210c05a528dd41d08ad9cbd4d2ed524b38985db | [] | no_license | steven-halla/fav_books_proj | ebcfbfda0e7f3cdc49d592c86c633b1d331da513 | 512005deb84ac906c9f24d4ab0939bd0db096716 | refs/heads/master | 2023-03-30T09:37:38.016063 | 2021-04-02T20:27:22 | 2021-04-02T20:27:22 | 354,125,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,033 | py | from django.db import models
import re
class UserManager(models.Manager):
    """Manager holding registration-form validation for the User model."""
    def basic_validator(self, postData):
        """Validate a registration POST dict and return field errors.

        Returns a dict mapping field names to error messages; an empty
        dict means every check passed.

        Fixes over the previous version: the body referenced an
        undefined ``post_data`` (NameError at runtime), and an
        unconditional ``return`` after the email-regex check made the
        password checks unreachable.
        """
        errors = {}
        # Name checks.
        if len(postData['first_name']) < 3:
            errors['first_name'] = "First name must be 3 characters"
        if not postData['first_name'].isalpha():
            errors['first_name'] = "letters only"
        if len(postData['last_name']) < 3:
            errors['last_name'] = "Last name must be 3 characters"
        if not postData['last_name'].isalpha():
            errors['last_name'] = "letters only"
        # Email checks: cheap string checks first, then the pattern.
        if len(postData['email']) < 8:
            errors['email'] = "Email must contain 8 characters"
        if postData['email'].find("@") == -1:
            errors['email'] = "email must contain @ and .com"
        if postData['email'].find(".com") == -1:
            errors['email'] = "email must contain @ and .com"
        EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
        if not EMAIL_REGEX.match(postData['email']):
            errors['email'] = "Invalid email address!"
        # Password checks (previously dead code behind an early return).
        if postData['password'] != postData['confirm_password']:
            errors['pass_match'] = "password must match confirm password"
        if len(postData['password']) < 8:
            errors['pass_length'] = "password must be longer than 8 characters"
        return errors
# Create your models here.
class User(models.Model):
    """Registered site user.

    NOTE(review): passwords are stored in a plain CharField with
    max_length=20 (no hashing) and emails are capped at 20 characters --
    confirm these limits are intentional before production use.
    """
    first_name = models.CharField(max_length=20)
    last_name = models.CharField(max_length=20)
    email = models.CharField(max_length=20)
    password = models.CharField(max_length=20)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class Books(models.Model):
    """A book that users of the app can mark as a favorite."""
    title = models.CharField(max_length=20)
    desc = models.CharField(max_length=40)
    # Bug fix: this line was a bare ``likes`` expression, which raises
    # NameError as soon as the module is imported.  A many-to-many
    # relation to User ("users who liked this book") is the conventional
    # field for this favorite-books assignment -- TODO confirm the
    # intended field type and related_name against the views.
    likes = models.ManyToManyField(User, related_name="liked_books")
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
| [
"69405488+steven-halla@users.noreply.github.com"
] | 69405488+steven-halla@users.noreply.github.com |
9a4a00470f775aab477c5809d1cbada4f45d60c0 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_3_1/kochkarash/rc1.py | 1bb08aa5d72a0c470695223cde3bfcb31d8173c2 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 815 | py | import sys
fileinput = sys.stdin
import StringIO
#fileinput = StringIO.StringIO(inputstr)
from heapq import *
import string
A=string.ascii_uppercase
A=[a for a in A]
# Python 2 source (print statement, integer division S/2).
# Greedy evacuation: parties are letters, P their member counts; each
# step removes one or two senators so no party holds a strict majority
# (this matches the GCJ "Senate Evacuation" problem -- unverified).
T=int(fileinput.readline().strip())
for t in range(T):
    N=fileinput.readline().strip()
    N=int(N)
    P=fileinput.readline().strip().split()
    P=[int(p) for p in P]
    # Max-heap via negated counts: entries are (-members, party letter).
    h = []
    S=0
    for n in range(N):
        heappush(h, (-P[n], A[n]))
        S += P[n]
    O=[]
    while True:
        # Take the currently largest party; stop once it is empty.
        p=heappop(h)
        if p[0]==0:
            break
        S=S-1
        heappush(h, (-(-p[0]-1), p[1]))
        o=p[1]
        # If the biggest remaining party now exceeds half of the
        # remaining senators, evacuate one of its members in this
        # step as well.
        if -h[0][0]>S/2:
            p=heappop(h)
            S=S-1
            heappush(h, (-(-p[0]-1), p[1]))
            o=o+p[1]
        O.append(o)
    O=" ".join(O)
    print "Case #%s: %s" % (t+1, O)
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
2fba65138dedfa05ac5795d0149d804b47a9398c | 6846a0469efc79b89edc8f856944d5a8005d7244 | /id_0017.py | c70e7b4a000fac5e79b162fb6191d28364c95eea | [] | no_license | CGenie/project_euler | 42cb966e13645339490046eb44a729660ae0c092 | cc90edd061b0f4d9e076d5a684b842c202a6812a | refs/heads/master | 2020-06-05T00:41:49.266961 | 2014-01-13T19:11:31 | 2014-01-13T19:11:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,748 | py | #!/usr/bin/python2
# #####################################################################
# id_0017.py
#
# Przemyslaw Kaminski <cgenie@gmail.com>
# Time-stamp: <>
######################################################################
def write_number(n):
    """Spell out an integer 1..1000 in British English, without spaces.

    Hundreds are joined to the remainder with "and", e.g.
    342 -> 'threehundredandfortytwo'.  Values outside 1..1000 are not
    supported (unmapped small values raise KeyError; n > 1000 yields
    None), matching the original behaviour.
    """
    words = {1: 'one',
             2: 'two',
             3: 'three',
             4: 'four',
             5: 'five',
             6: 'six',
             7: 'seven',
             8: 'eight',
             9: 'nine',
             10: 'ten',
             11: 'eleven',
             12: 'twelve',
             13: 'thirteen',
             14: 'fourteen',
             15: 'fifteen',
             16: 'sixteen',
             17: 'seventeen',
             18: 'eighteen',
             19: 'nineteen',
             20: 'twenty',
             30: 'thirty',
             40: 'forty',
             50: 'fifty',
             60: 'sixty',
             70: 'seventy',
             80: 'eighty',
             90: 'ninety',
             100: 'hundred'}
    if n <= 20:
        return words[n]
    if 20 < n <= 99:
        tens, units = divmod(n, 10)
        spelled = words[tens * 10]
        if units > 0:
            spelled += words[units]
        return spelled
    if 99 < n <= 999:
        hundreds, remainder = divmod(n, 100)
        spelled = words[hundreds] + words[100]
        if remainder > 0:
            spelled += "and" + write_number(remainder)
        return spelled
    if n == 1000:
        return 'onethousand'
# Project Euler 17: total letters used writing 1..1000 in words.
# (Python 2 source -- note the print statements.)
if __name__ == '__main__':
    l = 0
    for x in range(1, 1001):
        print "x = " + str(x) + ", write_number = " + write_number(x)
        l += len(write_number(x))
    print l
| [
"cgenie@gmail.com"
] | cgenie@gmail.com |
8071aa8e26876969192138ed0b63995bbe14ae2b | cf1476710c4117865fe459f0d698520321810c56 | /cerveceria/migrations/0005_cerveza_nombre.py | 2677cbe9dfe5211b057f198c9021030bc0cb6a9b | [] | no_license | BoiwkoMartin/boiwkosbeers | 42b6ab47573ee1b282bbfbe915b17a449c9b8038 | 74f6ec93d684badc80a8c62844479fc978548acd | refs/heads/main | 2023-04-09T03:36:00.812684 | 2021-04-15T23:55:59 | 2021-04-15T23:55:59 | 358,417,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | # Generated by Django 3.1.7 on 2021-03-05 19:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-add the ``nombre`` field to the ``Cerveza`` model.

    Counterpart of 0004_remove_cerveza_nombre, which dropped it.
    """
    dependencies = [
        ('cerveceria', '0004_remove_cerveza_nombre'),
    ]
    operations = [
        migrations.AddField(
            model_name='cerveza',
            name='nombre',
            # A default is required so existing rows receive a value.
            field=models.CharField(default="Boiwko's", max_length=50),
        ),
    ]
| [
"vagrant@vagrant.vm"
] | vagrant@vagrant.vm |
6c103c6cdb892cf317c59775a2c53c3c793c326b | 3ee1bb0d0acfa5c412b37365a4564f0df1c093fb | /keras/keras40_mnist2_cnn.py | 9b67f325ca5a78b12e49628a58edede98eb0bff1 | [] | no_license | moileehyeji/Study | 3a20bf0d74e1faec7a2a5981c1c7e7861c08c073 | 188843c6415a4c546fdf6648400d072359d1a22b | refs/heads/main | 2023-04-18T02:30:15.810749 | 2021-05-04T08:43:53 | 2021-05-04T08:43:53 | 324,901,835 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,855 | py |
# Exercise:
# Target metric: acc (0.985 or higher)
# Extension:
# Print 10 entries of y_test and 10 entries of y_pred
# y_test[:10] = (???)
# y_pred[:10] = (???)
import numpy as np
import matplotlib.pyplot as plt
# 1. Load the MNIST dataset
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape, y_train.shape) #(60000, 28, 28) (60000,) -> grayscale (60000, 28, 28, 1)
print(x_test.shape, y_test.shape) #(10000, 28, 28) (10000,)
print(x_train[0])
print(y_train[0])
print(x_train[0].shape) #(28, 28)
# plt.imshow(x_train[0], 'gray')
# plt.imshow(x_train[0])
# plt.show()
# X preprocessing:
# reshape 3-D -> 4-D, cast to float, divide by 255 to squash pixels into [0, 1]
x_train = x_train.reshape(60000, 28, 28, 1).astype('float')/255.
x_test = x_test.reshape(10000, 28, 28, 1)/255.
# (x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1))
# Multi-class classification:
# Y preprocessing (one-hot encode the labels)
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
#2. Build the model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
model = Sequential()
model.add(Conv2D(filters=500, kernel_size=(2,2), padding='same', input_shape = (28,28,1)))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=200, kernel_size=2, padding='same', strides=2))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=100, kernel_size=2, padding='same', strides=4))
model.add(Flatten())
model.add(Dense(520, activation='relu'))
model.add(Dense(200, activation='relu'))
# model.add(Dense(150, activation='relu'))
# model.add(Dense(100, activation='relu'))
# model.add(Dense(50, activation='relu'))
model.add(Dense(15, activation='relu'))
model.add(Dense(10, activation='softmax'))
# 3. Compile and train (early stopping on training accuracy)
from tensorflow.keras.callbacks import EarlyStopping
early = EarlyStopping(monitor='acc', patience=20, mode= 'auto')
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics='acc')
model.fit(x_train, y_train, epochs=100, batch_size=90, callbacks=[early])
# 4. Evaluate and predict
loss = model.evaluate(x_test, y_test, batch_size=90)
print(loss)
x_pre = x_test[:10]
y_pre = model.predict(x_pre)
y_pre = np.argmax(y_pre, axis=1)
y_test_pre = np.argmax(y_test[:10], axis=1)
print('y_pred[:10] : ', y_pre)
print('y_test[:10] : ', y_test_pre)
print(x_test[10].shape)
""" import matplotlib.pyplot as plt
plt.imshow(x_test[10], 'gray')
plt.show() """
'''
mnist_CNN :
[0.15593186020851135, 0.9835000038146973]
y_pred[:10] : [7 2 1 0 4 1 4 9 5 9]
y_test[:10] : [7 2 1 0 4 1 4 9 5 9]
'''
| [
"noreply@github.com"
] | moileehyeji.noreply@github.com |
2673e7c0021d9a2293deb9d460d04ff8919609da | cbf0f5dbbbea5be1c46d777c939eb21f0a65c434 | /Repository/w13/ex3_w13.py | 7bcaadf3064d7c137b19e7663533186fce898a0b | [] | no_license | ARBaranov/Third-semester | 2c128ceee257bcee1bb96c7a65b3f388898db8d5 | 9128f8eab9a73d8d8439e4727b3f499df049bec6 | refs/heads/master | 2023-08-11T03:25:28.266061 | 2021-09-18T23:34:35 | 2021-09-18T23:34:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | # 1 a 1 2 b ---> a, b
# Week 13, exercise 3: fill in each REGEXP_* so that, for every sample
# line shown in the comments, re.findall(REGEXP_n, line) returns exactly
# the values listed to the right of "--->".  All patterns are
# intentionally left empty -- this is the assignment stub.
# z 2 y ---> z, y
REGEXP_1 = ''
# aaa bbb ccc ---> aaa, bbb, ccc
# ddd eee fgh ---> ddd, eee, fgh
# a1b c2d e3f ---> a1b, c2d, e3f
REGEXP_2 = ''
# a aa aaa ---> aa, aaa
# b bb bbb ---> bb, bbb
# a bb aaa ---> bb, aaa
REGEXP_3 = ''
# 1.1.1.1 aaaa bbbbb ---> 1.1.1.1
# a.a.a.a bbbb 2.2.2.2 ---> 2.2.2.2
# 3.3.3.3 cccc 4.4.4.4 ---> 3.3.3.3, 4.4.4.4
# 255.23.0.1 cccc 4.4.4.4 ---> 255.23.0.1, 4.4.4.4
# 255.0.23.1 cccc 4.4.4.4 ---> 255.0.23.1, 4.4.4.4
REGEXP_4 = ''
# aaa Abbb ccc ---> Abbb
# Aaa Abbb ccc ---> Aaa, Abbb
# Caa Cbb Accc ---> Accc
REGEXP_5 = ''
# a b c d e f ---> a, b, e, f
# abcdef ---> a, b, e, f
# adf ---> a, f
# acf ---> a, f
REGEXP_6 = ''
# aaa +1.0 bb ---> +1.0
# aaa -1.0 bb ---> -1.0
# aaa -123.234 bb +111.999 ---> -123.234, +111.999
REGEXP_7 = ''
# aaa 18-04-2016 bbb ---> 18-04-2016
# aaa 18.04.2016 bbb ---> 18.04.2016
# aaa 18-04-ABCD bbb 18.04.2016 ---> 18.04.2016
# aaa 18/04/ABCD bbb 18/04/2016 ---> 18/04/2016
# aaa 18/04/ABCD bbb 18/4/2016 ---> 18/4/2016
REGEXP_8 = ''
"123"
] | 123 |
96d3df244cea79335053732ec879dd09d42ceb26 | 0613b082bd90462e190bc51943356ce6ce990815 | /attendance/migrations/0004_worksheet.py | 5180b56f06b1064cd4bee92effa57a857c9d7479 | [] | no_license | Hamidnet220/salary | 1068aac4bc921436c03b627899370a86ca5e99be | 4dc1f32dfa1d990e6c9f527b4a8d0e1df939262a | refs/heads/master | 2020-05-04T18:09:24.086491 | 2019-04-22T20:22:32 | 2019-04-22T20:22:32 | 179,342,004 | 0 | 1 | null | 2019-04-11T10:43:29 | 2019-04-03T17:53:36 | Python | UTF-8 | Python | false | false | 1,008 | py | # Generated by Django 2.2 on 2019-04-19 06:03
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('wage', '0007_auto_20190415_1650'),
('baseinfo', '0006_auto_20190419_0525'),
('attendance', '0003_delete_worksheet'),
]
operations = [
migrations.CreateModel(
name='Worksheet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('work_days_stat', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(blank=True, null=True), size=31)),
('employee', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='baseinfo.Employee')),
('wage', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='wage.Wage')),
],
),
]
| [
"kiani.hamidreza@gmail.com"
] | kiani.hamidreza@gmail.com |
4fc606787313fb230101c49ae628fcb273a6111f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02997/s386871412.py | 7432b9652080f5697617f98cbfca83378aa831a4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | N, K = map(int, input().split())
if K>N*(N-1)//2-(N-1):
print(-1)
exit()
is_ban = [[False]*N for _ in range(N)]
for i in range(1, N):
for j in range(i+1, N):
if K==0:
break
is_ban[i][j] = True
K -= 1
ans = []
for i in range(N):
for j in range(i+1, N):
if not is_ban[i][j]:
ans.append((i, j))
print(len(ans))
for u, v in ans:
print(u+1, v+1) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
8e36788a9166458db4757af09f813273790f05c5 | 0af29dc561a34a8191f456ec24f6a77bea104b89 | /recurrent-neural-networks/neural-language-models/character-language-model-generator/modelv2.py | ed3cb95374e52c5f2969993e62ddb2fd3467ca76 | [] | no_license | cheeyeo/Machine_learning_portfolio | c4eea8390b2540706d9b8e9df0b491f3f434494b | 927cc9eb3de394dcaa00a4178d873df9798921e4 | refs/heads/master | 2020-05-04T11:16:22.188024 | 2019-05-25T14:18:58 | 2019-05-25T14:18:58 | 179,104,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | from keras.models import Sequential
from keras.layers import LSTM, Dense, TimeDistributed
from keras.optimizers import Adam
# Defines a seq2seq model
def define_model_v2(seq_len, vocab_size):
model = Sequential()
model.add(LSTM(100, input_shape=(seq_len, vocab_size), return_sequences=True))
model.add(TimeDistributed(Dense(vocab_size, activation='softmax')))
opt = Adam(lr=0.01, clipvalue=5.0)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['acc'])
model.summary()
return model
| [
"ckyeo.1@gmail.com"
] | ckyeo.1@gmail.com |
a15f6c7f7005994b341fad929657892042159151 | e9dc0573d42ee003c563c12ba5e4241a70e88b29 | /old_documents/lms_app/migrations/0005_auto_20210823_1009.py | 131277a5649cc235bf712d562e2fff3d6ec21ab3 | [] | no_license | kamaliselvarajk/aspire | 279eed753db940d8feb3066e0885896d98549d3e | 3b61bad965a11877c4b63a7b93ea5f76f24ac96f | refs/heads/main | 2023-08-24T12:25:08.927712 | 2021-10-14T17:22:17 | 2021-10-14T17:22:17 | 383,044,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | # Generated by Django 3.2.5 on 2021-08-23 04:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('lms_app', '0004_leaverequest'),
]
operations = [
migrations.RemoveField(
model_name='leaverequest',
name='status',
),
migrations.AlterField(
model_name='leaverequest',
name='manager_name',
field=models.CharField(max_length=50),
),
migrations.CreateModel(
name='LeaveApprove',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(max_length=20)),
('cancel_reason', models.CharField(max_length=100)),
('manager_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"kamali.selvaraj@aspiresys.com"
] | kamali.selvaraj@aspiresys.com |
3bbf1c97245dd0a764aca70754af6b208acc406d | 45de3aa97525713e3a452c18dcabe61ac9cf0877 | /src/secondaires/navigation/masques/point_visible/__init__.py | a2b98c97ed671cc44f4f3eb9ca4d18ccd338fba7 | [
"BSD-3-Clause"
] | permissive | stormi/tsunami | 95a6da188eadea3620c70f7028f32806ee2ec0d1 | bdc853229834b52b2ee8ed54a3161a1a3133d926 | refs/heads/master | 2020-12-26T04:27:13.578652 | 2015-11-17T21:32:38 | 2015-11-17T21:32:38 | 25,606,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,741 | py | # -*-coding:Utf-8 -*
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le masque <point_visible>."""
from primaires.format.fonctions import contient, supprimer_accents
from primaires.interpreteur.masque.masque import Masque
from primaires.interpreteur.masque.fonctions import *
from primaires.interpreteur.masque.exceptions.erreur_validation \
import ErreurValidation
from secondaires.navigation.constantes import *
from secondaires.navigation.visible import Visible
class VPointVisible(Masque):
"""Masque <point_visible>.
On attend un point observable en paramètre.
"""
nom = "point_visible"
nom_complet = "direction"
def init(self):
"""Initialisation des attributs"""
self.points = None
self.retour = ""
def repartir(self, personnage, masques, commande):
"""Répartition du masque."""
point = liste_vers_chaine(commande)
self.a_interpreter = point
commande[:] = []
masques.append(self)
return True
def valider(self, personnage, dic_masques):
"""Validation du masque"""
Masque.valider(self, personnage, dic_masques)
point = self.a_interpreter
salle = personnage.salle
if not hasattr(salle, "navire"):
return False
navire = salle.navire
etendue = navire.etendue
alt = etendue.altitude
portee = get_portee(salle)
if point:
point = supprimer_accents(point)
limite = 45
precision = 5
if point == "arriere":
direction = 180
elif point == "babord":
direction = -90
elif point == "tribord":
direction = 90
elif point in ("avant", "devant"):
direction = 0
else:
raise ErreurValidation("|err|Direction invalide.|ff|")
else:
direction = 0
limite = 90
precision = 15
# On récupère les points
points = Visible.observer(personnage, portee, precision,
{"": navire})
msg = points.formatter(direction, limite)
self.points = points
self.retour = msg
| [
"kredh@free.fr"
] | kredh@free.fr |
4a9e98c8b6df75b5063e618920505c559fb5d06e | 417448ce51e21233736c2f5eab7a0960c8cdcb3f | /Selenium/Tutorial/Amazon_Script.py | 3bafb73cafc9bed749ffec6917175a393dd3ddb3 | [] | no_license | ravalrupalj/Small_Projects | dcfd4da5a2e3dab4ccb16c693da7ecfd28c30312 | 89e0232503049468e181b387391abfbc523933a0 | refs/heads/master | 2022-12-25T04:18:06.296291 | 2020-10-08T01:58:13 | 2020-10-08T01:58:13 | 268,957,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,883 | py | from selenium import webdriver
driver= webdriver.Chrome(executable_path='C:\\Users\\raval\\Documents\\chromedriver_win32\\chromedriver.exe')
driver.get('https://www.amazon.ca/')
driver.maximize_window()
driver.implicitly_wait(5)
driver.find_element_by_id("twotabsearchtextbox").send_keys("ashlin wallet")
driver.find_element_by_css_selector("input[value='Go']").click()
driver.find_element_by_xpath("//img[@alt='ASHLIN RFID Blocking Wallet| Made with #1 Grade Napa Genuine Leather Excellent Credit Card Protector |10 Credit Card Pockets']").click()
driver.find_element_by_id("add-to-cart-button").click()
driver.back()
driver.back()
driver.find_element_by_xpath("(//img[@alt=\"Ashlin RFID Blocking Men's SLIM BI-fold Wallet - 100% Genuine Leather wallet with lined currency compartment\"])[1]").click()
driver.find_element_by_id("add-to-cart-button").click()
driver.back()
driver.back()
driver.find_element_by_xpath("(//img[@alt='ASHLIN RFID Blocking Wallet| Made with #1 Grade Napa Genuine Leather Excellent Credit Card Protector |10 Credit Card Pockets'])[2]").click()
driver.find_element_by_id("add-to-cart-button").click()
driver.back()
driver.back()
driver.find_element_by_xpath("(//img[@alt=\"ASHLIN Men's Bi-fold Wallet - 100% Lambskin Napa | Double Billfold Section | Midnight Black [5748-07-01]\"])[1]").click()
driver.find_element_by_id("add-to-cart-button").click()
driver.find_element_by_xpath("//a[@id='hlb-view-cart-announce']").click()
assert "ASHLIN Men's Bi-fold Wallet - 100% Lambskin Napa | Double Billfold Section | Midnight Black [5748-07-01]"==driver.find_element_by_xpath("(//span[@class='a-size-medium sc-product-title a-text-bold'])[1]").text
print(driver.find_element_by_xpath("(//span[@class='a-size-medium sc-product-title a-text-bold'])[1]").text)
assert "ASHLIN RFID Blocking Wallet| Made with #1 Grade Napa Genuine Leather Excellent Credit Card Protector |10 Credit Card Pockets" ==driver.find_element_by_xpath("(//span[@class='a-size-medium sc-product-title a-text-bold'])[2]").text
print(driver.find_element_by_xpath("(//span[@class='a-size-medium sc-product-title a-text-bold'])[2]").text)
assert "Ashlin RFID Blocking Men's SLIM BI-fold Wallet - 100% Genuine Leather wallet with lined currency compartment" == driver.find_element_by_xpath("(//span[@class='a-size-medium sc-product-title a-text-bold'])[3]").text
print(driver.find_element_by_xpath("(//span[@class='a-size-medium sc-product-title a-text-bold'])[3]").text)
#assert "CDN$ 70.89" == driver.find_element_by_xpath("//span[@class='a-size-medium a-color-base sc-price sc-white-space-nowrap']").text
print(driver.find_element_by_xpath("//span[@class='a-size-medium a-color-base sc-price sc-white-space-nowrap']").text)
#driver.find_element_by_id("hlb-ptc-btn").click()
#driver.find_element_by_id("ap_email").send_keys("ravalrupalj@gmail.com")
#driver.find_element_by_id("continue").click() | [
"63676082+ravalrupalj@users.noreply.github.com"
] | 63676082+ravalrupalj@users.noreply.github.com |
1600aa38bd05891af088262a94401d768b4f2f10 | 5e64335865c817eec677a2236709f3e73eb3ca9f | /utilities/tests/TestProject/pythonScripts/RunTests.py | e76fb6ce23b5a196337e2ea6d76fa93d94e7a562 | [] | no_license | Neurosim-lab/osb-model-validation | 3631ede96d811006fcc5f381faf1739f3e72a3e0 | d661b96682d7229ec94778380dd7978be254bb60 | refs/heads/master | 2020-03-28T11:15:13.957592 | 2018-09-10T19:20:37 | 2018-09-10T19:20:37 | 148,193,132 | 1 | 0 | null | 2018-09-10T17:29:54 | 2018-09-10T17:29:54 | null | UTF-8 | Python | false | false | 2,538 | py | #
#
# File to test current configuration of project.
#
# To execute this type of file, type 'nC.bat -python XXX.py' (Windows)
# or './nC.sh -python XXX.py' (Linux/Mac). Note: you may have to update the
# NC_HOME and NC_MAX_MEMORY variables in nC.bat/nC.sh
#
# Author: Padraig Gleeson
#
#
import sys
import os
try:
from java.io import File
except ImportError:
print "Note: this file should be run using ..\\..\\..\\nC.bat -python XXX.py' or '../../../nC.sh -python XXX.py'"
print "See http://www.neuroconstruct.org/docs/python.html for more details"
quit()
sys.path.append(os.environ["NC_HOME"]+"/pythonNeuroML/nCUtils")
import ncutils as nc
projFile = File(os.getcwd(), "../TestProject.ncx")
print "Project file for this test: "+ projFile.getAbsolutePath()
############## Main settings ##################
simConfigs = []
simConfigs.append("Default Simulation Configuration")
simDt = 0.001
simulators = ["NEURON"]
# simulators = ["NEURON", "LEMS"]
numConcurrentSims = 4
varTimestepNeuron = False
plotSims = True
plotVoltageOnly = True
analyseSims = True
runInBackground = True
verbose = False
#############################################
def testAll(argv=None):
if argv is None:
argv = sys.argv
print "Loading project from "+ projFile.getCanonicalPath()
simManager = nc.SimulationManager(projFile,
numConcurrentSims,
verbose)
simManager.runMultipleSims(simConfigs = simConfigs,
simDt = simDt,
simulators = simulators,
runInBackground = runInBackground)
simManager.reloadSims(plotVoltageOnly = plotVoltageOnly,
plotSims = plotSims,
analyseSims = analyseSims)
# These were discovered using analyseSims = True above.
# They need to hold for all simulators
spikeTimesToCheck = {'SampleCellGroup_0' : [21.6, 35.171, 48.396, 61.602, 74.807]}
spikeTimeAccuracy = 0.0
report = simManager.checkSims(spikeTimesToCheck = spikeTimesToCheck,
spikeTimeAccuracy = spikeTimeAccuracy)
print report
return report
if __name__ == "__main__":
testAll()
| [
"p.gleeson@gmail.com"
] | p.gleeson@gmail.com |
22fb6522d48eab678fbf0b4989078a87265c6b77 | e91f477713556f14b288b89ecce89754d4bd93f7 | /ML/rl/rl_utils.py | 7566686d6b7a40292181a9ba78acee9e48bc8b65 | [
"MIT"
] | permissive | PepSalehi/algorithms | 715603ad16c320c0f1d32c544062b71b11814263 | 1c20f57185e6324aa840ccff98e69764b4213131 | refs/heads/master | 2020-12-28T23:24:39.542742 | 2019-02-01T05:17:56 | 2019-02-01T05:17:56 | 14,173,271 | 0 | 0 | MIT | 2019-02-01T05:17:57 | 2013-11-06T13:27:34 | Python | UTF-8 | Python | false | false | 2,462 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Utility functions for Reinforcement Learning."""
# core modules
import logging
import os
# 3rd party modules
import yaml
# General code for loading ML configuration files
def load_cfg(yaml_filepath):
"""
Load a YAML configuration file.
Parameters
----------
yaml_filepath : str
Returns
-------
cfg : dict
"""
# Read YAML experiment definition file
with open(yaml_filepath, 'r') as stream:
cfg = yaml.load(stream)
cfg = make_paths_absolute(os.path.dirname(yaml_filepath), cfg)
return cfg
def make_paths_absolute(dir_, cfg):
"""
Make all values for keys ending with `_path` absolute to dir_.
Parameters
----------
dir_ : str
cfg : dict
Returns
-------
cfg : dict
"""
for key in cfg.keys():
if key.endswith("_path"):
cfg[key] = os.path.join(dir_, cfg[key])
cfg[key] = os.path.abspath(cfg[key])
if not os.path.isfile(cfg[key]):
logging.error("%s does not exist.", cfg[key])
if type(cfg[key]) is dict:
cfg[key] = make_paths_absolute(dir_, cfg[key])
return cfg
def test_agent(cfg, env, agent):
"""Calculate average reward."""
cum_reward = 0.0
for episode in range(cfg['testing']['nb_epochs']):
agent.reset()
observation_previous = env.reset()
is_done = False
while not is_done:
action = agent.act(observation_previous, no_exploration=True)
observation, reward, is_done, _ = env.step(action)
cum_reward += reward
observation_previous = observation
return cum_reward / cfg['testing']['nb_epochs']
def get_parser():
"""Get parser object."""
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--env",
dest="environment_name",
help="OpenAI Gym environment",
metavar="ENVIRONMENT",
default="FrozenLake-v0")
parser.add_argument("--agent",
dest="agent_cfg_file",
required=True,
metavar="AGENT_YAML",
help="Configuration file for the agent")
return parser
| [
"info@martin-thoma.de"
] | info@martin-thoma.de |
f1654e6ae542ec6bf12b305385f26c7ef3610381 | 964c83b67a45717874292468ded6d85ed69c2c9f | /reg_sign_in_out/views.py | 361b417c0ba82d171eab42791f39bc27ad0eb7b3 | [] | no_license | deshiyan1010/Colabratory | bddf6d8a21a568b33827ce3ca8763930764f7197 | 9edce90ba993e06d1e5f1cdef268d33af51f1dd5 | refs/heads/master | 2022-12-17T19:28:33.328936 | 2020-09-21T17:29:43 | 2020-09-21T17:29:43 | 281,600,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,364 | py | from django.shortcuts import render
from reg_sign_in_out.models import *
from . import forms
from django.contrib.auth import authenticate,login,logout
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_protect
import razorpay
def index(request):
return render(request,"index.html")
@login_required
def special(request):
return HttpResponse("In!")
@login_required
def user_logout(request):
logout(request)
return HttpResponseRedirect(reverse('index'))
@csrf_protect
def registration(request):
registered = False
if request.method == "POST":
form = forms.UserForm(request.POST)
profileform = forms.RegistrationForm(request.POST,request.FILES)
if form.is_valid() and profileform.is_valid():
user = form.save()
user.set_password(user.password)
user.save()
profile = profileform.save(commit=False)
profile.user = user
profile.save()
registered = True
return HttpResponseRedirect(reverse('reg_sign_in_out:payment'))
else:
print(form.errors,profileform.errors)
return render(request,"reg_sign_in_out/registration.html",{"tried":"True",
"registered":registered,
"profile_form":profileform,
"user_form":form,
})
else:
user = forms.UserForm()
profileform = forms.RegistrationForm()
return render(request,"reg_sign_in_out/registration.html",{"registered":registered,
"profile_form":profileform,
"user_form":user,
})
@csrf_protect
def user_login(request):
if request.method == "POST":
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username,password=password)
if user:
if user.is_active:
login(request,user)
return HttpResponseRedirect(reverse('posts:home'))
else:
return render(request,"reg_sign_in_out/login.html",{'tried':'True'})
else:
return render(request,"reg_sign_in_out/login.html")
@login_required
def payment(request):
obj = Registration.objects.get(user__username=request.user.username)
if obj.paid==False:
client = razorpay.Client(auth = ('rzp_test_iCqL53D2oVdlIL', 'A5bcZDlcdxB6qz5K6O4i5eD1'))
payment = client.order.create({'amount':10000000, 'currency':'INR', 'payment_capture':'1'})
if request.method=="POST":
obj.paid = True
obj.order_id = request.POST["razorpay_order_id"]
obj.save()
return HttpResponseRedirect(reverse('profilepage:profilepage'))
return render(request,"reg_sign_in_out/payment.html",{'payment':payment})
else:
return render(request,"reg_sign_in_out/paid.html") | [
"vinayakamikkal@gmail.com"
] | vinayakamikkal@gmail.com |
9d6644181164a6acae72a68b2658055bb2528631 | 0ca7c7bdb297439554777e126ae8a2999962b7fe | /venv/Lib/site-packages/gevent/tests/test__refcount.py | d3c3430105bc53cd5a82709d8d68fb2181c48835 | [] | no_license | YazLuna/APIExpressJobs | 6c0857f63180bf5163d11fa9d1a411e44a4ba46f | cd52bc8d0d60100091637ef79f78cc79d58a1495 | refs/heads/master | 2023-06-13T02:50:57.672295 | 2021-06-18T14:57:53 | 2021-06-18T14:57:53 | 367,244,876 | 0 | 1 | null | 2021-06-18T14:57:53 | 2021-05-14T04:05:43 | Python | UTF-8 | Python | false | false | 6,028 | py | # Copyright (c) 2008 AG Projects
# Author: Denis Bilenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""This test checks that underlying socket instances (gevent.socket.socket._sock)
are not leaked by the hub.
"""
from __future__ import print_function
from _socket import socket as c_socket
import sys
if sys.version_info[0] >= 3:
# Python3 enforces that __weakref__ appears only once,
# and not when a slotted class inherits from an unslotted class.
# We mess around with the class MRO below and violate that rule
# (because socket.socket defines __slots__ with __weakref__),
# so import socket.socket before that can happen.
__import__('socket')
Socket = c_socket
else:
class Socket(c_socket):
"Something we can have a weakref to"
import _socket
_socket.socket = Socket
from gevent import monkey; monkey.patch_all()
import gevent.testing as greentest
from gevent.testing import support
from gevent.testing import params
try:
from thread import start_new_thread
except ImportError:
from _thread import start_new_thread
from time import sleep
import weakref
import gc
import socket
socket._realsocket = Socket
SOCKET_TIMEOUT = 0.1
if greentest.RESOLVER_DNSPYTHON:
# Takes a bit longer to resolve the client
# address initially.
SOCKET_TIMEOUT *= 2
if greentest.RUNNING_ON_CI:
SOCKET_TIMEOUT *= 2
class Server(object):
listening = False
client_data = None
server_port = None
def __init__(self, raise_on_timeout):
self.raise_on_timeout = raise_on_timeout
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.server_port = support.bind_port(self.socket, params.DEFAULT_BIND_ADDR)
except:
self.close()
raise
def close(self):
self.socket.close()
self.socket = None
def handle_request(self):
try:
self.socket.settimeout(SOCKET_TIMEOUT)
self.socket.listen(5)
self.listening = True
try:
conn, _ = self.socket.accept()
except socket.timeout:
if self.raise_on_timeout:
raise
return
try:
self.client_data = conn.recv(100)
conn.send(b'bye')
finally:
conn.close()
finally:
self.close()
class Client(object):
server_data = None
def __init__(self, server_port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_port = server_port
def close(self):
self.socket.close()
self.socket = None
def make_request(self):
try:
self.socket.connect((params.DEFAULT_CONNECT, self.server_port))
self.socket.send(b'hello')
self.server_data = self.socket.recv(100)
finally:
self.close()
class Test(greentest.TestCase):
__timeout__ = greentest.LARGE_TIMEOUT
def run_interaction(self, run_client):
server = Server(raise_on_timeout=run_client)
wref_to_hidden_server_socket = weakref.ref(server.socket._sock)
client = None
start_new_thread(server.handle_request)
if run_client:
client = Client(server.server_port)
start_new_thread(client.make_request)
# Wait until we do our business; we will always close
# the server; We may also close the client.
# On PyPy, we may not actually see the changes they write to
# their dicts immediately.
for obj in server, client:
if obj is None:
continue
while obj.socket is not None:
sleep(0.01)
# If we have a client, then we should have data
if run_client:
self.assertEqual(server.client_data, b'hello')
self.assertEqual(client.server_data, b'bye')
return wref_to_hidden_server_socket
def run_and_check(self, run_client):
wref_to_hidden_server_socket = self.run_interaction(run_client=run_client)
greentest.gc_collect_if_needed()
if wref_to_hidden_server_socket():
from pprint import pformat
print(pformat(gc.get_referrers(wref_to_hidden_server_socket())))
for x in gc.get_referrers(wref_to_hidden_server_socket()):
print(pformat(x))
for y in gc.get_referrers(x):
print('-', pformat(y))
self.fail('server socket should be dead by now')
def test_clean_exit(self):
self.run_and_check(True)
self.run_and_check(True)
def test_timeout_exit(self):
self.run_and_check(False)
self.run_and_check(False)
if __name__ == '__main__':
greentest.main()
| [
"ale_200200@hotmail.com"
] | ale_200200@hotmail.com |
08971a923b58b73eb2808bccbadaaf7dcaaaa8e1 | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /前端笔记/thirtysecondsofcode/python/python的typings/15_可变元组.py | 388a8783be183c592d8de18c96e96c90e7e186e7 | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | from typing import Tuple
def f(t: Tuple[int, str]) -> None:
t = 1, 'foo' # OK
# t = 'foo', 1 # Type check error
# as immutable, varying-length sequences
# 元组也可以用作不变的、可变长度的序列
def print_squared(t: Tuple[int, ...]) -> None:
for n in t:
print(n, n ** 2)
print_squared(()) # OK
print_squared((1, 3, 5)) # OK
print_squared([1, 2]) # Error: only a tuple is valid
# 通常使用 Sequence [ t ]代替 Tuple [ t,... ]是一个更好的主意,
# 因为 Sequence 也可以兼容 list 和其他非 Tuple 序列。
| [
"lmt2818088@gmail.com"
] | lmt2818088@gmail.com |
d56d529290d5fca12af0043705c03bfcf41f290b | c39f999cae8825afe2cdf1518d93ba31bd4c0e95 | /PYME/DSView/modules/shell.py | 9debe00a7baf593cae3782bd4eeda236c0d7d023 | [] | no_license | WilliamRo/CLipPYME | 0b69860136a9b2533f2f29fc29408d7471cb934d | 6596167034c727ad7dad0a741dd59e0e48f6852a | refs/heads/master | 2023-05-11T09:50:58.605989 | 2023-05-09T02:17:47 | 2023-05-09T02:17:47 | 60,789,741 | 3 | 1 | null | 2016-06-17T08:52:44 | 2016-06-09T16:30:14 | Python | UTF-8 | Python | false | false | 1,225 | py | #!/usr/bin/python
##################
# shell.py
#
# Copyright David Baddeley, 2011
# d.baddeley@auckland.ac.nz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################
import wx.py.shell
def Plug(dsviewer):
sh = wx.py.shell.Shell(id=-1,
parent=dsviewer, pos=wx.Point(0, 0), size=wx.Size(618, 451), style=0, locals=dsviewer.__dict__,
introText='note that help, license etc below is for Python, not PYME\n\n')
sh.Execute('from pylab import *')
sh.Execute('from PYME.DSView import View3D, ViewIm3D')
dsviewer.AddPage(page=sh, select=False, caption='Console')
dsviewer.sh = sh | [
"willi4m@zju.edu.cn"
] | willi4m@zju.edu.cn |
b7a1cf6a73df1281ceebfdd3a8392fd02d8d3f68 | 01c39e5ac5398658f56e069a1f4c0142496a07f9 | /master/serializer.py | 17a5718aad1cbe8914485af00a9935eb95682aa7 | [] | no_license | vshaladhav97/first_kick | f95c0f402e7f0e869c05c1abf58404bb9a7b7863 | 367cccca72f0eae6c3ccb70fabb371dc905f915e | refs/heads/master | 2023-08-21T05:25:33.211862 | 2021-10-12T11:04:56 | 2021-10-12T11:04:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,543 | py | from django.db.models import fields
from rest_framework import serializers
from .models import (Company, AddressDetail, Location, AgeGroup, Months, PlayingSurface,
CourseType, EventType, WeekDay, ClassStatus, Ages)
class AddressDetailSerializer(serializers.ModelSerializer):
class Meta:
model = AddressDetail
fields = (
'id',
'address_line_1',
'address_line_2',
'address_line_3',
'town',
'country',
)
class CompanySerializer(serializers.ModelSerializer):
class Meta:
model = Company
fields = (
'id',
'company_name',
)
class LocationSerializer(serializers.ModelSerializer):
company = CompanySerializer(read_only=True)
class Meta:
model = Location
fields = (
'id',
'location',
'company',
'address_line_1',
'address_line_1',
'address_line_2',
'address_line_3',
'town',
'country',
'postal_code'
)
class LocationForAnalyticsSerializer(serializers.ModelSerializer):
class Meta:
model = Location
fields = (
'id',
'location',
)
class CompanySerializer(serializers.ModelSerializer):
class Meta:
model = Company
fields = (
'id',
'company_name',
)
class LocationDataTableSerializer(serializers.ModelSerializer):
class Meta:
model = Location
fields = (
'id',
'company',
'location',
'address_line_1',
'town',
'postal_code',
'playing_surface',
)
def to_representation(self, instance):
data = super().to_representation(instance)
company = Company.objects.get(pk=data['company'])
data['company'] = company.company_name
# playing_surface = PlayingSurface.objects.get(pk=data['playing_surface'])
# data['playing_surface'] = playing_surface.surface
return data
class LocationDataTableSerializerForPrepolated(serializers.ModelSerializer):
class Meta:
model = Location
fields = (
'id',
'company',
'location',
'address_line_1',
'town',
'postal_code',
'playing_surface',
)
def to_representation(self, instance):
data = super().to_representation(instance)
company = Company.objects.get(pk=data['company'])
data['company'] = company.company_name
playing_surface = PlayingSurface.objects.get(pk=data['playing_surface'])
data['playing_surface'] = playing_surface.surface
data['playing_surface_id'] = playing_surface.id
return data
class AgeGroupSerializer(serializers.ModelSerializer):
class Meta:
model = AgeGroup
fields = (
'id',
'age_group_text',
)
class CourseTypeSerializer(serializers.ModelSerializer):
class Meta:
model = CourseType
fields = (
'id',
'course_name',
'course_description',
'course_title',
)
class EvenTypeSerializer(serializers.ModelSerializer):
class Meta:
model = EventType
fields = (
'id',
'type_name',
)
class WeekDaySerializer(serializers.ModelSerializer):
class Meta:
model = WeekDay
fields = (
'id',
'weekday',
)
class ClassStatusSerializer(serializers.ModelSerializer):
class Meta:
model = ClassStatus
fields = (
'id',
'status_name',
)
class MonthSerializer(serializers.ModelSerializer):
class Meta:
model = Months
fields = (
'id',
'month',
)
class AgeSerializer(serializers.ModelSerializer):
class Meta:
model = Ages
fields = (
'id',
'age',
)
class PlayingSurfaceSerializer(serializers.ModelSerializer):
class Meta:
model = PlayingSurface
fields = (
'id',
'surface',
)
class CompanyNameDropdownSelectionSerializer(serializers.ModelSerializer):
"""Company Name serializer for dropdown"""
class Meta:
model = Company
fields = ("id", "company_name",) | [
"adhavv0@gmail.com"
] | adhavv0@gmail.com |
83441941cb7936a690cd4274636b264724009523 | a6a27234bb623c047fe86e91c720a50ba6ab641f | /sctt/sctt/calibration/first_cracking_stress.py | ce27bff06a658a7fad9fc085cab9aabb124fabbf | [] | no_license | liyingxiong/sctt | 4f87b1bdeb09eafb2831e699fd82b4a0d9db9099 | f3f9af2be80d39a70668a8bbf9f1e1458ee0fbc3 | refs/heads/master | 2021-01-23T03:28:17.314156 | 2018-12-11T11:18:33 | 2018-12-11T11:18:33 | 20,015,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,385 | py | '''
Identify the first cracking stress according to the derivatives of the stress-strain diagram.
@author: Yingxiong
'''
import numpy as np
from scipy.interpolate import interp1d, UnivariateSpline
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
from lmfit import minimize, Parameters, Parameter, report_fit
eps_max_lst = []
for j in range(5):
filepath1 = 'D:\\data\\TT-6C-0' + str(j + 1) + '.txt'
data = np.loadtxt(filepath1, delimiter=';')
eps_max_lst.append(
np.amax(-data[:, 2] / 2. / 250. - data[:, 3] / 2. / 250.))
eps_max = np.amin(eps_max_lst)
eps_arr = np.linspace(0, eps_max, 1000)
sig_lst = []
params = Parameters()
params.add('k1', value=0., min=0.)
params.add('k2', value=0., min=0.)
params.add('k3', value=0., min=0.)
params.add('a', value=0., min=5e-5, max=0.001)
params.add('b', value=0., min=0.001, max=0.005)
def f(params, x, data):
k1 = params['k1'].value
k2 = params['k2'].value
k3 = params['k3'].value
a = params['a'].value
b = params['b'].value
return k1 * x * (x <= a) + (k1 * a + k2 * (x - a)) * (x > a) * (x <= b) + (k1 * a + k2 * (b - a) + k3 * (x - b)) * (x > b) - data
for j in range(5):
filepath1 = 'D:\\data\\TT-6C-0' + str(j + 1) + '.txt'
data = np.loadtxt(filepath1, delimiter=';')
interp_exp = interp1d(-data[:, 2] / 2. / 250. - data[:, 3] / 2. / 250.,
data[:, 1] / 2., bounds_error=False, fill_value=0.)
# interp_exp = UnivariateSpline(-data[:, 2] / 2. / 250. - data[:, 3] / 2. / 250.,
# data[:, 1] / 2., k=3)
# popt, pcov = curve_fit(f, eps_arr, interp_exp(eps_arr))
# k1, k2, a = popt
# e = f(eps_arr, k1, k2, a)
# plt.plot(eps_arr, e)
sig_lst.append(interp_exp(eps_arr))
result = minimize(
f, params, method='powell', args=(eps_arr, interp_exp(eps_arr)))
final = interp_exp(eps_arr) + result.residual
print(params)
# print interp_exp(params['a'].value)
print((interp_exp(params['a'].value) * 25 / (25 * 0.985 + 2.7)))
if j == 1:
plt.plot(eps_arr, interp_exp(eps_arr))
plt.plot(eps_arr, final, 'k--')
# plt.plot(eps_arr, interp_exp(eps_arr))
sig_avg = np.sum(sig_lst, axis=0) / 5.
# plt.plot(eps_arr, sig_avg)
for k in range(5):
dsig = np.gradient(sig_lst[k])
# plt.plot(eps_arr, dsig)
plt.show()
| [
"rostislav.chudoba@rwth-aachen.de"
] | rostislav.chudoba@rwth-aachen.de |
4dc6e3283fd599472b323f22fc8998f493be658c | 215e491c9962f2e199f7f84a5743196f21da0332 | /week-01-unit-testing/examples/calculator/calculator_test.py | 722e6f3bf9e8548430069e813b522daf6f74fe24 | [] | no_license | kstager/Python300-SystemDevelopmentWithPython-Fall-2014 | dd07190e6a55470c44bbb366cb95a1d716b86866 | 7e85ef68bc59d311ec748b333e0bad9357b88855 | refs/heads/master | 2020-12-31T06:32:20.948094 | 2014-11-12T01:34:56 | 2014-11-12T01:34:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | import unittest
import calculator_functions as calc
class TestCalculatorFunctions(unittest.TestCase):
def setUp(self):
self.x = 2
self.y = 3
def test_add(self):
self.assertEqual(calc.add(self.x, self.y), 5)
if __name__ == "__main__":
unittest.main()
| [
"joseph.sheedy@gmail.com"
] | joseph.sheedy@gmail.com |
a5551807162c6a3b053a0c940419c974b142c505 | 4db29e0d5f2e050d21bbf67042c713d8fa0421b0 | /com/mason/redis/part_two/chapter06/chapter0612.py | 0320f31d7421bb54908fed4b9d89d3079f14ce83 | [] | no_license | MasonEcnu/RedisInAction | 80e5556554c7e390264edd391042b09271cbfca4 | 710fd0316c6aee857acd350a092b657465096ed1 | refs/heads/master | 2020-07-08T17:24:39.540181 | 2019-09-30T04:14:49 | 2019-09-30T04:14:49 | 203,731,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,857 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# 通讯录自动补全
# 我们将把有序集合里面的所有分值都设置为 0 — 这种做法使得我们可以使用有序集合的
# 另一个特性:当所有成员的分值都相同时,有序集合将根据成员的名字来进行排序;而当所有成
# 员的分值都是 0 的时候,成员将按照字符串的二进制顺序进行排序。
import bisect
import uuid
import redis
from redis import Redis
from com.mason.redis_client import redisClient
valid_characters = "`abcdefghijklmnopqrstuvwxyz{"
def find_prefix_range(prefix: str):
# [-1:] 取字符串或列表的最后一个元素
# [:-1] 取到字符串或列表的最后一个元素(不包含最后一个元素)
# [a,b)前闭后开区间
pos = bisect.bisect_left(valid_characters, prefix[-1:])
suffix = valid_characters[(pos or 1) - 1]
return prefix[:-1] + suffix + '{', prefix + '{'
# print(find_prefix_range("prefix"))
def autocomplete_on_prefix(conn: Redis, guild, prefix):
# 根据给定的前缀计算出查找的范围
start, end = find_prefix_range(prefix)
identifier = str(uuid.uuid4())
start += identifier
end += identifier
zset_name = "members:" + guild
# 将范围的起始和结束元素添加到有序集合中
conn.zadd(zset_name, {start: 0, end: 0})
pipe = conn.pipeline(True)
items = []
while 1:
try:
pipe.watch(zset_name)
# 找到开始和结束元素在有序列表中的排名
start_rank = pipe.zrank(zset_name, start)
end_rank = pipe.zrank(zset_name, end)
# 程序最多只会取出 10 个元素
query_range = min(start_rank + 9, end_rank - 2)
pipe.multi()
pipe.zrem(zset_name, start, end)
pipe.zrange(zset_name, start_rank, query_range)
items = pipe.execute()[-1]
break
except redis.exceptions.WatchError:
# 如果自动补全集合被其他客户端修改过
# 则重试
continue
# 如果有其他自动补全操作正在执行,那么从获
# 取到的元素里面移除起始元素和结束元素
return [item for item in items if "{" not in item]
def join_guild(conn: Redis, guild, user):
conn.zadd("members:" + guild, {user: 0})
def leave_guild(conn: Redis, guild, user):
conn.zrem("members:" + guild, user)
guild = "10086"
redisClient.delete("members:" + guild)
join_guild(redisClient, guild, "mason")
join_guild(redisClient, guild, "yahaha")
join_guild(redisClient, guild, "lilei")
join_guild(redisClient, guild, "hmeimei")
join_guild(redisClient, guild, "mmmeee")
join_guild(redisClient, guild, "lulala")
print(autocomplete_on_prefix(redisClient, guild, "mma"))
redisClient.delete("members:" + guild)
| [
"364207187@qq.com"
] | 364207187@qq.com |
b22f9dbd128e6619dfe9a6447d989c2eb3054788 | c15a28ae62eb94dbf3ed13e2065195e572a9988e | /Cook book/src/8/how_to_define_an_interface_or_abstract_base_class/example.py | 2d4af5f1e99b6b30415fd216144a85ceba321fa2 | [] | no_license | xuyuchends1/python | 10798c92840a1a59d50f5dc5738b2881e65f7865 | 545d950a3d2fee799902658e8133e3692939496b | refs/heads/master | 2021-01-25T07:07:04.812140 | 2020-02-28T09:25:15 | 2020-02-28T09:25:15 | 93,647,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | # Defining a simple abstract base class
from abc import ABCMeta, abstractmethod
class IStream(metaclass=ABCMeta):
@abstractmethod
def read(self, maxbytes=-1):
pass
@abstractmethod
def write(self, data):
pass
# Example implementation
class SocketStream(IStream):
def read(self, maxbytes=-1):
print('reading')
def write(self, data):
print('writing')
# Example of type checking
def serialize(obj, stream):
if not isinstance(stream, IStream):
raise TypeError('Expected an IStream')
print('serializing')
# Examples
if __name__ == '__main__':
# Attempt to instantiate ABC directly (doesn't work)
try:
a = IStream()
except TypeError as e:
print(e)
# Instantiation of a concrete implementation
a = SocketStream()
a.read()
a.write('data')
# Passing to type-check function
serialize(None, a)
# Attempt to pass a file-like object to serialize (fails)
import sys
try:
serialize(None, sys.stdout)
except TypeError as e:
print(e)
# Register file streams and retry
import io
IStream.register(io.IOBase)
serialize(None, sys.stdout)
| [
"xuyuchends@163.com"
] | xuyuchends@163.com |
9769824aa87397a44d261ff48b366b0009967fbf | fa6fa9e154a205d575eda6615e8b62f4cce77a3d | /office365/sharepoint/permissions/permission_kind.py | 699fa02451b6d9b81eba7759583a17ab35a9b527 | [
"MIT"
] | permissive | beliaev-maksim/Office365-REST-Python-Client | 7f94b7b40227de1192bfc0cb325107482caf443c | b2fd54701d83cc91eb5ba3a0ec352a93ded24885 | refs/heads/master | 2023-08-14T20:47:51.972883 | 2021-09-05T12:44:47 | 2021-09-05T12:44:47 | 283,984,055 | 0 | 0 | MIT | 2020-07-31T08:30:48 | 2020-07-31T08:30:48 | null | UTF-8 | Python | false | false | 1,026 | py | class PermissionKind:
"""Specifies permissions that are used to define user roles."""
def __init__(self):
pass
EmptyMask = 0
ViewListItems = 1
AddListItems = 2
EditListItems = 3
DeleteListItems = 4
ApproveItems = 5
OpenItems = 6
ViewVersions = 7
DeleteVersions = 8
CancelCheckout = 9
ManagePersonalViews = 10
ManageLists = 12
ViewFormPages = 13
AnonymousSearchAccessList = 14
Open = 17
ViewPages = 18
AddAndCustomizePages = 19
ApplyThemeAndBorder = 20
ApplyStyleSheets = 21
ViewUsageData = 22
CreateSSCSite = 23
ManageSubwebs = 24
CreateGroups = 25
ManagePermissions = 26
BrowseDirectories = 27
BrowseUserInfo = 28
AddDelPrivateWebParts = 29
UpdatePersonalWebParts = 30
ManageWeb = 31
AnonymousSearchAccessWebLists = 32
UseClientIntegration = 37
UseRemoteAPIs = 38
ManageAlerts = 39
CreateAlerts = 40
EditMyUserInfo = 41
EnumeratePermissions = 63
FullMask = 65
| [
"vvgrem@gmail.com"
] | vvgrem@gmail.com |
4e725d904e1ecd922e7eecc433b83b4e5488f4c2 | cb882d5bc1a22b6d22a2d18a4ece8ec362f81b4b | /app/migrations/0004_product_user.py | 7ff7069f9a12115a709a07f6048c77bcabccd0b3 | [] | no_license | Vazimax/simple_djangoapp | 076806ac70f136f2a964db068b7b904f9429073e | 08e0f3f4b3830c6b1cde6bea69849775bd7ba578 | refs/heads/main | 2023-05-24T09:08:30.191567 | 2021-06-08T14:03:31 | 2021-06-08T14:03:31 | 375,030,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | # Generated by Django 3.1.7 on 2021-06-07 19:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0003_female_male'),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('products', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.product')),
],
),
]
| [
"aboubakr.elhabti@gmail.com"
] | aboubakr.elhabti@gmail.com |
761e15410951d9ff18fef66225b3b1ca4c4188ec | 940a0f48027eefbfe028f2116aeb9702e7122f05 | /setup.py | 094555fbf31f039650d3781c5d1435c39416c7e2 | [] | no_license | umeboshi2/useless | 1d0f67335fad0897619859a2095fd32e649b0994 | 630c7491cfb7a70765878779f8496046d6ac18df | refs/heads/master | 2021-01-02T08:21:14.679866 | 2013-01-22T21:44:00 | 2013-01-22T21:44:00 | 7,497,326 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | import sys
from distutils.core import setup
PACKAGES = ['base', 'debian', 'sqlgen', 'db', 'kdebase', 'kdedb']
package = None
if sys.argv[1] in PACKAGES:
package = sys.argv[1]
del sys.argv[1]
pd = {'' : 'src'}
if package is not None:
packages = ['useless/'+package]
if package == 'base':
packages = ['useless'] + packages
else:
packages = []
package = 'dummy'
url = 'http://useless.berlios.de'
setup(name='useless-'+package,
version="0.2",
description = 'useless packages and modules for basic stuff',
author='Joseph Rawson',
author_email='umeboshi@gregscomputerservice.com',
url=url,
package_dir = {'' : '.'},
packages = packages
)
| [
"umeboshi@70758ab2-d2f7-0310-a994-9f7f813c4004"
] | umeboshi@70758ab2-d2f7-0310-a994-9f7f813c4004 |
089a86f2514e7fdb5dd0bfdb50fbb19280ef6f2f | d81dc8eda4aed1e66a2275ddd7463eaa90789ff4 | /Gesture Recognition/Video.py | 894fa066f68358ce312358d9fced5003a5219204 | [] | no_license | ai3DVision/BlendedJointAttention | c01b8b6b0c33923e2e7c6719765c427c1c5e5439 | 2bf9445d7749c9f138df950aea9dd101c8713ff4 | refs/heads/master | 2020-03-15T23:42:18.307922 | 2016-08-25T13:26:53 | 2016-08-25T13:26:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | import cv2
import sys
# Defining cascade variables
faceCascade1 = cv2.CascadeClassifier('../haarcascades/haarcascade_frontalface_alt2.xml')
nosecascade = cv2.CascadeClassifier('../haarcascades/haarcascade_mcs_nose.xml')
# Video capture via webcam
cam = cv2.VideoCapture(-1)
cam.set(3,640)
cam.set(4,480)
video_capture = cam
a = list()
b = list()
while True:
# Capture frame-by-frame
ret, frame = video_capture.read()
if ret:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces1 = faceCascade1.detectMultiScale(gray, 1.1, 5)
# Draw a rectangle around the faces
for (x, y, w, h) in faces1:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = frame[y:y+h, x:x+w]
nose = nosecascade.detectMultiScale(roi_gray,1.3,5)
for (ex,ey,ew,eh) in nose:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,0,0),2)
a.append(ex+ew/2)
b.append(ey+eh/2)
# Display the resulting frame
for i in range(len(a)):
cv2.circle(frame, (x+a[i],y+b[i]),1,(128,0,127),2)
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release video capture
video_capture.release()
cv2.destroyAllWindows()
| [
"agarwalsoumitra1504@gmail.com"
] | agarwalsoumitra1504@gmail.com |
c9ee57c779cbb050035f866898ceb6d98ee4abdf | bac7a7507933ac5bb38b41bbe2a587764da3cf94 | /snappy_wrappers/wrappers/erds_sv2/merge_genotypes/wrapper.py | 96047b855627a47db6f2ddbbcfd18ed4f895f093 | [
"MIT"
] | permissive | Pregelnuss/snappy-pipeline | 923b0f36117a2f55ee52f9a8564ed3bb82a8be16 | 31200eba84bff8e459e9e210d6d95e2984627f5c | refs/heads/master | 2023-06-19T07:24:04.736033 | 2021-05-27T07:24:05 | 2021-05-27T07:24:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | # -*- coding: utf-8 -*-
"""Wrapper for running ERDS+SV2 merge genotypes step
"""
from snakemake.shell import shell
__author__ = "Manuel Holtgrewe"
__email__ = "manuel.holtgrewe@bihealth.de"
shell(
r"""
# -----------------------------------------------------------------------------
# Redirect stderr to log file by default and enable printing executed commands
exec &> >(tee -a "{snakemake.log}")
set -x
# -----------------------------------------------------------------------------
bcftools merge \
-m id \
-O z \
-o {snakemake.output.vcf} \
{snakemake.input}
$(which tabix) --version
$(which tabix) -f {snakemake.output.vcf}
pushd $(dirname {snakemake.output.vcf})
md5sum $(basename {snakemake.output.vcf}) >$(basename {snakemake.output.vcf}).md5
md5sum $(basename {snakemake.output.vcf}).tbi >$(basename {snakemake.output.vcf}).tbi.md5
"""
)
| [
"manuel.holtgrewe@bihealth.de"
] | manuel.holtgrewe@bihealth.de |
56c7e189b97621aba5e9156b2624bb12ef4c9007 | 0fcc6353edee4eed7a1ea4b1c89a00bfcf03e851 | /PythonFunctions/venv/Scripts/easy_install-3.7-script.py | cc2142893207f4e9d244ff35fbb13add41769553 | [] | no_license | GANESH0080/Python-Practice-Again | 81d8048c23d338a99bb17fa86a9f87b3057bfe52 | 6565911d14a22d0f33a41b417026c31a0a066be5 | refs/heads/master | 2020-09-20T03:40:45.462869 | 2019-11-27T07:19:24 | 2019-11-27T07:19:24 | 224,368,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | #!D:\PythonPracticeAgain\PythonFunctions\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"ganusalunkhe@gmail.com"
] | ganusalunkhe@gmail.com |
a7386795586d024e974682a2c99a5f99c1659a8f | a5698f82064aade6af0f1da21f504a9ef8c9ac6e | /huaweicloud-sdk-cbr/huaweicloudsdkcbr/v1/model/show_members_detail_request.py | d11649680f2ceb7aa7e1af032ed7496f05cb6aa0 | [
"Apache-2.0"
] | permissive | qizhidong/huaweicloud-sdk-python-v3 | 82a2046fbb7d62810984399abb2ca72b3b47fac6 | 6cdcf1da8b098427e58fc3335a387c14df7776d0 | refs/heads/master | 2023-04-06T02:58:15.175373 | 2021-03-30T10:47:29 | 2021-03-30T10:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,919 | py | # coding: utf-8
import pprint
import re
import six
class ShowMembersDetailRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'backup_id': 'str',
'dest_project_id': 'str',
'image_id': 'str',
'status': 'str',
'vault_id': 'str',
'limit': 'int',
'marker': 'str',
'offset': 'int',
'sort': 'str'
}
attribute_map = {
'backup_id': 'backup_id',
'dest_project_id': 'dest_project_id',
'image_id': 'image_id',
'status': 'status',
'vault_id': 'vault_id',
'limit': 'limit',
'marker': 'marker',
'offset': 'offset',
'sort': 'sort'
}
def __init__(self, backup_id=None, dest_project_id=None, image_id=None, status=None, vault_id=None, limit=None, marker=None, offset=None, sort=None):
"""ShowMembersDetailRequest - a model defined in huaweicloud sdk"""
self._backup_id = None
self._dest_project_id = None
self._image_id = None
self._status = None
self._vault_id = None
self._limit = None
self._marker = None
self._offset = None
self._sort = None
self.discriminator = None
self.backup_id = backup_id
if dest_project_id is not None:
self.dest_project_id = dest_project_id
if image_id is not None:
self.image_id = image_id
if status is not None:
self.status = status
if vault_id is not None:
self.vault_id = vault_id
if limit is not None:
self.limit = limit
if marker is not None:
self.marker = marker
if offset is not None:
self.offset = offset
if sort is not None:
self.sort = sort
@property
def backup_id(self):
"""Gets the backup_id of this ShowMembersDetailRequest.
:return: The backup_id of this ShowMembersDetailRequest.
:rtype: str
"""
return self._backup_id
@backup_id.setter
def backup_id(self, backup_id):
"""Sets the backup_id of this ShowMembersDetailRequest.
:param backup_id: The backup_id of this ShowMembersDetailRequest.
:type: str
"""
self._backup_id = backup_id
@property
def dest_project_id(self):
"""Gets the dest_project_id of this ShowMembersDetailRequest.
:return: The dest_project_id of this ShowMembersDetailRequest.
:rtype: str
"""
return self._dest_project_id
@dest_project_id.setter
def dest_project_id(self, dest_project_id):
"""Sets the dest_project_id of this ShowMembersDetailRequest.
:param dest_project_id: The dest_project_id of this ShowMembersDetailRequest.
:type: str
"""
self._dest_project_id = dest_project_id
@property
def image_id(self):
"""Gets the image_id of this ShowMembersDetailRequest.
:return: The image_id of this ShowMembersDetailRequest.
:rtype: str
"""
return self._image_id
@image_id.setter
def image_id(self, image_id):
"""Sets the image_id of this ShowMembersDetailRequest.
:param image_id: The image_id of this ShowMembersDetailRequest.
:type: str
"""
self._image_id = image_id
@property
def status(self):
"""Gets the status of this ShowMembersDetailRequest.
:return: The status of this ShowMembersDetailRequest.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ShowMembersDetailRequest.
:param status: The status of this ShowMembersDetailRequest.
:type: str
"""
self._status = status
@property
def vault_id(self):
"""Gets the vault_id of this ShowMembersDetailRequest.
:return: The vault_id of this ShowMembersDetailRequest.
:rtype: str
"""
return self._vault_id
@vault_id.setter
def vault_id(self, vault_id):
"""Sets the vault_id of this ShowMembersDetailRequest.
:param vault_id: The vault_id of this ShowMembersDetailRequest.
:type: str
"""
self._vault_id = vault_id
@property
def limit(self):
"""Gets the limit of this ShowMembersDetailRequest.
:return: The limit of this ShowMembersDetailRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ShowMembersDetailRequest.
:param limit: The limit of this ShowMembersDetailRequest.
:type: int
"""
self._limit = limit
@property
def marker(self):
"""Gets the marker of this ShowMembersDetailRequest.
:return: The marker of this ShowMembersDetailRequest.
:rtype: str
"""
return self._marker
@marker.setter
def marker(self, marker):
"""Sets the marker of this ShowMembersDetailRequest.
:param marker: The marker of this ShowMembersDetailRequest.
:type: str
"""
self._marker = marker
@property
def offset(self):
"""Gets the offset of this ShowMembersDetailRequest.
:return: The offset of this ShowMembersDetailRequest.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ShowMembersDetailRequest.
:param offset: The offset of this ShowMembersDetailRequest.
:type: int
"""
self._offset = offset
@property
def sort(self):
"""Gets the sort of this ShowMembersDetailRequest.
:return: The sort of this ShowMembersDetailRequest.
:rtype: str
"""
return self._sort
@sort.setter
def sort(self, sort):
"""Sets the sort of this ShowMembersDetailRequest.
:param sort: The sort of this ShowMembersDetailRequest.
:type: str
"""
self._sort = sort
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowMembersDetailRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
fb9b2f8ef498d54f9d568b07bf798b3b11b828e0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/1/usersdata/66/180/submittedfiles/formula.py | e164e5a9a7884303cf825c60c1229245d2385105 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | # -*- coding: utf-8 -*-
from __future__ import division
p=input("digite p")
i=input("digite i")
n=input("digite n")
v=(P*((1+I)**N)-1)/T
print ( "o valor de v eh: %.2f" %V)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
d95fc2686d35f61aa399c1c3f823176e317ea474 | 699a43917ce75b2026a450f67d85731a0f719e01 | /12_int_to_roman/interger_to_roman.py | e51bac0b5257bb65541b113406b145206f685e01 | [] | no_license | wusanshou2017/Leetcode | 96ab81ae38d6e04739c071acfc0a5f46a1c9620b | c4b85ca0e23700b84e4a8a3a426ab634dba0fa88 | refs/heads/master | 2021-11-16T01:18:27.886085 | 2021-10-14T09:54:47 | 2021-10-14T09:54:47 | 107,402,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | class Solution:
def __init__(self):
self.dic_roman={"I":1,"V":5,"X":10,"L":50,"C":100,"D":500,"M":1000}
self.dic_int2roman={ value:key for key,value in self.dic_roman.items()}
def int2roman(self,target):
res =""
if target//1000>0:
m=target//1000
target=target-m*1000
res+="M"*m;
if target>=300:
if target>=500:
target=target-500
c=target//100
res+="D"+c*"C"
else:
target=target-()
| [
"252652905@qq.com"
] | 252652905@qq.com |
bb3c3e3539618a84d358090da571b392a03cf637 | 6730aab6ed416937cc1ed96ae87f86d7761c8129 | /src/calc_parse.py | 9dc2b8937e0ec016416c724205099b4f3f25e54b | [] | no_license | vrthra/miner | 3c4adcd1db9b5583354d665ec169c47c38f1faa1 | 4b55999eedb97c607024ff04575b0f09b499d58a | refs/heads/master | 2020-05-02T22:54:43.393084 | 2019-05-02T17:07:37 | 2019-05-02T17:07:37 | 178,266,349 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,783 | py | #!/usr/bin/env python3
import string
from helpers import scope
def parse_num(s,i):
n = ''
while s[i:] and s[i].in_(list(string.digits)):
with scope('while_1', 0):
n += s[i]
i = i +1
return i,n
def parse_paren(s, i):
assert s[i] == '('
i, v = parse_expr(s, i+1)
if s[i:] == '':
with scope('if_0', 0):
raise Exception(s, i)
assert s[i] == ')'
return i+1, v
def parse_expr(s, i = 0):
expr = []
while s[i:]:
with scope('while_2', 0):
c = s[i]
if c.in_(list(string.digits)):
with scope('if_1', 0):
i,num = parse_num(s,i)
expr.append(num)
elif c.in_(['+', '-', '*', '/']):
with scope('if_1', 1):
expr.append(c)
i = i + 1
elif c == '(':
with scope('if_1', 2):
i, cexpr = parse_paren(s, i)
expr.append(cexpr)
elif c == ')':
with scope('if_1', 3):
return i, expr
else:
with scope('if_1', 4):
raise Exception(s,i)
return i, expr
import json
import sys
import Tracer
if __name__ == "__main__":
mystring = sys.argv[1] if len(sys.argv) > 1 else "(25-1/(2+3))*100/3"
restrict = {'methods':['parse_num', 'parse_paren', 'parse_expr']}
with Tracer.Tracer(mystring, restrict) as tracer:
parse_expr(tracer())
assert tracer.inputstr.comparisons
print(json.dumps({
'comparisons':Tracer.convert_comparisons(tracer.inputstr.comparisons),
'method_map': Tracer.convert_method_map(tracer.method_map),
'inputstr': str(tracer.inputstr)}))
| [
"rahul@gopinath.org"
] | rahul@gopinath.org |
2ee60026988f1846551704ad15e993f1cd397d43 | 6aa7e203f278b9d1fd01244e740d5c944cc7c3d3 | /airflow/providers/docker/hooks/docker.py | bae0e7f5a046ff578796c98cc623b63dec42f3c3 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"Python-2.0"
] | permissive | laserpedro/airflow | 83fc991d91749550b151c81876d9e7864bff3946 | a28afa8172489e41ecf7c381674a0cb91de850ff | refs/heads/master | 2023-01-02T04:55:34.030935 | 2020-10-24T15:55:11 | 2020-10-24T15:55:11 | 285,867,990 | 1 | 0 | Apache-2.0 | 2020-08-07T15:56:49 | 2020-08-07T15:56:49 | null | UTF-8 | Python | false | false | 3,265 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional
from docker import APIClient
from docker.errors import APIError
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.log.logging_mixin import LoggingMixin
class DockerHook(BaseHook, LoggingMixin):
"""
Interact with a private Docker registry.
:param docker_conn_id: ID of the Airflow connection where
credentials and extra configuration are stored
:type docker_conn_id: str
"""
def __init__(self,
docker_conn_id='docker_default',
base_url: Optional[str] = None,
version: Optional[str] = None,
tls: Optional[str] = None
) -> None:
super().__init__()
if not base_url:
raise AirflowException('No Docker base URL provided')
if not version:
raise AirflowException('No Docker API version provided')
conn = self.get_connection(docker_conn_id)
if not conn.host:
raise AirflowException('No Docker registry URL provided')
if not conn.login:
raise AirflowException('No username provided')
extra_options = conn.extra_dejson
self.__base_url = base_url
self.__version = version
self.__tls = tls
if conn.port:
self.__registry = "{}:{}".format(conn.host, conn.port)
else:
self.__registry = conn.host
self.__username = conn.login
self.__password = conn.password
self.__email = extra_options.get('email')
self.__reauth = extra_options.get('reauth') != 'no'
def get_conn(self) -> APIClient:
client = APIClient(
base_url=self.__base_url,
version=self.__version,
tls=self.__tls
)
self.__login(client)
return client
def __login(self, client) -> None:
self.log.debug('Logging into Docker registry')
try:
client.login(
username=self.__username,
password=self.__password,
registry=self.__registry,
email=self.__email,
reauth=self.__reauth
)
self.log.debug('Login successful')
except APIError as docker_error:
self.log.error('Docker registry login failed: %s', str(docker_error))
raise AirflowException(f'Docker registry login failed: {docker_error}')
| [
"noreply@github.com"
] | laserpedro.noreply@github.com |
5e106deb5aff07fd69b809c2d354e681e3d84798 | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /AtCoder_Virtual_Contest/tessoku-book/ec/main.py | 09c1be3745ae6e29b841ff506af698a7664a78cd | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 3,200 | py | # -*- coding: utf-8 -*-
import random
import string
import traceback
# See:
# https://atcoder.jp/contests/abc284/submissions/37841742
class RollingHashWithRange:
def __init__(self, parent, left, right) -> None:
self.parent = parent
self.left = left
self.right = right
def __getitem__(self, key):
if key > self.right - self.left:
traceback.print_exc()
raise Exception("index out of range")
return self.get(self.left, self.left + key)
# Overall hash value
def get(self, left, right):
mod = RollingHash.mod
return (
self.parent.hash[right]
- self.parent.hash[left] * self.parent.power[right - left]
) % mod
def __len__(self):
return self.right - self.left
def __eq__(self, other):
return self.get(self.left, self.right) == other.get(other.left, other.right)
# Longest Common Prefix
def __lt__(self, other):
length = min(len(self), len(other))
if self[length] == other[length]:
return len(self) < len(other)
left, right = 0, length
while True:
mid = (left + right) // 2
if left == right:
return (
self.parent.s[self.left + right - 1]
< other.parent.s[other.left + right - 1]
)
if self[mid] != other[mid]:
right = mid
else:
left = mid + 1
right = right
class RollingHash:
base = 30
mod = 10**9 + 9
@classmethod
def config(cls, base, mod) -> None:
RollingHash.base = base
RollingHash.mod = mod
def __init__(self, s) -> None:
mod = RollingHash.mod
base = RollingHash.base
self.power = power = [1] * (len(s) + 1)
self.s = s
size = len(s)
self.hash = hash = [0] * (size + 1)
value = 0
for i in range(size):
hash[i + 1] = value = (value * base + ord(s[i])) % mod
value = 1
for i in range(size):
power[i + 1] = value = value * base % mod
def get(self, left, right) -> RollingHashWithRange:
return RollingHashWithRange(self, left, right)
def get_random_name(n):
rand_list = [random.choice(string.ascii_letters + string.digits) for i in range(n)]
return "".join(rand_list)
def test():
RollingHash.config(100, 10**9 + 7)
for i in range(100):
n = 5
x, y = get_random_name(n), get_random_name(n)
y = x
if (x < y) != (RollingHash(x).get(0, n) < RollingHash(y).get(0, n)):
print(
"No", x < y, RollingHash(x).get(0, n) < RollingHash(y).get(0, n), x, y
)
def main():
import sys
input = sys.stdin.readline
n, q = map(int, input().split())
s = input().rstrip()
rh1 = RollingHash(s)
rh2 = RollingHash(s[::-1])
for _ in range(q):
li, ri = map(int, input().split())
li -= 1
if rh1.get(li, ri) == rh2.get(n - ri, n - li):
print("Yes")
else:
print("No")
if __name__ == "__main__":
main()
| [
"k.hiro1818@gmail.com"
] | k.hiro1818@gmail.com |
87f6e6bf13f60d03cd3b263df93b4e6feaef5cdb | 847273de4b1d814fab8b19dc651c651c2d342ede | /.history/Sudoku_II_008_20180622142549.py | a755b1bc64831da70f2eb0c38aca219723cf4bcb | [] | no_license | Los4U/sudoku_in_python | 0ba55850afcffeac4170321651620f3c89448b45 | 7d470604962a43da3fc3e5edce6f718076197d32 | refs/heads/master | 2020-03-22T08:10:13.939424 | 2018-07-04T17:21:13 | 2018-07-04T17:21:13 | 139,749,483 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,605 | py | from random import randint
import copy
# Chart nr 1: an almost-finished puzzle; blank cells are the string " ".
sudoku1 = [
    [5, 9, 8, 6, 1, 2, 3, 4, 7],
    [2, 1, 7, 9, 3, 4, 8, 6, 5],
    [6, 4, 3, 5, 8, 7, 1, 2, 9],
    [1, 6, 5, 4, 9, 8, 2, 7, 3],
    [3, 2, 9, 7, 6, 5, 4, 1, 8],
    [7, 8, 4, 3, 2, 1, 5, 9, 6],
    [8, 3, 1, 2, 7, 6, 9, 5, 4],
    [4, 7, 2, 8, 5, 9, 6, 3, 1],
    [9, 5, 6, 1, 4, 3, 7, " ", " "]
]
# Full solution for chart nr 1 (used by the in-game hint command).
sudoku1solved = [
    [5, 9, 8, 6, 1, 2, 3, 4, 7],
    [2, 1, 7, 9, 3, 4, 8, 6, 5],
    [6, 4, 3, 5, 8, 7, 1, 2, 9],
    [1, 6, 5, 4, 9, 8, 2, 7, 3],
    [3, 2, 9, 7, 6, 5, 4, 1, 8],
    [7, 8, 4, 3, 2, 1, 5, 9, 6],
    [8, 3, 1, 2, 7, 6, 9, 5, 4],
    [4, 7, 2, 8, 5, 9, 6, 3, 1],
    [9, 5, 6, 1, 4, 3, 7, 8, 2]
]
# Chart nr 2: almost finished; note these blanks use ' ' (same one-space value).
sudoku2 = [
    [9, 8, 7, 4, 3, 2, 5, 6, 1],
    [2, 4, 3, 5, 1, 6, 8, 7, 9],
    [5, 6, 1, 7, 9, 8, 4, 3, 2],
    [3, 9, 5, 6, 4, 7, 2, 1, 8],
    [8, 2, 4, 3, 5, 1, 6, 9, 7],
    [1, 7, 6, 2, 8, 9, 3, 4, 5],
    [7, 1, 2, 8, 6, 3, 9, 5, 4],
    [4, 3, 8, 9, 7, 5, 1, 2, 6],
    [' ', 5, ' ', ' ', 2, ' ', 7, ' ', ' ']
]
# Full solution for chart nr 2.
sudoku2solved = [
    [9, 8, 7, 4, 3, 2, 5, 6, 1],
    [2, 4, 3, 5, 1, 6, 8, 7, 9],
    [5, 6, 1, 7, 9, 8, 4, 3, 2],
    [3, 9, 5, 6, 4, 7, 2, 1, 8],
    [8, 2, 4, 3, 5, 1, 6, 9, 7],
    [1, 7, 6, 2, 8, 9, 3, 4, 5],
    [7, 1, 2, 8, 6, 3, 9, 5, 4],
    [4, 3, 8, 9, 7, 5, 1, 2, 6],
    [6, 5, 9, 1, 2, 4, 7, 8, 3]
]
# Chart nr 3: a much harder board with many blanks.
sudoku3 = [
    [1, 2, ' ', ' ', ' ', 4, ' ', 8, 6],
    [5, ' ', ' ', ' ', ' ', 9, ' ', ' ', 4],
    [' ', ' ', ' ', ' ', 3, ' ', 2, ' ', ' '],
    [2, ' ', 6, ' ', 1, ' ', 4, ' ', 3],
    [' ', ' ', ' ', 7, ' ', 6, ' ', ' ', ' '],
    [' ', 7, ' ', ' ', 8, ' ', 9, ' ', 1],
    [' ', ' ', ' ', ' ', ' ', ' ', 8, 3, ' '],
    [3, ' ', 7, 8, ' ', 2, 1, ' ', ' '],
    [' ', 4, 1, ' ', ' ', 7, ' ', 9, ' '],
]
# Full solution for chart nr 3.
sudoku3solved = [
    [1, 2, 9, 5, 7, 4, 3, 8, 6],
    [5, 3, 8, 6, 2, 9, 7, 1, 4],
    [7, 6, 4, 1, 3, 8, 2, 5, 9],
    [2, 8, 6, 9, 1, 5, 4, 7, 3],
    [9, 1, 3, 7, 4, 6, 5, 2, 8],
    [4, 7, 5, 2, 8, 3, 9, 6, 1],
    [6, 5, 2, 4, 9, 1, 8, 3, 7],
    [3, 9, 7, 8, 6, 2, 1, 4, 5],
    [8, 4, 1, 3, 5, 7, 6, 9, 2],
]
def giveHint(emptyS, solvedS):
    """Reveal one cell: copy the first blank (" ") in *emptyS* from *solvedS*.

    The board is scanned row-major, at most one cell is changed per call,
    and *emptyS* is mutated in place; a board without blanks is left
    untouched. The flag-variable/double-break idiom was replaced with an
    early return, and the hard-coded 9x9 bounds were generalized to the
    actual board dimensions (backward compatible for 9x9 boards).
    """
    for x in range(len(emptyS)):
        for y in range(len(emptyS[x])):
            if emptyS[x][y] == " ":
                emptyS[x][y] = solvedS[x][y]
                return
def printSudoku():
    """Print the global ``sudoku`` board as a bordered 9x9 grid.

    ``i`` runs from 0 to 9 inclusive so the closing border after the last
    row is drawn; only ``i < 9`` actually prints a row of cells.
    """
    i = 0
    while i < 10:
        if i == 0:
            # Column header followed by the top border.
            print(" 1 2 3 4 5 6 7 8 9")
            print(" -------------------------")
        elif i == 3 or i == 6 or i == 9:
            # Border after every third row (and the bottom border at i == 9).
            print(" -------------------------")
        line = "|"
        if i < 9:
            # {0} is the row list, {1} the "|" separator, {2} the 1-based row number.
            print(' {2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'.format(sudoku[i], line, i+1))
        i = i + 1
print(" ")
print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ")
print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ")
print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ")
print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ")
print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ")
print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ")
print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ")
print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ")
print("To start game input:")
print(" r - to load random puzzle:")
print(" 1 - to load chart nr 1:")
print(" 2 - to load chart nr 2:")
print(" 3 - to load chart nr 3:")
choice = input("Input here: ")
print("\n\n\n\n")
s = 0
if choice == "R" or choice == "r":
listaSudoku = [sudoku1, sudoku2, sudoku3]
sudoku_number = randint(0, 2)
print("Plansza nr:", sudoku_number)
s = sudoku_number
sudoku = copy.deepcopy(listaSudoku[sudoku_number])
elif int(choice) == 1:
s = 1
sudoku = copy.deepcopy(sudoku1)
elif int(choice) == 2:
s = 2
sudoku = copy.deepcopy(sudoku2)
elif int(choice) == 3:
s = 3
sudoku = copy.deepcopy(sudoku3)
while True: # prints Sudoku until is solved
# print("Your sudoku to solve:")
printSudoku()
print("\nInput 3 numbers in format a b c, np. 4 5 8")
print(" a - row number")
print(" b - column number ")
print(" c - value")
# vprint(" r - reset chart to start\n ")
x = input("Input a b c: ")
print("")
numbers = " 0123456789" # conditions of entering the numbers !
if (len(x) != 5) or (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or (
str(x[4]) not in numbers) or (str(x[1]) != " ") or (str(x[3]) != " "):
if x == "r": # reset
if s == 1:
sudoku = copy.deepcopy(sudoku1)
elif s == 2:
sudoku = copy.deepcopy(sudoku2)
elif s == 3:
sudoku = copy.deepcopy(sudoku3)
elif x == "h": # show:
print()
if s == 1:
giveHint(sudoku, sudoku1solved)
elif s == 2:
giveHint(sudoku, sudoku2solved)
elif s == 3:
giveHint(sudoku, sudoku3solved)
else:
print("Error - wrong number format \n ")
continue
else:
sudoku[int(x[0])-1][int(x[2])-1] = int(x[4])
column1 = 0
column2 = 0
try: # check if sudoku is solved
i = 0
list = []
while i < 9: # check are all column == 45
column = 0
for item in sudoku:
column = column + item[i]
list.append(column)
i += 1
is45 = 0 # check if sudoku is solved
for listElement in list:
if listElement == 45:
is45 = is45 + 1
#
i = 0
for item in sudoku:
if sum(item) == 45 and is45 == 9:
i = i + 1
if i == 9:
printSudoku()
print(" ")
print("/%%. ,%%/ ,%@@@&/ .%%, /%# /%% *%%* /%/ #%/ %%%* (%. ")
print(" @@@, .@@& &@@@/,*@@@@ /@@# &@@. ,@@# @@@@ ,@@, .@@@ @@@@% @@( ")
print(" %@@.,@@& &@@ @@@ /@@# &@@. @@@. /@@@@# @@@ .@@@ @@,@@@ @@( ")
print(" @@@@@# .@@@ @@@ /@@# &@@. ,@@% @@/&@@. .@@, .@@@ @@, @@@ @@( ")
print(" %@@% .@@@ @@@ /@@# &@@. %@@ ,@@ .@@& %@@ .@@@ @@, @@@ @@( ")
print(" *@@* @@@. .@@& *@@& @@@ @@/&@* (@@,@@* .@@@ @@, @@@@@( ")
print(" *@@* @@@&*,/@@@@ #@@@(,/@@@, &@@@@ @@@@& .@@@ @@, @@@@( ")
print(
" .%%. /&@@@%, *&@@@%. #%%, *%%# #%/ %% (%% ")
print(" ")
'''
print(" @@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print(" @@@@@@@@@@ YOU WIN @@@@@@@@@@")
print(" @@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
'''
break
except TypeError:
print()
| [
"inz.kamil.wos@gmail.com"
] | inz.kamil.wos@gmail.com |
da80db2eab0104cf95159207871265764239feb7 | 67d8173a716da10a7350213d98938aae9f2115ce | /ProgrammingCourses/CS61A/lab/lab09/tests/split-at.py | 8aeb6433d36caff342936185c794ff632ff506c2 | [] | no_license | jxie0755/Learning_Python | 94490d41bdf93acf8396f843328e38b6da310b0f | 143422321cbc3715ca08f6c3af8f960a55887ced | refs/heads/master | 2021-11-02T22:47:35.790239 | 2021-09-26T04:26:23 | 2021-09-26T04:26:23 | 101,445,132 | 0 | 2 | null | 2019-02-19T15:48:44 | 2017-08-25T22:00:16 | Python | UTF-8 | Python | false | false | 1,127 | py | test = {
"name": "split-at",
"points": 0,
"suites": [
{
"type": "scheme",
"scored": True,
"setup": """
scm> (load 'lab09)
scm> (load 'lab09_extra)
""",
"cases": [
{
"code": """
scm> (car (split-at '(1 2 3 4 5) 3))
(1 2 3)
""",
"hidden": False
},
{
"code": """
scm> (cdr (split-at '(1 2 3 4 5) 3))
(4 5)
""",
"hidden": False
},
{
"code": """
scm> (car (split-at '(1 2 3 4 5) 10))
(1 2 3 4 5)
""",
"hidden": False
},
{
"code": """
scm> (cdr (split-at '(1 2 3 4 5) 10))
()
""",
"hidden": False
},
{
"code": """
scm> (car (split-at '(0 1 1 2 3) 0))
()
""",
"hidden": False
},
{
"code": """
scm> (cdr (split-at '(0 1 1 2 3) 0))
(0 1 1 2 3)
""",
"hidden": False
},
]
}
]
}
| [
"30805062+jxie0755@users.noreply.github.com"
] | 30805062+jxie0755@users.noreply.github.com |
888b859b9f5faab436ba29ab2a12e06acbd44125 | 72dc7d124cdac8f2dcab3f72e95e9a646154a6a0 | /byceps/services/news/models/channel.py | 00229d0acddd28bb9c310a03e564d0e9b3135b06 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | m-ober/byceps | e6569802ee76e8d81b892f1f547881010359e416 | 4d0d43446f3f86a7888ed55395bc2aba58eb52d5 | refs/heads/master | 2020-11-30T23:31:33.944870 | 2020-02-12T23:53:55 | 2020-02-12T23:56:04 | 40,315,983 | 0 | 0 | null | 2015-08-06T16:41:36 | 2015-08-06T16:41:36 | null | UTF-8 | Python | false | false | 1,022 | py | """
byceps.services.news.models.channel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from ....database import db
from ....typing import BrandID
from ....util.instances import ReprBuilder
from ..transfer.models import ChannelID
class Channel(db.Model):
    """A channel to which news items can be published."""
    __tablename__ = 'news_channels'
    # Operator-chosen channel identifier; doubles as the primary key.
    id = db.Column(db.UnicodeText, primary_key=True)
    # Owning brand; every channel belongs to exactly one brand.
    brand_id = db.Column(db.UnicodeText, db.ForeignKey('brands.id'), index=True, nullable=False)
    # Base URL under which this channel's items are served.
    url_prefix = db.Column(db.UnicodeText, nullable=False)
    def __init__(
        self, channel_id: ChannelID, brand_id: BrandID, url_prefix: str
    ) -> None:
        self.id = channel_id
        self.brand_id = brand_id
        self.url_prefix = url_prefix
    def __repr__(self) -> str:
        # Compact debug representation showing the id and owning brand.
        return ReprBuilder(self) \
            .add_with_lookup('id') \
            .add('brand', self.brand_id) \
            .build()
| [
"homework@nwsnet.de"
] | homework@nwsnet.de |
b0f5e5106a95baf291b263f728fbb921336b5e00 | 512f48fdcfa78e322526cf47163110009b84bf73 | /test/test_update_settings.py | 86dfd6e0e237c110a37851d41b45c214ddc85ca8 | [
"MIT"
] | permissive | confluentinc/vm-console-client-python | 9a0f540c0113acf68ee9dc914715bc255e4d99f4 | ccbd944a0e0333c73e098b769fe4c82755d29874 | refs/heads/master | 2023-07-18T10:33:58.909287 | 2021-09-02T20:52:20 | 2021-09-02T20:52:20 | 402,559,283 | 0 | 0 | MIT | 2021-09-02T20:49:56 | 2021-09-02T20:49:56 | null | UTF-8 | Python | false | false | 851 | py | # coding: utf-8
"""
Python InsightVM API Client
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.update_settings import UpdateSettings # noqa: E501
from swagger_client.rest import ApiException
class TestUpdateSettings(unittest.TestCase):
    """UpdateSettings unit test stubs"""
    # Auto-generated stub: setUp/tearDown intentionally do nothing.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testUpdateSettings(self):
        """Test UpdateSettings"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.update_settings.UpdateSettings() # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly through the unittest runner.
    unittest.main()
| [
"zachary_youtz@rapid7.com"
] | zachary_youtz@rapid7.com |
99c3e5aa620113bd9066d530f9fa040ebaa93d0b | 7136e5242793b620fa12e9bd15bf4d8aeb0bfe7a | /examples/adspygoogle/dfp/v201103/update_ad_units.py | e6cac253cc32b39005e61ae41624aad51c441c81 | [
"Apache-2.0"
] | permissive | hockeyprincess/google-api-dfp-python | 534519695ffd26341204eedda7a8b50648f12ea9 | efa82a8d85cbdc90f030db9d168790c55bd8b12a | refs/heads/master | 2021-01-10T10:01:09.445419 | 2011-04-14T18:25:38 | 2011-04-14T18:25:38 | 52,676,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,300 | py | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates an ad unit by enabling AdSense to the first 500.
To determine which ad units exist, run get_all_ad_units.py or
get_inventory_tree.py."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.append(os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle.dfp.DfpClient import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service. By default, the request is always made against
# the sandbox environment.
inventory_service = client.GetInventoryService(
'https://sandbox.google.com', 'v201103')
# Create statement object to get all ad units.
filter_statement = {'query': 'LIMIT 500'}
# Get ad units by filter.
ad_units = inventory_service.GetAdUnitsByStatement(
filter_statement)[0]['results']
if ad_units:
# Update each local ad unit object by enabling AdSense.
for ad_unit in ad_units:
ad_unit['inheritedAdSenseSettings']['value']['adSenseEnabled'] = 'true'
# Update ad units remotely.
ad_units = inventory_service.UpdateAdUnits(ad_units)
# Display results.
if ad_units:
for ad_units in ad_units:
print ('Ad unit with id \'%s\', name \'%s\', and is AdSense enabled '
'\'%s\' was updated.'
% (ad_unit['id'], ad_unit['name'],
ad_unit['inheritedAdSenseSettings']['value']['adSenseEnabled']))
else:
print 'No ad units were updated.'
else:
print 'No ad units found to update.'
| [
"api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138"
] | api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138 |
d59a1e58246c961c72fe4f4d523b4df4fa88e7c2 | 87040e6a11f28e9e6bfe19abf2bf912a5c5ea286 | /raccoon_dataset/train.py | 0394928fbd2e0cc153ea193ed052a892fa66b090 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | HoboQian/Deep-Learning | f2b788d64c290ab025ae4e09e1cef494b8204536 | 4d335ffebded266647bd853b138c15d8b9a8694a | refs/heads/master | 2020-03-11T07:24:20.303608 | 2017-12-05T02:27:24 | 2017-12-05T02:27:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,445 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Training executable for detection models.
This executable is used to train DetectionModels. There are two ways of
configuring the training job:
1) A single pipeline_pb2.TrainEvalPipelineConfig configuration file
can be specified by --pipeline_config_path.
Example usage:
./train \
--logtostderr \
--train_dir=path/to/train_dir \
--pipeline_config_path=pipeline_config.pbtxt
2) Three configuration files can be provided: a model_pb2.DetectionModel
configuration file to define what type of DetectionModel is being trained, an
input_reader_pb2.InputReader file to specify what training data will be used and
a train_pb2.TrainConfig file to configure training parameters.
Example usage:
./train \
--logtostderr \
--train_dir=path/to/train_dir \
--model_config_path=model_config.pbtxt \
--train_config_path=train_config.pbtxt \
--input_config_path=train_input_config.pbtxt
"""
import functools
import json
import os
import tensorflow as tf
import sys;sys.path.append('../')
sys.path.append('../object_detection/')
sys.path.append('../object_detection/slim')#deployment
from object_detection import trainer
from object_detection.builders import input_reader_builder
from object_detection.builders import model_builder
from object_detection.utils import config_util
tf.logging.set_verbosity(tf.logging.INFO)
# Command-line flags for the training binary; read via FLAGS inside main().
flags = tf.app.flags
flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.')
flags.DEFINE_integer('task', 0, 'task id')
flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy per worker.')
flags.DEFINE_boolean('clone_on_cpu', False,
                     'Force clones to be deployed on CPU. Note that even if '
                     'set to False (allowing ops to run on gpu), some ops may '
                     'still be run on the CPU if they have no GPU kernel.')
flags.DEFINE_integer('worker_replicas', 1, 'Number of worker+trainer '
                     'replicas.')
flags.DEFINE_integer('ps_tasks', 0,
                     'Number of parameter server tasks. If None, does not use '
                     'a parameter server.')
flags.DEFINE_string('train_dir', '',
                    'Directory to save the checkpoints and training summaries.')
flags.DEFINE_string('pipeline_config_path', '',
                    'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
                    'file. If provided, other configs are ignored')
flags.DEFINE_string('train_config_path', '',
                    'Path to a train_pb2.TrainConfig config file.')
flags.DEFINE_string('input_config_path', '',
                    'Path to an input_reader_pb2.InputReader config file.')
flags.DEFINE_string('model_config_path', '',
                    'Path to a model_pb2.DetectionModel config file.')
FLAGS = flags.FLAGS
def main(_):
  """Load configs from flags, set up (possibly distributed) training, and run.

  Reads either a single pipeline config or the three separate config files,
  copies them into train_dir for reproducibility, builds the model/input
  functions, derives the cluster layout from the TF_CONFIG environment
  variable, and finally hands off to trainer.train().
  """
  assert FLAGS.train_dir, '`train_dir` is missing.'
  if FLAGS.task == 0: tf.gfile.MakeDirs(FLAGS.train_dir)
  if FLAGS.pipeline_config_path:
    configs = config_util.get_configs_from_pipeline_file(
        FLAGS.pipeline_config_path)
    if FLAGS.task == 0:
      # Only the chief (task 0) snapshots the config into train_dir.
      tf.gfile.Copy(FLAGS.pipeline_config_path,
                    os.path.join(FLAGS.train_dir, 'pipeline.config'),
                    overwrite=True)
  else:
    configs = config_util.get_configs_from_multiple_files(
        model_config_path=FLAGS.model_config_path,
        train_config_path=FLAGS.train_config_path,
        train_input_config_path=FLAGS.input_config_path)
    if FLAGS.task == 0:
      for name, config in [('model.config', FLAGS.model_config_path),
                           ('train.config', FLAGS.train_config_path),
                           ('input.config', FLAGS.input_config_path)]:
        tf.gfile.Copy(config, os.path.join(FLAGS.train_dir, name),
                      overwrite=True)
  model_config = configs['model']
  train_config = configs['train_config']
  input_config = configs['train_input_config']
  model_fn = functools.partial(
      model_builder.build,
      model_config=model_config,
      is_training=True)
  create_input_dict_fn = functools.partial(
      input_reader_builder.build, input_config)
  # Cluster topology is communicated through the TF_CONFIG env variable.
  env = json.loads(os.environ.get('TF_CONFIG', '{}'))
  cluster_data = env.get('cluster', None)
  cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None
  task_data = env.get('task', None) or {'type': 'master', 'index': 0}
  task_info = type('TaskSpec', (object,), task_data)
  # Parameters for a single worker.
  ps_tasks = 0
  worker_replicas = 1
  worker_job_name = 'lonely_worker'
  task = 0
  is_chief = True
  master = ''
  if cluster_data and 'worker' in cluster_data:
    # Number of total worker replicas include "worker"s and the "master".
    worker_replicas = len(cluster_data['worker']) + 1
  if cluster_data and 'ps' in cluster_data:
    ps_tasks = len(cluster_data['ps'])
  if worker_replicas > 1 and ps_tasks < 1:
    raise ValueError('At least 1 ps task is needed for distributed training.')
  if worker_replicas >= 1 and ps_tasks > 0:
    # Set up distributed training.
    # NOTE(review): ``cluster`` is already a ClusterSpec here, so it gets
    # wrapped in ClusterSpec a second time — confirm this is intentional.
    server = tf.train.Server(tf.train.ClusterSpec(cluster), protocol='grpc',
                             job_name=task_info.type,
                             task_index=task_info.index)
    if task_info.type == 'ps':
      # Parameter servers block here and never reach trainer.train().
      server.join()
      return
    worker_job_name = '%s/task:%d' % (task_info.type, task_info.index)
    task = task_info.index
    is_chief = (task_info.type == 'master')
    master = server.target
  trainer.train(create_input_dict_fn, model_fn, train_config, master, task,
                FLAGS.num_clones, worker_replicas, FLAGS.clone_on_cpu, ps_tasks,
                worker_job_name, is_chief, FLAGS.train_dir)
if __name__ == '__main__':
  # tf.app.run parses the flags above before invoking main().
  tf.app.run()
| [
"lee.x.a90@gmail.com"
] | lee.x.a90@gmail.com |
6e30b513864fb38e0ac1557595002b2ba7e2682e | 09ba5ae2edc51f3fd812b9205188b1b01e6bea77 | /src/CPMel/__init__.py | 3c84ac72d9fa33794d013bdd7ec8fadd8a93a212 | [] | no_license | cpcgskill/Maya_tools | c6a43ad20eab3b97e82c9dfe40a1745b6098e5c4 | 93f9e66e5dc3bb51f33df0615415a56a60613ff1 | refs/heads/main | 2023-02-26T16:20:52.959050 | 2021-01-28T06:12:18 | 2021-01-28T06:12:18 | 325,512,423 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,651 | py | #!/usr/bin/python
# -*-coding:utf-8 -*-
u"""
:创建时间: 2020/5/18 23:57
:作者: 苍之幻灵
:我的主页: https://www.cpcgskill.com
:QQ: 2921251087
:爱发电: https://afdian.net/@Phantom_of_the_Cang
:aboutcg: https://www.aboutcg.org/teacher/54335
:bilibili: https://space.bilibili.com/351598127
* 获得路径模块
* PATH : CPMel所在路径
* MAYAPLUG : CPMel的Maya插件所在路径
* ISDEBUG : 是否处在Debug模式
* 快速入门:
* 导入:
>>> import CPMel.cmds as cc
>>> import CPMel.tool as ctl
* 命令:
* maya.cmds:
>>> import maya.cmds as cmds
>>> cmds.joint()
u"xxx"
* CPMel.cmds
>>> cc.joint()
joint(u"xxx")
* 命令参数转化规则:
* CPObject = str ,Double3 = (x,y,z), Matrix = (x,x,x,..*16)
* 更加方便的创建节点的方法:
>>> cc.createNode.transform()
transform(u"transform")
* mel方法访问:
>>> cc.mel.SmoothSkinWeights()
None
* 事件引擎:
>>> class printDg(cevent.Dg):
... def createNode(self, node):
... print(node)
... def removeNode(self, node):
... print(node)
>>> obj = printDg()
>>> cc.createNode.transform()
transform1 << 打印
transform(u'transform1')
* 工具:
>>> ctl.decode("你好世界")
u'你好世界'
>>> ctl.MayaObjectData(u"time1")
<CPMel.tool.MayaObjectData object at 0x0000000053CB32E8>
>>> ctl.undoBlock(xxx type = func)# Qt撤销的实现
xxx type = func
* 视频版教程: https://www.aboutcg.org/courseDetails/1031/introduce
* 2.5版本更新 :
* 使用了预编译脚本优化了文件体积
* 修复了一些BUG
* 2.6版本更新 :
* 解决了qt错误处理问题
* 错误与mayaplug可以运行多个了
* 实现了相对运行
* 区分debug版与release版
* 去除了static_cmds中无用的注释
* 通过文档注释进行类型指定优化了在pycharm中编写程序的补全效果
* 去除了mayaPlug模块下无用的程序
* 2.7版本更新 :
* 优化了导入实现
* 使用CLI
注意2.7的CLI还不完善将于!!!CPMel3版本稳定CLI功能
"""
from . import initializeMaya
import os
import sys
import maya.cmds
# Scratch storage shared with the Maya plug-in during import (cleaned up below).
sys.cpmel_data = dict()
# Major Maya version number, e.g. 2018.
MAYAINDEX = int(maya.cmds.about(v=True))
ISDEBUG = False
# Python 2: the package path may arrive as a byte string in a local codec
# (UTF-8/GBK/GB18030/GB2312); try each in turn and fall back to unicode().
try:
    PATH = os.path.dirname(os.path.abspath(__file__))
    if type(PATH) == str:
        try:
            PATH = PATH.decode("utf8")
        except UnicodeDecodeError:
            try:
                PATH = PATH.decode("gbk")
            except UnicodeDecodeError:
                try:
                    PATH = PATH.decode("GB18030")
                except UnicodeDecodeError:
                    try:
                        PATH = PATH.decode("GB2312")
                    except UnicodeDecodeError:
                        PATH = unicode(PATH)
    # Round-trip through UTF-8 to guarantee a cleanly encodable path.
    PATH = PATH.encode("utf8").decode("utf8")
# NOTE(review): bare except silently falls back to the raw (possibly
# byte-string) path — consider narrowing to Exception.
except:
    PATH = os.path.dirname(os.path.abspath(__file__))
MAYAPLUG = u'%s\\mayaPlug' % PATH
# Load the bundled Maya plug-in and the public submodules of the package.
from . import mayaPlug
from . import core
from . import api
from . import cmds
from . import event
from . import ui
from . import tool
# DELETE #
if ISDEBUG:
    reload(mayaPlug)
    reload(core)
    reload(api)
    reload(cmds)
    reload(event)
    reload(ui)
    reload(tool)
# \DELETE #
cmds.upcommands()
# Re-run upcommands whenever plug-ins change (pluginInfo 'cc' callback —
# presumed changedCommand; confirm against the Maya docs).
maya.cmds.pluginInfo(cc=cmds.upcommands)
# Drop the helper module reference so it does not leak from the package.
del maya
if hasattr(sys, "cpmel_data"):
del sys.cpmel_data | [
"www.cpcgskill.com"
] | www.cpcgskill.com |
6b371890aaa3ffd72c30d16c8b286448df3d5f9d | 1fddb12ae9b7db260b9161983a726f1528ece940 | /Part_01/Cap_09/Classes9.7.py | 4b3c0876b7328f323d06eda1b9b9514537c724e0 | [] | no_license | davicosta12/python_work | aecf642bbbf2a007fba159f305b4ab21ff00503f | 5df62d3ced25b7e04cda71846fd07862dda39b4c | refs/heads/master | 2023-04-23T09:49:31.923211 | 2021-04-27T00:31:42 | 2021-04-27T00:31:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,697 | py | class User():
def __init__(self, first_name, last_name, age, sex, situacao, login_attempts):
self.first_name = first_name
self.last_name = last_name
self.age = age
self.sex = sex
self.situacao = situacao
self.login_attempts = login_attempts
def describe_user(self):
print("\n Informações sobre o usuário: " + self.first_name.title())
print("\n Nome: " + self.first_name.title())
print(" Sobrenome: " + self.last_name.title())
print(" Sexo: " + self.sex.title())
print(" Idade: " + str(self.age))
print(" Situação: " + self.situacao.title())
def greet_user(self):
print("\n Boa noite, " + self.first_name.title())
def increment_login_attempts(self):
self.login_attempts = self.login_attempts + 1
def reset_login_attempts(self):
self.login_attempts = 0
class Admin(User):
def __init__(self, first_name, last_name, age, sex, situacao, login_attempts):
super().__init__(first_name, last_name, age, sex, situacao, login_attempts)
self.privileges = ['can add post', 'can delete post', 'can ban user',
'can commit private messages', 'can active anything']
def show_privileges(self):
print(" Privilégios do Admin")
i = 0
for privilegio in self.privileges:
if i == len(self.privileges) - 1:
print("\n " + privilegio + ".")
else:
print("\n " + privilegio + ";")
i = i + 1
instancia_user_davi = Admin('davi', 'silva', 21, 'masculino', 'solteiro', 6)
instancia_user_davi.describe_user()
instancia_user_davi.greet_user()
admin = Admin('davi', 'silva', 21, 'masculino', 'solteiro', 6)
print("\n ------------------------- \n")
admin.show_privileges()
| [
"deadspace24@hotmail.com"
] | deadspace24@hotmail.com |
492e9832c721c5e23652772a639a4d59fc4457f1 | d5af5459d0a68d8934219cdd516a23d73c7c52fb | /labs/greek.py | 308315866e2f731924c244567a5e77a0b20f5e03 | [] | no_license | flathunt/pylearn | 1e5b147924dca792eb1cddbcbee1b8da0fc3d055 | 006f37d67343a0288e7efda359ed9454939ec25e | refs/heads/master | 2022-11-23T23:31:32.016146 | 2022-11-17T08:20:57 | 2022-11-17T08:20:57 | 146,803,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py |
# The lowercase Greek letter names in Unicode code-point order starting at
# U+03B1 (small alpha); 'Final Sigma' (U+03C2) precedes 'Sigma' (U+03C3).
greek = ['Alpha','Beta','Gamma','Delta','Epsilon','Zeta','Eta','Theta',
         'Iota','Kappa','Lamda','Mu','Nu','Xi','Omicron','Pi','Rho',
         'Final Sigma','Sigma','Tau','Upsilon','Phi','Chi','Psi','Omega']
#Format required:
# The hex value of the character
# The character name (cname), left justified, maximum 12 characters
# A colon separator
# The lowercase Greek character
# The uppercase Greek character
# NOTE(review): the format string below centres the name in 14 columns
# ('{1:^14s}'), which does not match the 'left justified, maximum 12'
# requirement stated above — confirm which is intended.
for pos, cname in enumerate(greek, start=0x03B1):
    try:
        char = chr(pos)
        print("{0:#x} {1:^14s} : {2:3s} {3:3s}".format(pos, cname, char, char.upper()))
    except UnicodeEncodeError as err:
        # Terminals that cannot encode Greek glyphs fall back to a marker.
        print (cname, 'unknown')
| [
"porkpie@gmail.com"
] | porkpie@gmail.com |
d15786b8e34263c55d3ab8ca58d919ff88dbdc42 | d22634a6101cafc75dde63f48882e29b0312388c | /master/python/swagger_client/models/fiat_dep_data.py | 06bd3229631ff1bc9dccbb05577149cc1209133d | [
"Apache-2.0"
] | permissive | rajdeep225/plugins | e4cf16bc612ceec160c6ea4bae9ffd47012e0b0a | 6f62aac931e15b844431b08858303cac53b9a5ca | refs/heads/master | 2020-12-24T05:40:05.313004 | 2016-08-21T19:00:00 | 2016-08-21T19:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,087 | py | # coding: utf-8
"""
Coinsecure Api Documentation
To generate an API key, please visit <a href='https://coinsecure.in/api' target='_new' class='homeapi'>https://coinsecure.in/api</a>.<br>Guidelines for use can be accessed at <a href='https://api.coinsecure.in/v1/guidelines'>https://api.coinsecure.in/v1/guidelines</a>.<br>Programming Language Libraries for use can be accessed at <a href='https://api.coinsecure.in/v1/code-libraries'>https://api.coinsecure.in/v1/code-libraries</a>.
OpenAPI spec version: beta
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class FiatDepData(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, time_details=None, deposit_id=None, value=None, account=None, reason=None):
        """
        FiatDepData - a model defined in Swagger
        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        # Maps attribute name -> declared swagger type; drives to_dict().
        self.swagger_types = {
            'time_details': 'TimeDataCoin',
            'deposit_id': 'str',
            'value': 'int',
            'account': 'str',
            'reason': 'str'
        }
        # Maps attribute name -> JSON key used in the API payload.
        self.attribute_map = {
            'time_details': 'timeDetails',
            'deposit_id': 'depositID',
            'value': 'value',
            'account': 'account',
            'reason': 'reason'
        }
        self._time_details = time_details
        self._deposit_id = deposit_id
        self._value = value
        self._account = account
        self._reason = reason
    @property
    def time_details(self):
        """
        Gets the time_details of this FiatDepData.
        :return: The time_details of this FiatDepData.
        :rtype: TimeDataCoin
        """
        return self._time_details
    @time_details.setter
    def time_details(self, time_details):
        """
        Sets the time_details of this FiatDepData.
        :param time_details: The time_details of this FiatDepData.
        :type: TimeDataCoin
        """
        self._time_details = time_details
    @property
    def deposit_id(self):
        """
        Gets the deposit_id of this FiatDepData.
        :return: The deposit_id of this FiatDepData.
        :rtype: str
        """
        return self._deposit_id
    @deposit_id.setter
    def deposit_id(self, deposit_id):
        """
        Sets the deposit_id of this FiatDepData.
        :param deposit_id: The deposit_id of this FiatDepData.
        :type: str
        """
        self._deposit_id = deposit_id
    @property
    def value(self):
        """
        Gets the value of this FiatDepData.
        :return: The value of this FiatDepData.
        :rtype: int
        """
        return self._value
    @value.setter
    def value(self, value):
        """
        Sets the value of this FiatDepData.
        :param value: The value of this FiatDepData.
        :type: int
        """
        self._value = value
    @property
    def account(self):
        """
        Gets the account of this FiatDepData.
        :return: The account of this FiatDepData.
        :rtype: str
        """
        return self._account
    @account.setter
    def account(self, account):
        """
        Sets the account of this FiatDepData.
        :param account: The account of this FiatDepData.
        :type: str
        """
        self._account = account
    @property
    def reason(self):
        """
        Gets the reason of this FiatDepData.
        :return: The reason of this FiatDepData.
        :rtype: str
        """
        return self._reason
    @reason.setter
    def reason(self, reason):
        """
        Sets the reason of this FiatDepData.
        :param reason: The reason of this FiatDepData.
        :type: str
        """
        self._reason = reason
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Recurse into nested models and containers of models.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Structural equality: compares every attribute via __dict__.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"vivek0@users.noreply.github.com"
] | vivek0@users.noreply.github.com |
24d3c36c02a4b47b41d98818cf1b47b210c12c13 | 72b1035d1589d3b77c721c7be4e248bce8cde185 | /pythonapi.py | 9dc0ac5dff54962fc86678d02bdebc952576d5b4 | [] | no_license | marciopocebon/fast2sms | 2632f5c0594392604302b2d9a20f005ddecde58a | 617124b86fc46526d8312c43f27cdaa6afaec5f9 | refs/heads/master | 2020-09-14T06:09:54.435634 | 2019-04-14T16:26:19 | 2019-04-14T16:26:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,810 | py | import requests
import json
def forward_to_api(url,mobile,message,api_key,flash,count):
payload = {'senderId': 'FSTSMS', 'mobile': mobile, 'message': message, 'flash': flash}
headers = {'Authorization': api_key}
print '\n-- Trying to send SMS via API ['+str(count)+'] --'
response = requests.request("POST", url, data=payload, headers=headers)
if response.json()['return'] != True:
return False
print '<< '+response.json()['message']+' >>'
return response.json()['return']
def forward_to_paid_api(url,mobile,message,api_key,flash,count):
headers = {'cache-control': "no-cache"}
querystring = {"authorization":api_key,"sender_id":"FSTSMS","message":message,"language":"english","route":"p","numbers":mobile,"flash":flash}
print '\n-- Trying to send SMS via Paid API ['+str(count)+'] --'
response = requests.request("GET", url, headers=headers, params=querystring)
print '<< '+response.json()['message']+' >>'
return response.json()['return']
def send_sms(sms_data):
phone = sms_data[0]
message = sms_data[1]
allowed_sms_length = 149
#Trim Message length to 160-11 = 149 characters#
if len(message) > allowed_sms_length:
message = message[0:145]
message+='[..]'
print '--> Sending SMS to '+str(phone)
number = str(phone)
api_keys = ["API key1", # Your free msgs API key
"API key2"] # Your paid msgs API key
url = "https://www.fast2sms.com/api/sms/free"
for key in api_keys:
count = api_keys.index(key)+1
if api_keys.index(key)>1:
url = "https://www.fast2sms.com/dev/bulk"
sent_status = forward_to_paid_api(url,number,message,key,0,count)
else:
sent_status = forward_to_api(url,number,message,key,0,count)
if sent_status:
return 'SUCCESS'
else:
print '-- SMS was not sent. Retrying... --'
continue
if sent_status == False:
return 'ERROR'
| [
"noreply@github.com"
] | marciopocebon.noreply@github.com |
790d12fdd35a10bb8f4f501ba6b96fef01d9ee64 | bc441bb06b8948288f110af63feda4e798f30225 | /resource_monitor_sdk/model/inspection/collector_pb2.pyi | fd9fb083c6ce2a16e062f751d97be2910a5a5a97 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,520 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from resource_monitor_sdk.model.inspection.arg_pb2 import (
InspectionArg as resource_monitor_sdk___model___inspection___arg_pb2___InspectionArg,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
# Aliases preserving the builtin types: generated message fields may shadow
# names like `bool` or `bytes`, so the annotations below reference these
# mangled aliases instead of the bare builtins.
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int

# `buffer` and `unicode` only exist on Python 2.
if sys.version_info < (3,):
    builtin___buffer = buffer
    builtin___unicode = unicode
class InspectionCollector(google___protobuf___message___Message):
    # Generated type stub for the InspectionCollector protobuf message:
    # a named collection script together with the arguments it accepts.
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    # Scalar string fields of the message.
    id = ... # type: typing___Text
    name = ... # type: typing___Text
    content = ... # type: typing___Text
    script = ... # type: typing___Text

    # Repeated message field: read-only container of InspectionArg entries.
    @property
    def args(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[resource_monitor_sdk___model___inspection___arg_pb2___InspectionArg]: ...

    # All fields are keyword-only and optional, per protobuf convention.
    def __init__(self,
        *,
        id : typing___Optional[typing___Text] = None,
        name : typing___Optional[typing___Text] = None,
        content : typing___Optional[typing___Text] = None,
        script : typing___Optional[typing___Text] = None,
        args : typing___Optional[typing___Iterable[resource_monitor_sdk___model___inspection___arg_pb2___InspectionArg]] = None,
        ) -> None: ...

    # Python 2 additionally accepted buffer-like inputs for FromString.
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> InspectionCollector: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> InspectionCollector: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"args",b"args",u"content",b"content",u"id",b"id",u"name",b"name",u"script",b"script"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
b2e93e0c9728d11d1d9ffc5ba84b45a1de565b61 | 7238e16c65051cafd41d6a973bd440598589b10c | /cella/migrations/0002_recoding.py | 5c0c82b92bbeb5515555233f5600befb9aa1de9f | [] | no_license | mnogoruk/TraductorCella | 06ff28b422d3fcc86be216a23ae311d8082f36a7 | 21ee676dc8d3c467454872436bee5df5c023beb6 | refs/heads/master | 2023-03-15T09:43:43.874227 | 2021-03-23T17:08:10 | 2021-03-23T17:08:10 | 330,973,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | # Generated by Django 3.1.5 on 2021-03-18 10:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration adding the ``Recoding`` model: a timestamped
    message optionally linked to the operator who produced it."""

    dependencies = [
        ('authentication', '0001_initial'),
        ('cella', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Recoding',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('message', models.TextField()),
                # Nullable with SET_NULL on purpose: deleting an operator
                # must not delete (or break) the recordings they created.
                ('operator', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='authentication.operator')),
            ],
        ),
    ]
| [
"lionless072@gmail.com"
] | lionless072@gmail.com |
c298d73d0f66373ea5e1201d026b6b041489e7c5 | 4331b28f22a2efb12d462ae2a8270a9f666b0df1 | /.history/dvdstore/webapp/form_20190914124144.py | 51f49dd87c243928340315bdc38c041086f3c49f | [] | no_license | ZiyaadLakay/csc312.group.project | ba772a905e0841b17478eae7e14e43d8b078a95d | 9cdd9068b5e24980c59a53595a5d513c2e738a5e | refs/heads/master | 2020-07-26T23:30:22.542450 | 2019-09-16T11:46:41 | 2019-09-16T11:46:41 | 200,703,160 | 0 | 0 | null | 2019-08-05T17:52:37 | 2019-08-05T17:52:37 | null | UTF-8 | Python | false | false | 875 | py | from django import forms
from .models import DVD, Customer
class DocumentForm(forms.ModelForm):
    # Create/edit form exposing the user-editable columns of a DVD record.
    class Meta:
        model = DVD
        fields = ('Title','year','genre','InStock','Synopsis','BookingPickup' ,'NumOfTimesRented','ImageDVD')
class customerForm:
    # NOTE(review): this class declares no forms.ModelForm base, so Django
    # never treats it as a form and the inner Meta is inert. It is also
    # immediately shadowed by the identical re-definition below; one of the
    # two definitions should be removed.
    class Meta:
        model= Customer
        #user = User.objects.create_user(username=username, password=password1, email=email, first_name=first_name, last_name=last_name)
        fields = ('username','password','email','first_name','last_name','phone_number','address','identification')
class customerForm(forms.ModelForm):
    # Registration form backed by the Customer model.
    #
    # FIX: the class previously declared no base class, so Django treated it
    # as a plain object and the inner Meta options were inert; it must
    # subclass forms.ModelForm for the field list to take effect.
    # (This definition shadows the identical plain-class one above it.)
    class Meta:
        model = Customer
        #user = User.objects.create_user(username=username, password=password1, email=email, first_name=first_name, last_name=last_name)
        fields = ('username','password','email','first_name','last_name','phone_number','address','identification')
        # NOTE(review): 'password' renders as a plain text field and would be
        # saved unhashed; consider forms.CharField(widget=forms.PasswordInput)
        # plus set_password() in the view -- TODO confirm the intended flow.
| [
"uzairjoneswolf@gmail.com"
] | uzairjoneswolf@gmail.com |
513eb3ce39cc001993bc72f82d1bb7c5faaf1a94 | a9063fd669162d4ce0e1d6cd2e35974274851547 | /swagger_client/models/tsp_accounts_list1.py | 413e6c2c8c5b80d3ab26a0e7afdb03ee2977b8fb | [] | no_license | rootalley/py-zoom-api | 9d29a8c750e110f7bd9b65ff7301af27e8518a3d | bfebf3aa7b714dcac78be7c0affb9050bbce8641 | refs/heads/master | 2022-11-07T14:09:59.134600 | 2020-06-20T18:13:50 | 2020-06-20T18:13:50 | 273,760,906 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,034 | py | # coding: utf-8
"""
Zoom API
The Zoom API allows developers to safely and securely access information from Zoom. You can use this API to build private services or public applications on the [Zoom App Marketplace](http://marketplace.zoom.us). To learn how to get your credentials and create private/public applications, read our [Authorization Guide](https://marketplace.zoom.us/docs/guides/authorization/credentials). All endpoints are available via `https` and are located at `api.zoom.us/v2/`. For instance you can list all users on an account via `https://api.zoom.us/v2/users/`. # noqa: E501
OpenAPI spec version: 2.0.0
Contact: developersupport@zoom.us
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class TSPAccountsList1(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> swagger-declared type (consumed by to_dict and the
    # generated (de)serialization machinery).
    swagger_types = {
        'conference_code': 'str',
        'leader_pin': 'str',
        'dial_in_numbers': 'list[UsersuserIdtspDialInNumbers]',
        'tsp_bridge': 'str'
    }

    # Attribute name -> JSON key used on the wire by the Zoom API.
    attribute_map = {
        'conference_code': 'conference_code',
        'leader_pin': 'leader_pin',
        'dial_in_numbers': 'dial_in_numbers',
        'tsp_bridge': 'tsp_bridge'
    }

    def __init__(self, conference_code=None, leader_pin=None, dial_in_numbers=None, tsp_bridge=None):  # noqa: E501
        """TSPAccountsList1 - a model defined in Swagger"""  # noqa: E501
        self._conference_code = None
        self._leader_pin = None
        self._dial_in_numbers = None
        self._tsp_bridge = None
        self.discriminator = None
        # Required fields: the property setters below raise ValueError on None.
        self.conference_code = conference_code
        self.leader_pin = leader_pin
        # Optional fields are only assigned when explicitly provided.
        if dial_in_numbers is not None:
            self.dial_in_numbers = dial_in_numbers
        if tsp_bridge is not None:
            self.tsp_bridge = tsp_bridge

    @property
    def conference_code(self):
        """Gets the conference_code of this TSPAccountsList1.  # noqa: E501

        Conference code: numeric value, length is less than 16.  # noqa: E501

        :return: The conference_code of this TSPAccountsList1.  # noqa: E501
        :rtype: str
        """
        return self._conference_code

    @conference_code.setter
    def conference_code(self, conference_code):
        """Sets the conference_code of this TSPAccountsList1.

        Conference code: numeric value, length is less than 16.  # noqa: E501

        :param conference_code: The conference_code of this TSPAccountsList1.  # noqa: E501
        :type: str
        """
        # Required field: reject None outright.
        if conference_code is None:
            raise ValueError("Invalid value for `conference_code`, must not be `None`")  # noqa: E501

        self._conference_code = conference_code

    @property
    def leader_pin(self):
        """Gets the leader_pin of this TSPAccountsList1.  # noqa: E501

        Leader PIN: numeric value, length is less than 16.  # noqa: E501

        :return: The leader_pin of this TSPAccountsList1.  # noqa: E501
        :rtype: str
        """
        return self._leader_pin

    @leader_pin.setter
    def leader_pin(self, leader_pin):
        """Sets the leader_pin of this TSPAccountsList1.

        Leader PIN: numeric value, length is less than 16.  # noqa: E501

        :param leader_pin: The leader_pin of this TSPAccountsList1.  # noqa: E501
        :type: str
        """
        # Required field: reject None outright.
        if leader_pin is None:
            raise ValueError("Invalid value for `leader_pin`, must not be `None`")  # noqa: E501

        self._leader_pin = leader_pin

    @property
    def dial_in_numbers(self):
        """Gets the dial_in_numbers of this TSPAccountsList1.  # noqa: E501

        List of dial in numbers.  # noqa: E501

        :return: The dial_in_numbers of this TSPAccountsList1.  # noqa: E501
        :rtype: list[UsersuserIdtspDialInNumbers]
        """
        return self._dial_in_numbers

    @dial_in_numbers.setter
    def dial_in_numbers(self, dial_in_numbers):
        """Sets the dial_in_numbers of this TSPAccountsList1.

        List of dial in numbers.  # noqa: E501

        :param dial_in_numbers: The dial_in_numbers of this TSPAccountsList1.  # noqa: E501
        :type: list[UsersuserIdtspDialInNumbers]
        """
        # Optional field: no validation beyond assignment.
        self._dial_in_numbers = dial_in_numbers

    @property
    def tsp_bridge(self):
        """Gets the tsp_bridge of this TSPAccountsList1.  # noqa: E501

        Telephony bridge  # noqa: E501

        :return: The tsp_bridge of this TSPAccountsList1.  # noqa: E501
        :rtype: str
        """
        return self._tsp_bridge

    @tsp_bridge.setter
    def tsp_bridge(self, tsp_bridge):
        """Sets the tsp_bridge of this TSPAccountsList1.

        Telephony bridge  # noqa: E501

        :param tsp_bridge: The tsp_bridge of this TSPAccountsList1.  # noqa: E501
        :type: str
        """
        # Enum-style validation: only the two documented bridges are accepted.
        # Note this also rejects None, so the optional field cannot be unset
        # through the property once constructed.
        allowed_values = ["US_TSP_TB", "EU_TSP_TB"]  # noqa: E501
        if tsp_bridge not in allowed_values:
            raise ValueError(
                "Invalid value for `tsp_bridge` ({0}), must be one of {1}"  # noqa: E501
                .format(tsp_bridge, allowed_values)
            )

        self._tsp_bridge = tsp_bridge

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared swagger attributes and recursively convert any
        # nested models (or containers of models) via their own to_dict().
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated boilerplate: merges dict items when the model subclasses
        # dict (not the case here, so this branch is effectively dead).
        if issubclass(TSPAccountsList1, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TSPAccountsList1):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"github@rootalley.com"
] | github@rootalley.com |
6080401306f6ff3f3b2e6b26eefa0f58cc359af8 | ef1f62cf4e53f856bf763ac0dee73f054518530d | /Week_09/115.Distinct_Subsequences.py | 018e3a33c7c7d88cdacda79eb2ae627bdde78904 | [] | no_license | ZHHJemotion/algorithm008-class01 | 3338af3619d8e1754a62af6a852f517b47298d95 | 5bb7d2b74110df0b5788b94c69582552d711563a | refs/heads/master | 2022-11-12T09:26:24.941738 | 2020-06-30T15:29:20 | 2020-06-30T15:29:20 | 255,102,230 | 0 | 0 | null | 2020-04-12T14:39:17 | 2020-04-12T14:39:17 | null | UTF-8 | Python | false | false | 1,702 | py | # Given a string S and a string T, count the number of distinct subsequences of
# S which equals T.
#
# A subsequence of a string is a new string which is formed from the original s
# tring by deleting some (can be none) of the characters without disturbing the re
# lative positions of the remaining characters. (ie, "ACE" is a subsequence of "AB
# CDE" while "AEC" is not).
#
# It's guaranteed the answer fits on a 32-bit signed integer.
#
# Example 1:
#
#
# Input: S = "rabbbit", T = "rabbit"
# Output: 3
# Explanation:
# As shown below, there are 3 ways you can generate "rabbit" from S.
# (The caret symbol ^ means the chosen letters)
#
# rabbbit
# ^^^^ ^^
# rabbbit
# ^^ ^^^^
# rabbbit
# ^^^ ^^^
#
#
# Example 2:
#
#
# Input: S = "babgbag", T = "bag"
# Output: 5
# Explanation:
# As shown below, there are 5 ways you can generate "bag" from S.
# (The caret symbol ^ means the chosen letters)
#
# babgbag
# ^^ ^
# babgbag
# ^^ ^
# babgbag
# ^ ^^
# babgbag
# ^ ^^
# babgbag
# ^^^
#
# Related Topics String Dynamic Programming
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
    def numDistinct(self, s: str, t: str) -> int:
        """Count the distinct subsequences of ``s`` that equal ``t``.

        Classic tabulation: ``table[i][j]`` holds the number of ways to
        build ``t[:i]`` out of ``s[:j]``. An empty target can be formed in
        exactly one way, so the entire first row starts at 1.
        """
        rows, cols = len(t), len(s)
        table = [[1] * (cols + 1)] + [[0] * (cols + 1) for _ in range(rows)]
        for i in range(1, rows + 1):
            for j in range(1, cols + 1):
                # Ways that skip s[j-1] entirely...
                ways = table[i][j - 1]
                # ...plus ways that consume s[j-1] as t[i-1] when they match.
                if t[i - 1] == s[j - 1]:
                    ways += table[i - 1][j - 1]
                table[i][j] = ways
        return table[rows][cols]
# leetcode submit region end(Prohibit modification and deletion)
| [
"zhhjemotion@hotmail.com"
] | zhhjemotion@hotmail.com |
302200b67c6ff46459d3cbb1afd4dd9ccceda468 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/healthinsights/azure-healthinsights-cancerprofiling/setup.py | 661aee6d571a51ce540e6ca8bc4357451c10400c | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 2,517 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) Python Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
import os
import re
from setuptools import setup, find_packages
PACKAGE_NAME = "azure-healthinsights-cancerprofiling"
PACKAGE_PPRINT_NAME = "Cognitive Services Health Insights Cancer Profilings"

# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace("-", "/")

# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, "_version.py"), "r") as fd:
    version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1)

if not version:
    raise RuntimeError("Cannot find version information")

# FIX: read the README through a context manager with an explicit encoding.
# The previous inline open() leaked the file handle and relied on the
# locale's default encoding, which breaks builds on non-UTF-8 systems.
with open("README.md", "r", encoding="utf-8") as readme:
    long_description = readme.read()

setup(
    name=PACKAGE_NAME,
    version=version,
    description='Microsoft {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
    long_description=long_description,
    long_description_content_type="text/markdown",
    license="MIT License",
    author="Microsoft Corporation",
    author_email="azpysdkhelp@microsoft.com",
    url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk",
    keywords="azure, azure sdk",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "License :: OSI Approved :: MIT License",
    ],
    zip_safe=False,
    packages=find_packages(
        exclude=[
            "tests",
            # Exclude packages that will be covered by PEP420 or nspkg
            "azure",
            "azure.healthinsights",
        ]
    ),
    include_package_data=True,
    package_data={
        "pytyped": ["py.typed"],
    },
    install_requires=[
        "isodate<1.0.0,>=0.6.1",
        "azure-core<2.0.0,>=1.24.0",
        "typing-extensions>=4.3.0; python_version<'3.8.0'",
    ],
    python_requires=">=3.7",
)
| [
"noreply@github.com"
] | Azure.noreply@github.com |
363ccaf11d04a765dfe123aa8108c5efc179a7a3 | fb78fd824e904705fb1ee09db8b3c20cc3902805 | /django-react-app/leads/models.py | 17a10418f8e0184ed0c4e0904cc1bf8146d137be | [] | no_license | Roderich25/mac | 8469833821ac49c539a744db29db5a41d755ad55 | 4f7fe281c88f0199b85d0ac99ce41ffb643d6e82 | refs/heads/master | 2023-01-12T05:55:12.753209 | 2021-11-26T01:16:24 | 2021-11-26T01:16:24 | 207,029,750 | 0 | 0 | null | 2023-01-07T11:49:23 | 2019-09-07T21:51:53 | Jupyter Notebook | UTF-8 | Python | false | false | 277 | py | from django.db import models
class Lead(models.Model):
    """A contact lead captured from the frontend form."""

    name = models.CharField(max_length=100)
    email = models.EmailField(max_length=100, unique=True)  # one lead per address
    message = models.CharField(max_length=500, blank=True)  # optional free text
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        # Django convention: a readable label for the admin and shell,
        # replacing the default "Lead object (pk)".
        return self.name
| [
"rodrigoavilasolis@gmail.com"
] | rodrigoavilasolis@gmail.com |
fddd9876f7cba4560371eeb94013cf83c23decbe | d9da91e23cb394f4f1622755098717840715be23 | /python/jittor/utils/polish.py | a03b8ed03942ed06fcd290bcfa8dfa556ec73ba0 | [
"Apache-2.0"
] | permissive | yezi05/jittor | 4af5e857575aca28c2b0f7dbfb4d8b717ee659ff | 03e8253a363aa74ce68e707ccf1726f30d9d64c5 | refs/heads/master | 2023-02-06T03:01:59.934523 | 2020-12-22T13:58:52 | 2020-12-22T13:58:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,577 | py | #!/usr/bin/python3
# ***************************************************************
# Copyright (c) 2020 Jittor. Authors: Dun Liang <randonlang@gmail.com>. All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
# Polish steps:
# 1. create jittor-polish repo
# 2. copy jittor src into it
# 3. remove files
# 4. commit jittor-polish(check modify and break)
# 5. compile to build/$git_version/$cc_type/$use_cuda/a.obj
# 6. rsync to build-server
# 7. push to github
# 8. push to pip
import os
import jittor as jt
from jittor import LOG
from jittor.compiler import run_cmd
from jittor_utils import translator
import sys
# Resolve the installed jittor package, the repository root, and the
# __data__ source tree whose compiled artifacts this script publishes.
jittor_path = jt.flags.jittor_path
root_path = os.path.realpath(os.path.join(jt.flags.jittor_path, "..", ".."))
data_path = os.path.join(jittor_path, "src", "__data__")
build_path = os.path.join(data_path, "build")
if not os.path.isdir(build_path):
    os.mkdir(build_path)

# Refuse to polish from a dirty __data__ checkout: the artifacts built
# below are keyed by the git revision, so uncommitted changes would lie.
status = run_cmd("git status", data_path)
print(status)
if "working tree clean" not in status:
    LOG.f("__data__ has untracked files")
git_version = run_cmd("git rev-parse HEAD", data_path)
LOG.i("git_version", git_version)
run_cmd(f"git rev-parse HEAD > {jittor_path}/version", data_path)

# remove files
# Select only the compiler sources living under __data__; these are the
# files that get pre-compiled and shipped as one combined object file.
files = jt.compiler.files
data_files = [ name for name in files
    if "__data__" in name
]
LOG.i("data_files", data_files)

# compile data files
from pathlib import Path
home = str(Path.home())
# for cc_type in ["g++", "clang"]:
#     for device in ["cpu", "cuda"]:
# One artifact per (compiler, device) pair; currently only g++/cpu is built.
for cc_type in ["g++"]:
    for device in ["cpu"]:
        key = f"{git_version}-{cc_type}-{device}"
        env = f"cache_name=build/{cc_type}/{device} cc_path="
        cname = "g++" if cc_type=="g++" else "clang-8"
        env += cname
        # use core2 arch, avoid using avx instructions
        # TODO: support more archs, such as arm, or use ir(GIMPLE or LLVM)
        env += " cc_flags='-march=core2' "
        if device == "cpu":
            env += "nvcc_path='' "
        elif jt.flags.nvcc_path == "":
            env = "unset nvcc_path && " + env
        # Import jittor under the crafted environment so it compiles its own
        # cache. NOTE(review): the command is deliberately run twice --
        # presumably so the second pass completes against a warm cache;
        # confirm whether a single run suffices.
        cmd = f"{env} {sys.executable} -c 'import jittor'"
        LOG.i("run cmd:", cmd)
        os.system(cmd)
        LOG.i("run cmd:", cmd)
        os.system(cmd)
        # Collect the per-source object files emitted above and link them
        # into one relocatable object named after revision/compiler/device.
        obj_path = home + f"/.cache/jittor/build/{cc_type}/{device}/{cname}/obj_files"
        obj_files = []
        for name in data_files:
            name = name.split("/")[-1]
            fname = f"{obj_path}/{name}.o"
            assert os.path.isfile(fname), fname
            obj_files.append(fname)
        run_cmd(f"ld -r {' '.join(obj_files)} -o {build_path}/{key}.o")

# compress source
# tar -cvzf build/jittor.tgz . --exclude build --exclude .git --exclude .ipynb_checkpoints --exclude __pycache__
# mkdir -p jittor && tar -xvf ./jittor.tgz -C jittor
assert os.system(f"cd {root_path} && tar --exclude=build --exclude=.git --exclude=.ipynb_checkpoints --exclude=__pycache__ --exclude=__data__ --exclude=my --exclude=dist --exclude=.vscode --exclude=.github -cvzf {build_path}/jittor.tgz * ")==0

# rsync to build-server
# Upload the built artifacts, then trigger the blog repo's post-update hook
# on the remote so the new files get published.
jittor_web_base_dir = "Documents/jittor-blog/assets/"
jittor_web_build_dir = jittor_web_base_dir
assert os.system(f"rsync -avPu {build_path} jittor-web:{jittor_web_build_dir}")==0
assert os.system(f"ssh jittor-web Documents/jittor-blog.git/hooks/post-update")==0
# sys.exit(0)
# push to github
# assert os.system(f"cd {polish_path} && git push -f origin master")==0
# push to pip
"randonlang@gmail.com"
] | randonlang@gmail.com |
efe621adfaa9b8a93bd44ee3a70caffb919035cf | 23f6dbacd9b98fdfd08a6f358b876d3d371fc8f6 | /rootfs/usr/share/pyshared/desktopcouch/pair/__init__.py | fc4788fa401ce96d02d912686bae79f0a5eedc4b | [] | no_license | xinligg/trainmonitor | 07ed0fa99e54e2857b49ad3435546d13cc0eb17a | 938a8d8f56dc267fceeb65ef7b867f1cac343923 | refs/heads/master | 2021-09-24T15:52:43.195053 | 2018-10-11T07:12:25 | 2018-10-11T07:12:25 | 116,164,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | # Copyright 2009 Canonical Ltd.
#
# This file is part of desktopcouch.
#
# desktopcouch is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# desktopcouch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with desktopcouch. If not, see <http://www.gnu.org/licenses/>.
"""The pair module."""
# Record-type URI stamped on CouchDB records that mark a server as paired
# with this desktop (per the freedesktop desktopcouch specification URL).
pairing_record_type = "http://www.freedesktop.org/wiki/Specifications/desktopcouch/paired_server"
| [
"root@xinli.xinli"
] | root@xinli.xinli |
ec29664e79ce885956b357da822230d58ef90bff | cf088e68e93981292270905c983378288c4bbd66 | /backend/chat/migrations/0001_initial.py | 2fb31cf4d731520bfec0e757eea48934675de7d1 | [] | no_license | crowdbotics-apps/test-27314 | 92f91f9e3bc3cfe508338cd71b8d29df8de1927d | e436c8d0f0b346b23b26b90ea698c3ad0020711e | refs/heads/master | 2023-04-21T01:55:47.441435 | 2021-05-24T10:39:51 | 2021-05-24T10:39:51 | 370,310,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,805 | py | # Generated by Django 2.2.20 on 2021-05-24 10:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the chat app.

    Creates the Thread/ThreadMember/Message tables plus the action and
    forwarding audit tables. Message's foreign keys to Thread and
    ThreadMember are added afterwards via AddField because those models
    appear later in the operation list.
    """

    initial = True

    dependencies = [
        ('chat_user_profile', '0001_initial'),
    ]

    operations = [
        # A single chat message and its delivery/read lifecycle timestamps.
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('message', models.TextField()),
                ('attachment', models.URLField()),
                ('is_draft', models.BooleanField()),
                ('is_delivered', models.BooleanField()),
                ('is_read', models.BooleanField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('timestamp_delivered', models.DateTimeField()),
                ('timestamp_read', models.DateTimeField()),
            ],
        ),
        # A conversation.
        migrations.CreateModel(
            name='Thread',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('thread_photo', models.URLField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        # Join table linking a profile to a thread, with join/leave history.
        migrations.CreateModel(
            name='ThreadMember',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_admin', models.BooleanField()),
                ('timestamp_joined', models.DateTimeField(auto_now_add=True)),
                ('timestamp_left', models.DateTimeField()),
                ('last_rejoined', models.DateTimeField()),
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='threadmember_profile', to='chat_user_profile.Profile')),
                ('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='threadmember_thread', to='chat.Thread')),
            ],
        ),
        # Per-profile action log at thread level (7-character action code).
        migrations.CreateModel(
            name='ThreadAction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('action', models.CharField(max_length=7)),
                ('timestamp_action', models.DateTimeField(auto_now_add=True)),
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='threadaction_profile', to='chat_user_profile.Profile')),
                ('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='threadaction_thread', to='chat.Thread')),
            ],
        ),
        # Per-profile action log at message level.
        migrations.CreateModel(
            name='MessageAction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('action', models.CharField(max_length=7)),
                ('timestamp_action', models.DateTimeField(auto_now_add=True)),
                ('message', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messageaction_message', to='chat.Message')),
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messageaction_profile', to='chat_user_profile.Profile')),
            ],
        ),
        # Deferred Message FKs: ThreadMember/Thread exist only at this point.
        migrations.AddField(
            model_name='message',
            name='sent_by',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_sent_by', to='chat.ThreadMember'),
        ),
        migrations.AddField(
            model_name='message',
            name='thread',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_thread', to='chat.Thread'),
        ),
        # Record of a message being forwarded into another thread.
        migrations.CreateModel(
            name='ForwardedMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp_forwarded', models.DateTimeField(auto_now_add=True)),
                ('forwarded_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='forwardedmessage_forwarded_by', to='chat_user_profile.Profile')),
                ('forwarded_to', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='forwardedmessage_forwarded_to', to='chat.Thread')),
                ('message', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='forwardedmessage_message', to='chat.Message')),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
0f3033d9f66cd33138abf1898e4ac25f24abc2f5 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/429/usersdata/321/101585/submittedfiles/jogoDaVelha.py | ceb9986c93fca04e49a3a98db4789eb815024b33 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | # -*- coding: utf-8 -*-
from jogoDaVelha_BIB import *
# COLOQUE SEU PROGRAMA A PARTIR DAQUI
print('Bem vindo ao JogoDaVelha do grupo 8 [Iara, Ingrid, Luiz Otávio, Tatiane]')
nome = str(input('Qual seu nome? '))
s = str(input('Qual símbolo você deseja utilizar no jogo? (X ou O) '))
# BUG FIX: the original condition `s != X or s != O` referenced the
# undefined names X and O (NameError at runtime) and, being an `or` of
# two inequalities, was true for every possible input, so the loop could
# never terminate. A membership test against the literal symbols is the
# intended check.
while s not in ('X', 'O'):
    print('Isira um símbolo válido')
    s = str(input('Qual símbolo você deseja utilizar no jogo? '))
# NOTE(review): `sorteio`, `inicio` and `tabuleiro` are presumably provided
# by the star import from jogoDaVelha_BIB -- confirm they exist there.
print(sorteio(inicio))
print(tabuleiro)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
7f485812412727f8eec54250a2717de3c9cc38c6 | c0e1d9ab5c5fae94f988e03ead37337cd283e012 | /earth461/ass1/q1b.py | 9c0c57dc3f730e8c0738ef34cbfc4ba6e4f27518 | [] | no_license | BruceJohnJennerLawso/turbulence-chain | 114dafd19fa00f6e0af7113cdaa8d603a62dfc66 | 767786368c08e8ca3ba0fbc2896cb6cc9908ebdb | refs/heads/master | 2016-09-01T05:15:48.650177 | 2015-12-07T01:49:46 | 2015-12-07T01:49:46 | 45,279,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,312 | py | ## q2.py #######################################################################
## log log conductivity by ppm again, this time with different values ##########
## depending on temperature ####################################################
################################################################################
from conductivity import *
if __name__ == "__main__":
    # Sweep the TDS range and evaluate the sand conductivity (scaled to
    # mS/m) at three water temperatures, then plot all three curves on
    # log-log axes. getConductivity/adjustConductivityForTemperature come
    # from the conductivity module and are assumed side-effect free.
    tds_values = range(50, 50000)
    tencConductivities = [
        adjustConductivityForTemperature(25, 10, getConductivity(1.0, 0.40, 1.5, cy, 0.5)) * 1000
        for cy in tds_values
    ]
    twentyfivecConductivities = [
        getConductivity(1.0, 0.40, 1.5, cy, 0.5) * 1000
        for cy in tds_values
    ]
    fortycConductivities = [
        adjustConductivityForTemperature(25, 40, getConductivity(1.0, 0.40, 1.5, cy, 0.5)) * 1000
        for cy in tds_values
    ]
    ## 0.5 works here cause we can
    ten = dataSet(tds_values, tencConductivities, "10 Degrees C", "b-")
    twentyfive = dataSet(tds_values, twentyfivecConductivities, "25 Degrees C", "g-")
    forty = dataSet(tds_values, fortycConductivities, "40 Degrees C", "r-")
    graphLogLog("log conductivity by log TDS for various\n temperatures of saturated sands",
                "log sand conductivity (mS/m)", "log TDS (ppm)", 1, 1e6, 1e-2, 1e4, True,
                ten, twentyfive, forty)
| [
"johnnybgoode@rogers.com"
] | johnnybgoode@rogers.com |
9bf0b8872faffa3a20d8a64810b284909a28af6b | a8b37bd399dd0bad27d3abd386ace85a6b70ef28 | /airbyte-ci/connectors/metadata_service/orchestrator/tests/test_dagster_helpers.py | 13d92550f51ddd6f46330b65e3966c660c1ef658 | [
"LicenseRef-scancode-free-unknown",
"MIT",
"Elastic-2.0"
] | permissive | thomas-vl/airbyte | 5da2ba9d189ba0b202feb952cadfb550c5050871 | 258a8eb683634a9f9b7821c9a92d1b70c5389a10 | refs/heads/master | 2023-09-01T17:49:23.761569 | 2023-08-25T13:13:11 | 2023-08-25T13:13:11 | 327,604,451 | 1 | 0 | MIT | 2021-01-07T12:24:20 | 2021-01-07T12:24:19 | null | UTF-8 | Python | false | false | 1,358 | py | from orchestrator.utils.dagster_helpers import string_array_to_hash
def test_string_array_to_hash_is_deterministic():
    """Hashing the same list twice must produce the same cursor value."""
    values = ["hello", "world", "foo", "bar", "baz"]
    first_pass = string_array_to_hash(values)
    second_pass = string_array_to_hash(values)
    assert first_pass == second_pass
def test_string_array_to_hash_ignores_repeated_strings():
    """Duplicate entries must not change the resulting hash."""
    base = ["hello", "world", "foo", "bar", "baz"]
    with_duplicates = ["hello", "world", "foo", "bar", "baz", "foo", "bar"]
    assert string_array_to_hash(base) == string_array_to_hash(with_duplicates)
def test_string_array_to_hash_outputs_on_empty_list():
    """Even an empty list must hash to a truthy value."""
    empty_hash = string_array_to_hash([])
    assert empty_hash
def test_string_array_to_hash_ignores_value_order_input():
    """Hashing is order-insensitive: permutations share one hash."""
    reversed_order = ["baz", "bar", "foo", "world", "hello"]
    original_order = ["hello", "world", "foo", "bar", "baz"]
    assert string_array_to_hash(reversed_order) == string_array_to_hash(original_order)
def test_string_array_to_hash_differs():
    """Distinct string sets must map to distinct hashes."""
    cursors = {
        string_array_to_hash(["hello", "world", "foo"]),
        string_array_to_hash(["hello", "world", "foo", "bar", "baz", "foo", "bar"]),
        string_array_to_hash(["hello", "world", "baz"]),
        string_array_to_hash(["world", "baz"]),
    }
    # A set collapses equal hashes, so all four must have survived.
    assert len(cursors) == 4
| [
"noreply@github.com"
] | thomas-vl.noreply@github.com |
2d59dda3daae2055f815f55af8792090bb339bf3 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/CJ_16_2/16_2_1_Mike_Xiao_q1.py | f1ab7c431a32948cb0d5c6ff6318219a81209fcd | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 855 | py | filename = "A-large"
def decode_digits(in_str, _unused=None):
    """Recover the sorted digit string whose English names were scrambled
    into *in_str* (Google Code Jam 2016 Round 1B problem A).

    Each digit is identified by a letter unique to it once previously
    resolved digits are subtracted out.
    """
    counts = [0] * 10
    counts[0] = in_str.count('Z')  # Z only in ZERO
    counts[2] = in_str.count('W')  # W only in TWO
    counts[4] = in_str.count('U')  # U only in FOUR
    counts[6] = in_str.count('X')  # X only in SIX
    counts[8] = in_str.count('G')  # G only in EIGHT
    counts[3] = in_str.count('H') - counts[8]                           # H in THREE, EIGHT
    counts[5] = in_str.count('F') - counts[4]                           # F in FIVE, FOUR
    counts[1] = in_str.count('O') - counts[0] - counts[2] - counts[4]   # O in ONE, ZERO, TWO, FOUR
    counts[7] = in_str.count('V') - counts[5]                           # V in SEVEN, FIVE
    counts[9] = in_str.count('I') - counts[5] - counts[6] - counts[8]   # I in NINE, FIVE, SIX, EIGHT
    return "".join(str(digit) * counts[digit] for digit in range(10))


def main(filename="A-large"):
    """Read <filename>.in, decode each case and write <filename>.out.

    Fixes vs. the original script: file handles are closed via `with`,
    and nothing runs at import time (main guard below).
    """
    with open(filename + ".in", "r") as fin, open(filename + ".out", "w") as fout:
        casenum = int(fin.readline())
        for ite in range(casenum):
            fout.write("Case #{0}: {1}\n".format(ite + 1, decode_digits(fin.readline())))


if __name__ == "__main__":
    main()
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
286e5b54ffddc08cced75a5f8c14d1b7d58eb735 | 04c06575a49a3f4e30e4f3f2bf2365585664d2e8 | /python_leetcode_2020/Python_Leetcode_2020/204_count_primes.py | 92cf09def1c8409654aad00245651ec1aa15c81e | [] | no_license | xiangcao/Leetcode | 18da3d5b271ff586fdf44c53f1a677423ca3dfed | d953abe2c9680f636563e76287d2f907e90ced63 | refs/heads/master | 2022-06-22T04:45:15.446329 | 2022-06-17T13:03:01 | 2022-06-17T13:03:01 | 26,052,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | """
Count the number of prime numbers less than a non-negative number, n.
Example 1:
Input: n = 10
Output: 4
Explanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7.
Example 2:
Input: n = 0
Output: 0
Example 3:
Input: n = 1
Output: 0
"""
class Solution:
    def countPrimes(self, n: int) -> int:
        """Count the primes strictly less than *n* with a Sieve of Eratosthenes.

        Bug fix: the original called ``math.sqrt`` although ``math`` was never
        imported (NameError at runtime); the sieve bound is now computed with
        ``n ** 0.5`` so no import is required.
        """
        if n < 3:
            return 0  # there are no primes below 2
        is_prime = [True] * n
        is_prime[0] = is_prime[1] = False
        for i in range(2, int(n ** 0.5) + 1):
            if is_prime[i]:
                # Start at i*i: smaller multiples were already marked
                # by smaller prime factors.
                for j in range(i * i, n, i):
                    is_prime[j] = False
        return is_prime.count(True)
| [
"xiangcao_liu@apple.com"
] | xiangcao_liu@apple.com |
6cc9e1b159a966897cb04e871047b58e49c391ed | 83cf642504313b6ef6527dda52158a6698c24efe | /scripts/addons/fd_scripting_tools/autocompletion/suggestions/dynamic/_bpy_fake/__private__/motionpath.py | 0f8b705595927dd286be09273a493ff5ba911c2d | [] | no_license | PyrokinesisStudio/Fluid-Designer-Scripts | a4c40b871e8d27b0d76a8025c804d5a41d09128f | 23f6fca7123df545f0c91bf4617f4de7d9c12e6b | refs/heads/master | 2021-06-07T15:11:27.144473 | 2016-11-08T03:02:37 | 2016-11-08T03:02:37 | 113,630,627 | 1 | 0 | null | 2017-12-09T00:55:58 | 2017-12-09T00:55:58 | null | UTF-8 | Python | false | false | 1,020 | py | from . motionpathvert import MotionPathVert
from . struct import Struct
from . bpy_struct import bpy_struct
import mathutils
class MotionPath(bpy_struct):
    """Auto-generated completion stub mirroring Blender's bpy.types.MotionPath.

    Each property returns a placeholder value of the documented type; this
    module only feeds IDE auto-completion and is never executed by Blender.
    """
    @property
    def rna_type(self):
        '''(Struct) RNA type definition'''
        return Struct()
    @property
    def points(self):
        '''(Sequence of MotionPathVert) Cached positions per frame'''
        return (MotionPathVert(),)
    @property
    def frame_start(self):
        '''(Integer) Starting frame of the stored range'''
        return int()
    @property
    def frame_end(self):
        '''(Integer) End frame of the stored range'''
        return int()
    @property
    def length(self):
        '''(Integer) Number of frames cached'''
        return int()
    @property
    def use_bone_head(self):
        '''(Boolean) For PoseBone paths, use the bone head location when
        calculating this path'''
        return bool()
    @property
    def is_modified(self):
        '''(Boolean) Path is being edited'''
        return bool()
"dev.andrewpeel@gmail.com"
] | dev.andrewpeel@gmail.com |
583762e669d9ed831762240fab8c3d9d18bf4965 | 7b5f6c72fa6427a9d9350d1c9007f346161b3866 | /portfolio/views.py | 8c90e48160b005931e983f557f6ad67693646031 | [
"MIT"
] | permissive | GoWebyCMS/portfolio | a15daf8f553e7ddb92154c60e7b57b4383fa2a44 | 1ed5c20f6fe280388ff0876ca6a5b5129cf6b3f2 | refs/heads/master | 2020-06-25T22:33:46.298359 | 2017-07-17T10:44:46 | 2017-07-17T10:44:46 | 96,993,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.views.generic import ListView
from django.utils import timezone
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import Project, ProjectCategory, Skill
# Create your views here.
# TODO: Find out how to UT test a class based view
# class PortfolioListView(ListView):
def portfolio_list(request):
    """Render the paginated portfolio list (six projects per page).

    Only projects whose end_date has passed are shown, newest first.
    Bug fix: the original referenced an undefined name ``Category``;
    the model imported from .models is ``ProjectCategory``.
    """
    project_list = Project.objects.filter(end_date__lte=timezone.now()).order_by('-end_date')
    categories = ProjectCategory.objects.all()
    # get paginated objects for projects
    paginator = Paginator(project_list, 6)
    page = request.GET.get('page')
    try:
        projects = paginator.page(page)
    except PageNotAnInteger:
        # if page is not an integer deliver the first page
        projects = paginator.page(1)
    except EmptyPage:
        # if page is out of range deliver the last page of results
        projects = paginator.page(paginator.num_pages)
    return render(request, 'portfolio/list.html',
        {
            'project_list': project_list,
            'projects': projects,
            'categories': categories,
            'page': page,
        })
def portfolio_detail(request, pk):
    """Render the detail page for a single Project, or raise Http404.

    Bug fix: ``get_object_or_404`` was used without ever being imported
    (NameError on first request); it is now imported from django.shortcuts.
    """
    project = get_object_or_404(Project, pk=pk)
    return render(
        request,
        'portfolio/portfolio_detail.html',
        {
            'project': project,
        }
    )
| [
"kkampardi@gmail.com"
] | kkampardi@gmail.com |
2f6e304d6d3a22932ae14069516b6f656a244924 | 4a8c1f7d9935609b780aff95c886ef7781967be0 | /atcoder/LeetCodeWeekly/327_c.py | 255919176b616b224096bbcb0e64767e7e69ba66 | [] | no_license | recuraki/PythonJunkTest | d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a | 2556c973d468a6988d307ce85c5f2f8ab15e759a | refs/heads/master | 2023-08-09T17:42:21.875768 | 2023-07-18T23:06:31 | 2023-07-18T23:06:31 | 13,790,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,077 | py | from typing import List, Tuple, Optional
from pprint import pprint
from collections import deque, defaultdict
list_lower = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
class Solution:
    def isItPossible(self, word1: str, word2: str) -> bool:
        """Decide whether swapping exactly one character between the two words
        can make them contain the same number of distinct characters
        (LeetCode Weekly 327, problem C).

        NOTE(review): the active print() below is leftover debug output.
        """
        se1 = defaultdict(int)
        se2 = defaultdict(int)
        for x in word1: se1[x] += 1
        for x in word2: se2[x] += 1
        # Normalize so se1 holds at least as many distinct characters as se2.
        if len(se1) < len(se2):
            se1, se2 = se2, se1
            word1, word2 = word2, word1
        #print(se1, se2)
        diffchar = (len(se1) - len(se2))
        print(">", diffchar, word1, word2)
        # A distinct-count gap of 3 or more can never be closed by one swap.
        if diffchar >= 3:
            return False
        # Gap of 0: a swap is mandatory, so it must leave both counts intact.
        # pat1: swap two characters that each occur exactly once
        # pat2: swap two characters that each occur at least twice
        if diffchar == 0:
            for a in list_lower:
                if se1[a] == 0: continue
                for b in list_lower:
                    if se2[b] == 0: continue
                    if se1[a] == se2[b] == 1: return True
                    if se1[a] >=2 and se2[b] >= 2: return True
            return False
        # Gap of 1: se1 must lose one distinct character relative to se2.
        # Give away se1's only copy of `a` for a char se2 keeps >= 2 of.
        if diffchar == 1:
            for a in list_lower:
                if se1[a] == 0: continue
                for b in list_lower:
                    if se2[b] == 0: continue
                    #if se1[a] >= 2 and se2[b] == 0: return True
                    if se1[a] == 1 and se2[b] >= 2:
                        return True
            return False
        # Gap of 2: se1 must lose a distinct char AND se2 gain one.
        if diffchar == 2:
            for a in list_lower:
                if se1[a] == 0: continue
                for b in list_lower:
                    if se2[b] == 0: continue
                    # NOTE(review): unreachable — any b with se2[b] == 0 was
                    # already skipped by the `continue` above, so this branch
                    # always falls through to `return False`. Verify intent.
                    if se1[a] == 1 and se2[b] == 0: return True
            return False
st = Solution()
# (word1, word2, expected): True is printed when the solver matches the
# hand-checked expectation from the original notes.
cases = [
    ("abcd", "a", False),      # impossible
    ("abc", "d", False),       # impossible
    ("abc", "a", False),       # impossible
    ("ac", "b", False),        # impossible
    ("ac", "a", True),         # aa / c
    ("abcc", "aab", True),     # abca / acb
    ("abcde", "fghij", True),  # any cross-swap works
    ("abc", "ddeeff", False),  # always ends at 3 vs 4 distinct
    ("c", "ac", True),         # a / cc
]
for w1, w2, expected in cases:
    print(st.isItPossible(word1=w1, word2=w2) == expected)
| [
"kanai@wide.ad.jp"
] | kanai@wide.ad.jp |
d343f2bf7825fe6d38f60fa6dc2ccb045815be2a | 15d05b24ab8086ac84757c4d596372fd7801b827 | /.ycm_extra_conf.py | d083fa69a8d6ce218a149a5a10a224238cb5081b | [] | no_license | wqx081/mpr_base | 5d410720728b3e1a720a36087226979763ac538b | fb1526856898954420673807eef50a5478382c38 | refs/heads/master | 2021-01-11T14:28:24.249395 | 2017-02-10T03:43:21 | 2017-02-10T03:43:21 | 56,306,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,628 | py | # This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
# This is a configuration file for YouCompleteMe (YCM), a Vim extension for
# navigation and code completion with C++ and other languages.
#
# To make YCM work with Kudu, add your Kudu source directory to the
# g:ycm_extra_conf_globlist variable in your .vimrc file. For details on how to
# install and configure YouCompleteMe, see
# https://github.com/Valloric/YouCompleteMe
#
# This file is based on the example configuration file from YouCompleteMe.
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
# Compilation flags used when no compilation database is configured
# (the default). Tune this list for the project at hand.
flags = [
    '-x',
    'c++',
    '-DBASE_HEADERS_NO_STUBS=1',
    '-DBASE_HEADERS_USE_RICH_SLICE=1',
    '-DBASE_HEADERS_USE_SHORT_STATUS_MACROS=1',
    '-DBASE_STATIC_DEFINE',
    '-Dintegration_tests_EXPORTS',
    '-D__STDC_FORMAT_MACROS',
    '-fno-strict-aliasing',
    '-msse4.2',
    '-Wall',
    '-Wno-sign-compare',
    '-Wno-deprecated',
    '-pthread',
    '-ggdb',
    '-Qunused-arguments',
    '-Wno-ambiguous-member-template',
    '-std=c++11',
    '-g',
    '-fPIC',
    '-I',
    'src',
    '-I',
    './src',
    '-isystem',
    'thirdparty/installed/common/include',
    '-isystem',
    'thirdparty/installed/uninstrumented/include',
]

# Absolute path of the FOLDER (not the file) containing compile_commands.json;
# leave empty to use the static `flags` above. CMake emits the file when
# CMAKE_EXPORT_COMPILE_COMMANDS is set. See
# http://clang.llvm.org/docs/JSONCompilationDatabase.html
compilation_database_folder = ''

if os.path.exists(compilation_database_folder):
    database = ycm_core.CompilationDatabase(compilation_database_folder)
else:
    database = None

SOURCE_EXTENSIONS = ['.cpp', '.cxx', '.cc', '.c', '.m', '.mm']
def DirectoryOfThisScript():
    """Return the absolute directory containing this configuration file."""
    here = os.path.abspath(__file__)
    return os.path.dirname(here)
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
    """Rewrite relative include/path flags as absolute paths rooted at
    *working_directory*.

    Handles both the two-token form ('-I', 'path') and the fused form
    ('-Ipath'). When no working directory is given the flags are returned
    unchanged (as a fresh list).
    """
    if not working_directory:
        return list(flags)

    path_prefixes = ['-isystem', '-I', '-iquote', '--sysroot=']
    absolute_flags = []
    expect_path = False

    for flag in flags:
        rewritten = flag

        if expect_path:
            expect_path = False
            if not flag.startswith('/'):
                rewritten = os.path.join(working_directory, flag)

        for prefix in path_prefixes:
            if flag == prefix:
                # The path arrives as the next separate token.
                expect_path = True
                break
            if flag.startswith(prefix):
                # The path is fused onto the flag itself.
                rewritten = prefix + os.path.join(working_directory, flag[len(prefix):])
                break

        if rewritten:
            absolute_flags.append(rewritten)
    return absolute_flags
def IsHeaderFile(filename):
    """True when *filename* carries a C/C++ header extension."""
    _, extension = os.path.splitext(filename)
    return extension in ('.h', '.hxx', '.hpp', '.hh')
def GetCompilationInfoForFile(filename):
    """Look up compilation flags for *filename* in the compilation database.

    The compile_commands.json generated by CMake has no entries for header
    files, so for a header we try each sibling source file (same basename
    plus a source extension) and borrow its flags — if one exists, its flags
    should be good enough.
    """
    if not IsHeaderFile(filename):
        return database.GetCompilationInfoForFile(filename)

    basename = os.path.splitext(filename)[0]
    for extension in SOURCE_EXTENSIONS:
        candidate = basename + extension
        if not os.path.exists(candidate):
            continue
        compilation_info = database.GetCompilationInfoForFile(candidate)
        if compilation_info.compiler_flags_:
            return compilation_info
    return None
def FlagsForFile(filename, **kwargs):
    """YCM entry point: compute the final compiler flags for *filename*."""
    if not database:
        # No compilation database: fall back to the static flags, resolved
        # relative to this script's directory.
        return {
            'flags': MakeRelativePathsInFlagsAbsolute(flags, DirectoryOfThisScript()),
            'do_cache': True
        }
    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
    # python list, but a "list-like" StringVec object.
    compilation_info = GetCompilationInfoForFile(filename)
    if not compilation_info:
        return None
    final_flags = MakeRelativePathsInFlagsAbsolute(
        compilation_info.compiler_flags_,
        compilation_info.compiler_working_dir_)
    return {
        'flags': final_flags,
        'do_cache': True
    }
| [
"you@example.com"
] | you@example.com |
410cb4081e89dc3eb59cc3fbda59b68ae7844275 | 2a67dc681af4c4b9ef7a8e18c2ff75377dc5b44f | /aws.autoscaling.Group.mixed-instances-policy-python/__main__.py | 2c3d5382d28b5d51dfca25f592d0fd7afc8785c2 | [] | no_license | ehubbard/templates-aws | e323b693a18234defe6bd56ffcc64095dc58e3a1 | 2ae2e7a5d05490078017fed6d132dcdde1f21c63 | refs/heads/master | 2022-11-17T13:53:14.531872 | 2020-07-10T21:56:27 | 2020-07-10T21:56:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | import pulumi
import pulumi_aws as aws
example_launch_template = aws.ec2.LaunchTemplate("exampleLaunchTemplate",
image_id=data["aws_ami"]["example"]["id"],
instance_type="c5.large",
name_prefix="example")
example_group = aws.autoscaling.Group("exampleGroup",
availability_zones=["us-east-1a"],
desired_capacity=1,
max_size=1,
min_size=1,
mixed_instances_policy={
"launch_template": {
"launchTemplateSpecification": {
"launchTemplateId": example_launch_template.id,
},
"override": [
{
"instance_type": "c4.large",
"weightedCapacity": "3",
},
{
"instance_type": "c3.large",
"weightedCapacity": "2",
},
],
},
})
| [
"jvp@justinvp.com"
] | jvp@justinvp.com |
ab019ff8f44d3ba691aebdc11c86e282acb76fe4 | 1fc45a47f0e540941c87b04616f3b4019da9f9a0 | /src/sentry/api/endpoints/project_release_commits.py | ee4a0f82aea772d83f6c1a4e66e53739b4b45f9a | [
"BSD-2-Clause"
] | permissive | seukjung/sentry-8.15.0 | febc11864a74a68ddb97b146cc1d2438ef019241 | fd3cab65c64fcbc32817885fa44df65534844793 | refs/heads/master | 2022-10-28T06:39:17.063333 | 2018-01-17T12:31:55 | 2018-01-17T12:31:55 | 117,833,103 | 0 | 0 | BSD-3-Clause | 2022-10-05T18:09:54 | 2018-01-17T12:28:13 | Python | UTF-8 | Python | false | false | 1,641 | py | from __future__ import absolute_import
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint, ProjectReleasePermission
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.models import Release, ReleaseCommit
class ProjectReleaseCommitsEndpoint(ProjectEndpoint):
    """Read-only API endpoint listing the commits attached to a release."""
    doc_section = DocSection.RELEASES
    permission_classes = (ProjectReleasePermission,)
    def get(self, request, project, version):
        """
        List a Project Release's Commits
        ````````````````````````````````
        Retrieve a list of commits for a given release.
        :pparam string organization_slug: the slug of the organization the
                                          release belongs to.
        :pparam string project_slug: the slug of the project to list the
                                     release files of.
        :pparam string version: the version identifier of the release.
        :auth: required
        """
        try:
            # Releases are looked up per organization and constrained to this
            # project through the many-to-many `projects` relation.
            release = Release.objects.get(
                organization_id=project.organization_id,
                projects=project,
                version=version,
            )
        except Release.DoesNotExist:
            raise ResourceDoesNotExist
        # select_related avoids per-row queries when serializing commits
        # and their authors below.
        queryset = ReleaseCommit.objects.filter(
            release=release,
        ).select_related('commit', 'commit__author')
        # Paginate the ReleaseCommit join rows in commit order, but serialize
        # the underlying Commit objects.
        return self.paginate(
            request=request,
            queryset=queryset,
            order_by='order',
            on_results=lambda x: serialize([rc.commit for rc in x], request.user),
        )
| [
"jeyce@github.com"
] | jeyce@github.com |
8b110ee3b6013db04f1448091c8518136433b53e | 7d800b5d51e47bf59ef5788bd1592d9c306d14c3 | /orders/migrations/0023_order.py | acac20f5ce2afce9ee8126e02bb1517ca7612693 | [] | no_license | paulitstep/cafe-website | 2e28e5a218f58c491cd65c6dc2deee22158765e2 | 29d077b1924871941ef95d5715412ae64ff4e892 | refs/heads/master | 2020-04-19T04:06:02.902468 | 2019-02-11T11:08:54 | 2019-02-11T11:08:54 | 167,953,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | # Generated by Django 2.1.1 on 2018-11-13 20:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (2.1.1); creates the `Order` table.
    # Do not edit by hand once applied.
    dependencies = [
        ('cart', '0015_cart_cartitem'),
        ('orders', '0022_auto_20181113_2242'),
    ]
    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order_id', models.CharField(default='ABC', max_length=120, unique=True, verbose_name='ID заказа')),
                ('status', models.CharField(choices=[('Started', 'Started'), ('Finished', 'Finished')], default='Started', max_length=120, verbose_name='Статус заказа')),
                ('price_total', models.DecimalField(decimal_places=2, default=0.0, max_digits=6, verbose_name='Итоговая сумма')),
                ('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cart.Cart', verbose_name='Корзина')),
                ('order_info', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.OrderInfo')),
            ],
        ),
    ]
| [
"pasha-mo1@rambler.ru"
] | pasha-mo1@rambler.ru |
4590727dcc68468074628fc79b7672369506aef3 | 4f5675d7d1cdb9dfb42cabcb9b154f08ed4f92e6 | /sensor/manage4.py | 19f8fd405b6dd84beea27a75322d39a9f510f808 | [] | no_license | block1b/twisted_rpc | 674934a85313761fabc48d8529f326a7f6958a29 | 5fa8f0f8ce07f99a280c4f8d81362532a443440a | refs/heads/master | 2020-03-19T18:07:02.738900 | 2018-06-10T06:54:16 | 2018-06-10T06:54:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,627 | py | # coding=utf-8
from init_pro import ConnectionFactory, ConnectionProtocol, Connector
import sys
from twisted.internet import reactor
from twisted.python import log
import random
import json
import datetime
log.startLogging(sys.stdout)
class CreateConnection(object):
    """
    Create and maintain an outbound long-lived connection to the collector,
    sending one sensor reading per second while connected. (Python 2 / Twisted)
    """
    def __init__(self, host, port):
        # Factory for the 'ConnectionPlatform' peer; `Connector` tracks the
        # protocol instances that are currently online.
        self.long_connection = ConnectionFactory('ConnectionPlatform', ConnectionProtocol)
        self.long_connection.onlineProtocol = Connector
        self.host = host
        self.port = port
    def create_long_connection(self):
        """(Re)establish the long-lived connection, or send data if online."""
        if not len(Connector.get_online_protocol('ConnectionPlatform')):
            print u"未连接........................"
            reactor.connectTCP(self.host, self.port, self.long_connection)
            print u"正在重连........................"
        else:
            # Connected: serialize one reading and write it to the first
            # online protocol's transport.
            Connector.get_online_protocol('ConnectionPlatform')[0].transport.write(json.dumps(self.pack_data()))
            print u"已发送采集的到的数据....................."
        reactor.callLater(1, self.create_long_connection)  # keep retrying/sending once per second
    @staticmethod
    def pack_data():
        """Build one simulated reading: fixed sensor id '4', the current
        timestamp, and a random temperature in [-10, 50)."""
        info = dict()
        info["id"] = '4'
        info["entry_time"] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        info["entry_data"] = random.uniform(-10, 50)
        info["info"] = "null"
        info['type'] = 'temp'
        info['name'] = 'jian'
        return info
# Connect to the local collector on port 5002 and start the one-second
# send/retry loop, then hand control to the Twisted reactor.
create_connection = CreateConnection('127.0.0.1', 5002)
create_connection.create_long_connection()
reactor.run()
| [
"1115064450@qq.com"
] | 1115064450@qq.com |
971f11bd2b61abcdffd848b6073412f022007d3a | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/detection/SSD_for_PyTorch/configs/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco.py | 21352a07ca8f6642d491dc9a5b683b2b4483d2d5 | [
"Apache-2.0",
"GPL-1.0-or-later",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 853 | py | # Copyright 2022 Huawei Technologies Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Inherit everything from the ResNet-50 1x config and override below.
_base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py'
# learning policy
# Swap the backbone for a Caffe-style ResNet-101 with matching
# pretrained weights.
model = dict(
    pretrained='open-mmlab://detectron2/resnet101_caffe',
    backbone=dict(depth=101))
# 3x schedule: decay the LR at epochs 28 and 34, train for 36 epochs.
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
| [
"chenyong84@huawei.com"
] | chenyong84@huawei.com |
40af72dc5ee87cc11f60cf3e10c57cc8617d2fbf | 8d2e5b5ea408579faa699c09bdbea39e864cdee1 | /ufora/util/TypeAwareComparison.py | ec50d2969a9792de4b1a5196ed0ac3933d706cc9 | [
"dtoa",
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] | permissive | iantuioti/ufora | 2218ef4c7e33c171268ce11458e9335be7421943 | 04db96ab049b8499d6d6526445f4f9857f1b6c7e | refs/heads/master | 2021-01-17T17:08:39.228987 | 2017-01-30T16:00:45 | 2017-01-30T16:00:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | # Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def typecmp(self, other, ownTypeComp):
'''
Compares objects of varying types. If they are different types it returns the lexical
comparison of their type string. Otherwise it uses the provided type comparison callable
'''
if self.__class__ != other.__class__:
return cmp(self.__class__, other.__class__)
return ownTypeComp(self, other)
| [
"braxton.mckee@gmail.com"
] | braxton.mckee@gmail.com |
11723a0259f19eee03f62469fb9728c3ae122d34 | ef02d3f3c5dbb2f1bf1b5a8b419d44efc9eb9cf1 | /src/scraping/migrations/0006_auto_20210227_1647.py | 99f1ec19ef18215e8c6e4f0d0c9e3b87564d93d2 | [] | no_license | Kirill67tyar/scraping_service | d0e60c1a07e1455b007b80908a0145ac26c38ba4 | e1c8ed8275d20e104e912e48bbc3d2b3a4e889ff | refs/heads/master | 2023-03-23T00:49:11.641175 | 2021-03-05T14:49:57 | 2021-03-05T14:49:57 | 335,295,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | # Generated by Django 3.1.6 on 2021-02-27 13:47
from django.db import migrations
import jsonfield.fields
import scraping.models
class Migration(migrations.Migration):
    # Auto-generated by Django (3.1.6); switches Error.data to a JSONField
    # with a callable default. Do not edit by hand once applied.
    dependencies = [
        ('scraping', '0005_auto_20210224_1842'),
    ]
    operations = [
        migrations.AlterField(
            model_name='error',
            name='data',
            field=jsonfield.fields.JSONField(default=scraping.models.get_default_data_errors),
        ),
    ]
| [
"50547951+Kirill67tyar@users.noreply.github.com"
] | 50547951+Kirill67tyar@users.noreply.github.com |
bbd30238c51b35c915d81fddbe1772cad0af452e | 97bd006a2a9885f1733bead1fcb6cb59b7779c43 | /experiments/naive_bayes/rbf_if/parameterization.py | 04084965224f5451e61ed1fff97b8b3fa8453c45 | [] | no_license | foxriver76/master-thesis-rslvq | d3524176d05e553b7cca5a37f580ef2cf9efc805 | 00a6c0b4bc3289fe30ead7d7c4e1ae41984dcf8b | refs/heads/master | 2022-03-07T03:55:19.244161 | 2018-10-15T08:28:41 | 2018-10-15T08:28:41 | 139,425,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 14 08:34:18 2018
@author: moritz
"""
from sklearn.model_selection import GridSearchCV
from rslvq_stream import RSLVQ
import json
from skmultiflow.data.random_rbf_generator_drift import RandomRBFGeneratorDrift
"""Subset of 30k"""
stream = RandomRBFGeneratorDrift(change_speed=0.001)
stream.prepare_for_use()
X, y = stream.next_sample(batch_size=30000)
clf = RSLVQ()
"""Specify possible params"""
ppt_range = [1, 2, 4, 8, 10, 12, 20]
sigma_range = [1.0, 2.0, 3.0, 5.0]
param_grid = [{'sigma': sigma_range,
'gradient_descent': ['SGD'],
'prototypes_per_class': ppt_range}]
gs = GridSearchCV(estimator=clf,
param_grid=param_grid,
scoring='accuracy',
cv=10,
n_jobs=-1)
gs = gs.fit(X, y)
"""Print best params"""
print(gs.best_score_)
print(gs.best_params_)
"""Test classifier"""
clf = gs.best_estimator_
clf.fit(X, y)
print('Korrektklassifizierungsraten: \
%.3f' % clf.score(X, y))
accuracy = clf.score(X, y)
"""Write results to File"""
file = open('../../param_search_results.txt', 'a+')
file.write(50 * '-')
file.write('\nAGRAWAL - RSLVQ SGD\n')
file.write('\nBest score: %.5f ' % (gs.best_score_))
file.write('\nBest param: %s' % (json.dumps(gs.best_params_)))
file.write('\nTest Accuracy: %.5f \n\n' % (accuracy))
file.close() | [
"moritz.heusinger@gmail.com"
] | moritz.heusinger@gmail.com |
ca20ef76e3f12077d99b30818c4300e8d0ed1e2b | e6f02bafae8842cae7b45efc2d4719c1a931f68d | /python/scripts/serve_dir_single_file.py | 92b87c034eb909ca4638e839c4e3aca61ae2d15f | [] | no_license | minhhoangcn4/nuxt-dashboard-template | 71d7f54462edb96ddb09667fee9c5a4ea76583ca | 7f15e1c79122ad45e398cc319d716d2a439b8365 | refs/heads/master | 2022-04-18T05:43:56.254971 | 2020-04-19T16:52:32 | 2020-04-19T16:52:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py |
import os
import build_frontend as bf
# Resolve the repository root: two levels above this script's directory.
script_dir = os.path.dirname(os.path.abspath(__file__))
repo_root = os.path.join(script_dir, '..', '..')
# Serve the repository root as a single-file app on port 8080.
server = bf.ServeSingleFile(folder_root=repo_root, port=8080)
server.run()
| [
"olivier.borderies@gmail.com"
] | olivier.borderies@gmail.com |
283c78dd3f670c65171d58c2b10825d443747d41 | 71fa0d6b0cf81dcd68fb4b5f43bb9fb7026df170 | /code/lamost/mass_age/paper_plots/write_paper_table.py | d7d649c0c8413ca2e487c363c9867f887718c740 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | aceilers/TheCannon | 31354db3df2495cea1e938090079f74f316a5bbf | c140a0c9555bb98956b013d1a9d29eb94ed4c514 | refs/heads/master | 2020-12-25T22:29:57.147937 | 2017-07-18T08:22:46 | 2017-07-18T08:22:46 | 64,823,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,707 | py | import pyfits
import numpy as np
from sigfig import round_sig
def ndec(num):
    """Number of digits after the decimal point in the string form of *num*."""
    return len(str(num).split('.')[-1])
def fmt_id(id_val):
    """Escape the underscore of a two-part LAMOST ID for LaTeX ('a_b' -> 'a\\_b')."""
    parts = id_val.split('_')
    return parts[0] + "\\_" + parts[1]
inputf = pyfits.open("/Users/annaho/Data/LAMOST/Mass_And_Age/Ho2016b_Catalog.fits")
dat = inputf[1].data
inputf.close()
choose = dat['in_martig_range']
lamost_id = dat['LAMOST_ID'][choose]
lamost_id = np.array([fmt_id(val) for val in lamost_id])
ra = dat['RA'][choose]
dec = dat['Dec'][choose]
teff = dat['Teff'][choose]
logg = dat['logg'][choose]
mh = dat['MH'][choose]
cm = dat['CM'][choose]
nm = dat['NM'][choose]
am = dat['AM'][choose]
ak = dat['Ak'][choose]
mass = dat['Mass'][choose]
logAge = dat['logAge'][choose]
teff = np.array([int(val) for val in teff])
logg = np.array([round_sig(val,3) for val in logg])
mh = np.array([round_sig(val, 3) for val in mh])
cm = np.array([round_sig(val, 3) for val in cm])
nm = np.array([round_sig(val, 3) for val in nm])
am = np.array([round_sig(val, 3) for val in am])
ak = np.array([round_sig(val, 3) for val in ak])
mass = np.array([round_sig(val, 2) for val in mass])
logAge = np.array([round_sig(val, 2) for val in logAge])
teff_err = dat['Teff_err'][choose]
logg_err = dat['logg_err'][choose]
mh_err = dat['MH_err'][choose]
cm_err = dat['CM_err'][choose]
nm_err = dat['NM_err'][choose]
am_err = dat['AM_err'][choose]
ak_err = dat['Ak_err'][choose]
teff_scat = dat['Teff_scatter'][choose]
logg_scat = dat['logg_scatter'][choose]
mh_scat = dat['MH_scatter'][choose]
cm_scat = dat['CM_scatter'][choose]
nm_scat = dat['NM_scatter'][choose]
am_scat = dat['AM_scatter'][choose]
mass_err = dat['Mass_err'][choose]
logAge_err = dat['logAge_err'][choose]
snr = dat['SNR'][choose]
chisq =dat['Red_Chisq'][choose]
# Table 1: IDs, coordinates and stellar labels.
# Raw strings keep the LaTeX backslashes literal; the original relied on
# Python passing unrecognized escapes (e.g. "\l", "\m") through unchanged,
# which emits SyntaxWarning on modern Python.  The string VALUES written
# to the file are byte-identical to before.
content = r'''\begin{tabular}{cccccccccc}
\tableline\tableline
LAMOST ID & RA & Dec & \teff\ & \logg\ & \mh\ & \cm\ & \nm\ & \alpham\ & \ak\ \\
 & (deg) & (deg) & (K) & (dex) & (dex) & (dex) & (dex) & (dex) & mag \\
\tableline
'''
outputf = open("paper_table.txt", "w")
outputf.write(content)
for i in range(4):
    # Writes the first four rows (count is hard-coded).  The row terminator
    # now matches the other three loops; previously it was written as
    # '\\\ ', which produced the same "\\ " value only via a deprecated
    # unrecognized escape.
    outputf.write(
        r'%s & %s & %s & %s & %s & %s & %s & %s & %s & %s \\ '
        % (lamost_id[i], np.round(ra[i], 2), np.round(dec[i], 2),
           teff[i], logg[i], mh[i], cm[i], nm[i], am[i], ak[i]))
# Table 2: formal errors on each label.  Raw string avoids the deprecated
# unrecognized-escape behavior the original depended on; the written bytes
# are unchanged.
content = r'''\tableline
\end{tabular}}
\end{table}
\begin{table}[H]
\caption{
Continued from Table 1: Formal Errors}
{\scriptsize
\begin{tabular}{cccccccc}
\tableline\tableline
LAMOST ID & $\sigma$(\teff) & $\sigma$(\logg) & $\sigma$(\mh) & $\sigma$(\cm) & $\sigma$(\nm) & $\sigma$(\alpham) & $\sigma$(\ak) \\
 & (K) & (dex) & (dex) & (dex) & (dex) & (dex) & (mag) \\
\tableline
'''
outputf.write(content)
for i in range(4):
    # Each error is rounded to the same number of decimals (ndec) as the
    # corresponding rounded label value, so the columns line up.
    outputf.write(
        r'%s & %s & %s & %s & %s & %s & %s & %s \\ '
        % (lamost_id[i], int(teff_err[i]),
           np.round(logg_err[i], ndec(logg[i])),
           np.round(mh_err[i], ndec(mh[i])),
           np.round(cm_err[i], ndec(cm[i])),
           np.round(nm_err[i], ndec(nm[i])),
           np.round(am_err[i], ndec(am[i])),
           np.round(ak_err[i], ndec(ak[i]))))
# Table 3: scatter-based error estimates.  Raw string; written bytes
# unchanged from the original escaped version.
# NOTE(review): the tabular spec declares 10 columns ("cccccccccc") but the
# header/body only use 7; kept as-is to preserve the output byte-for-byte.
content = r'''\tableline
\end{tabular}}
\end{table}
\begin{table}[H]
\caption{
Continued from Table 2: Estimated Error (Scatter)}
{\scriptsize
\begin{tabular}{cccccccccc}
\tableline\tableline
LAMOST ID & $s$(\teff) & $s$(\logg) & $s$(\mh) & $s$(\cm) & $s$(\nm) & $s$(\alpham) \\
 & (K) & (dex) & (dex) & (dex) & (dex) & (dex) \\
\tableline
'''
outputf.write(content)
for i in range(4):
    # Scatter values rounded to the same decimals as the label columns.
    outputf.write(
        r'%s & %s & %s & %s & %s & %s & %s \\ '
        % (lamost_id[i], int(teff_scat[i]),
           np.round(logg_scat[i], ndec(logg[i])),
           np.round(mh_scat[i], ndec(mh[i])),
           np.round(cm_scat[i], ndec(cm[i])),
           np.round(nm_scat[i], ndec(nm[i])),
           np.round(am_scat[i], ndec(am[i]))))
# Table 4: mass, log(Age), their errors, SNR and reduced chi^2; then close
# the output file.  Raw strings; written bytes unchanged.
content = r'''\tableline
\end{tabular}}
\end{table}
\begin{table}[H]
\caption{
Continued from Table 3}
{\scriptsize
\begin{tabular}{ccccccc}
\tableline\tableline
LAMOST ID & Mass & log(Age) & $\sigma$(Mass) & $\sigma$(log(Age)) & SNR & Red. \\
 & ($M_\odot$) & dex & ($M_\odot$) & (dex) & & $\chi^2$ \\
\tableline
'''
outputf.write(content)
for i in range(4):
    # NOTE(review): mass_err is rounded with ndec(mass_err[i]) rather than
    # ndec(mass[i]) as every other error column does -- confirm intentional.
    outputf.write(
        r'%s & %s & %s & %s & %s & %s & %s \\ '
        % (lamost_id[i], mass[i], logAge[i],
           np.round(mass_err[i], ndec(mass_err[i])),
           np.round(logAge_err[i], ndec(logAge[i])),
           round_sig(snr[i], 3), round_sig(chisq[i], 2)))
content = r'''\tableline
\end{tabular}}
\end{table}
'''
outputf.write(content)
outputf.close()
| [
"annayqho@gmail.com"
] | annayqho@gmail.com |
f5ddb3679e63a10e89433aa31c76875a957f6882 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /9sN5tvXZjYCsKb4Mx_10.py | 87730daea0cefa4298f4002d9ce77ce7bf826233 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | """
Create a function that takes the volume of a cube and returns the length of
the cube's main diagonal, rounded to two decimal places.
### Examples
cube_diagonal(8) ➞ 3.46
cube_diagonal(343) ➞ 12.12
cube_diagonal(1157.625) ➞ 18.19
#### Notes
Use the `sqrt` function in the math module.
"""
import math
def cube_diagonal(volume):
    """Return the length of a cube's main (space) diagonal from its volume.

    The side length is the cube root of the volume, and the space diagonal
    of a cube is side * sqrt(3).  The result is rounded to two decimals.
    """
    side = volume ** (1 / 3)
    return round(side * math.sqrt(3), 2)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.