blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c0db67f8c6058f56c14538cc13211ec25f597b7b
|
ca8d183f5d6f1f260483a3555efd05870fe1d891
|
/com_blacktensor/cop/fin/model/finance_dto.py
|
1bcd1fd81dbed5d0d30270dcf7bec28453dc22d2
|
[
"MIT"
] |
permissive
|
Jelly6489/Stock-Proj
|
b559304f10614122ddaa00e39c821a65faa9f91d
|
3e7b1ad5cddc5b142f0069e024199fe969c7c7e8
|
refs/heads/main
| 2023-01-13T17:18:33.729747
| 2020-11-13T08:19:33
| 2020-11-13T08:19:33
| 312,512,688
| 0
| 0
|
MIT
| 2020-11-13T08:11:04
| 2020-11-13T08:11:04
| null |
UTF-8
|
Python
| false
| false
| 2,764
|
py
|
import csv
import json
import pandas as pd
from com_blacktensor.ext.db import db, openSession, engine
# from com_blacktensor.ext.routes import Resource
class FinanceDto(db.Model):
    """SQLAlchemy model for per-company yearly financial figures.

    One row per company (`name`), with one float column per fiscal
    year-end (December) from 2015 through 2022, plus a search keyword.
    """
    __tablename__ = 'finance'
    __table_args__ = {'mysql_collate': 'utf8_general_ci'}

    no: int = db.Column(db.Integer, primary_key=True, index=True)
    name: str = db.Column(db.String(10))
    f_2015_12: float = db.Column(db.Float)
    f_2016_12: float = db.Column(db.Float)
    f_2017_12: float = db.Column(db.Float)
    f_2018_12: float = db.Column(db.Float)
    f_2019_12: float = db.Column(db.Float)
    f_2020_12: float = db.Column(db.Float)
    f_2021_12: float = db.Column(db.Float)
    f_2022_12: float = db.Column(db.Float)
    keyword: str = db.Column(db.String(10))

    def __repr__(self):
        # Implicit string concatenation replaces the previous backslash-
        # continued f-string, which embedded the continuation lines' leading
        # whitespace inside the rendered output.
        return (f'Finance(no={self.no}, name={self.name}, '
                f'f_2015_12={self.f_2015_12}, f_2016_12={self.f_2016_12}, '
                f'f_2017_12={self.f_2017_12}, f_2018_12={self.f_2018_12}, '
                f'f_2019_12={self.f_2019_12}, f_2020_12={self.f_2020_12}, '
                f'f_2021_12={self.f_2021_12}, f_2022_12={self.f_2022_12}, '
                f'keyword={self.keyword})')

    # __str__ previously duplicated __repr__'s body verbatim; alias it instead.
    __str__ = __repr__

    @property
    def json(self):
        """Serialize the row to a plain dict for JSON responses."""
        return {
            'no': self.no,
            'name': self.name,
            'f_2015_12': self.f_2015_12,
            'f_2016_12': self.f_2016_12,
            'f_2017_12': self.f_2017_12,
            'f_2018_12': self.f_2018_12,
            'f_2019_12': self.f_2019_12,
            'f_2020_12': self.f_2020_12,
            'f_2021_12': self.f_2021_12,
            'f_2022_12': self.f_2022_12,
            'keyword': self.keyword
        }
class FinanceVo:
    """Plain (non-ORM) value object mirroring FinanceDto's columns.

    All fields start from neutral defaults; presumably used to carry
    finance rows outside the SQLAlchemy layer -- confirm against callers.
    """
    no : int = 0
    name : str = ''
    f_2015_12 : float = 0.0
    f_2016_12 : float = 0.0
    f_2017_12 : float = 0.0
    f_2018_12 : float = 0.0
    f_2019_12 : float = 0.0
    f_2020_12 : float = 0.0
    f_2021_12 : float = 0.0
    f_2022_12 : float = 0.0
    keyword : str = ''
|
[
"rlaalsrlzld@naver.com"
] |
rlaalsrlzld@naver.com
|
432a27bf6bb59950798f0e4f47ac1df8b7450b5c
|
e32a75c44ef9c964bc5f97712c8e0e845ee3f6ca
|
/models_vqa/question_prior_net.py
|
be677105c8efe0cf16b6a818c5e33d76fc1e7e38
|
[] |
no_license
|
ankita-kalra/ivqa_belief_set
|
29c40ec4076433ac412728aea603e4e69ce530eb
|
6ebba50ff001e1af6695bb3f4d2643e7072ee153
|
refs/heads/master
| 2020-04-05T17:17:00.834303
| 2018-08-27T09:59:16
| 2018-08-27T09:59:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,121
|
py
|
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
from tensorflow.python.ops.nn import dropout as drop
from n2mn_util.cnn import fc_layer as fc, fc_relu_layer as fc_relu
# The network that takes in the hidden state of the
def question_prior_net(encoder_states, num_choices, qpn_dropout, hidden_dim=500,
                       scope='question_prior_net', reuse=None):
    """Predict per-choice logits from the LSTM encoder's final states.

    Concatenates the hidden state `h` of every LSTM layer, then applies
    fc -> ReLU -> fc (with optional 0.5 dropout after each of the first
    two stages) to produce a [N, num_choices] logit tensor.
    """
    with tf.variable_scope(scope, reuse=reuse):
        assert isinstance(encoder_states, tuple)
        for state in encoder_states:
            assert isinstance(state, tf.contrib.rnn.LSTMStateTuple)
        # Shape [N, D_lstm1 + ... + D_lstm_n]: all layers' hidden states side by side.
        hidden = tf.concat([state.h for state in encoder_states], axis=1)
        if qpn_dropout:
            hidden = drop(hidden, 0.5)
        activated = fc_relu('fc1', hidden, output_dim=hidden_dim)
        if qpn_dropout:
            activated = drop(activated, 0.5)
        return fc('fc2', activated, output_dim=num_choices)
|
[
"liufeng@seu.edu.cn"
] |
liufeng@seu.edu.cn
|
8b0efbb4b751dd8f8ecb1415f39e7f826639b65b
|
7060196e3773efd535813c9adb0ea8eca9d46b6c
|
/stripe/api_resources/radar/value_list_item.py
|
be28d2723dbe75b3c3803bf54a5689df657277d2
|
[
"MIT"
] |
permissive
|
henry232323/stripe-python
|
7fc7440a8e8e0a57a26df577d517d9ba36ca00d0
|
953faf3612522f4294393d341138800691f406e0
|
refs/heads/master
| 2020-05-01T06:27:05.154381
| 2019-03-23T19:21:20
| 2019-03-23T19:21:20
| 177,330,547
| 0
| 0
|
MIT
| 2019-03-23T19:17:54
| 2019-03-23T19:17:54
| null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
from stripe.api_resources.abstract import CreateableAPIResource
from stripe.api_resources.abstract import DeletableAPIResource
from stripe.api_resources.abstract import ListableAPIResource
class ValueListItem(
    CreateableAPIResource, DeletableAPIResource, ListableAPIResource
):
    """Stripe Radar `value_list_item` API resource.

    The mixin bases provide create, delete, and list operations; this class
    only binds them to the object name used for API routing.
    """
    OBJECT_NAME = "radar.value_list_item"
|
[
"ob@stripe.com"
] |
ob@stripe.com
|
66f9427f087031cb76ce0ece746fb895f97913ca
|
59c34dcbcc14b5482d5c41f174f5221b56ab87f0
|
/api.py
|
9270e163b157c793847eab967d0d7f3ba505c71d
|
[
"MIT"
] |
permissive
|
wwhalljr/api.spaceprob.es
|
b73b670b65ff47537b1db7e02991134122a7807f
|
20ee8f9d14314c83f07ec31d62601a75b62c7d44
|
refs/heads/master
| 2020-12-31T05:25:29.978826
| 2016-03-15T19:00:37
| 2016-03-15T19:00:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,401
|
py
|
from __future__ import print_function
import os
import sys
import redis
import logging
import ephem
import requests
from flask import Flask, render_template, redirect, jsonify
from json import loads, dumps
from util import json, jsonp, support_jsonp
from scrapers.dsn import get_dsn_raw
# Flask app serving cached Deep Space Network (DSN) data out of Redis.
app = Flask(__name__)
# REDISTOGO_URL is the hosted Redis URL (e.g. on Heroku); default is local.
REDIS_URL = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
r_server = redis.StrictRedis.from_url(REDIS_URL)
# Send error-level logs to stdout so the hosting platform captures them.
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.ERROR)
@app.route('/')
def hello():
    """Send visitors of the site root to the per-probe DSN feed."""
    feed_url = "/dsn/probes.json"
    return redirect(feed_url, code=302)
@app.route('/dsn/mirror.json')
@json
def dsn_mirror():
    """ a json view of the dsn xml feed """
    raw_feed = loads(r_server.get('dsn_raw'))
    return {'dsn': raw_feed}, 200
@app.route('/dsn/probes.json')
@app.route('/dsn/spaceprobes.json')
@support_jsonp
def dsn_by_probe():
    """ dsn data aggregated by space probe """
    per_probe = loads(r_server.get('dsn_by_probe'))
    return jsonify({'dsn_by_probe': per_probe})
# for feeding the spaceprobes website
@app.route('/distances.json')
@support_jsonp
def all_probe_distances():
    """
    endpoint to feed the spaceprobes website
    this endpoint firsts asks the website what spaceprobes it has
    and returns something for each. maybe this is a feature.
    to test locally, edit the url below
    and in the spaceprobes site main.js edit the distances_feed_url
    you might also need to grab copy of this app's redis db from
    heroku production to build locally
    """
    # first get list of all probes from the website
    url = 'http://spaceprob.es/probes.json'
    all_probes_website = loads(requests.get(url).text)

    # get probes according to our DSN mirror
    dsn = loads(r_server.get('dsn_by_probe'))

    # now loop through probes on website and try to find their distances
    # some will have distances in dsn feed, others will have resource from website endpoint
    # and others we will use pyephem for their host planet
    orbit_bodies = {'Venus': ephem.Venus, 'Mars': ephem.Mars, 'Moon': ephem.Moon}
    distances = {}
    for probe in all_probes_website:
        dsn_name = probe['dsn_name']
        slug = probe['slug']
        if dsn_name and dsn_name in dsn:
            distances[slug] = dsn[dsn_name]['uplegRange']
        elif 'orbit_planet' in probe and probe['orbit_planet']:
            # this probe's distance is same as its host body, so use pyephem.
            # BUG FIX: `m` was previously left over from earlier iterations, so
            # an unrecognized orbit_planet either raised NameError or silently
            # reused the previous probe's body.
            body_factory = orbit_bodies.get(probe['orbit_planet'])
            if body_factory:
                m = body_factory()
                m.compute()
                earth_distance = m.earth_distance * 149597871  # convert from AU to kilometers
                distances[slug] = str(earth_distance)
        elif 'distance' in probe and probe['distance']:
            # this probe's distance is hard coded at website, add that
            try:
                # make sure this is actually numeric
                float(probe['distance'])
                distances[slug] = str(probe['distance'])
            except ValueError:
                pass
    return jsonify({'spaceprobe_distances': distances})
@app.route('/planets.json')
@support_jsonp
def planet_distances():
    """ return current distances from earth for the major planets, in km """
    km_per_au = 149597870.7  # one astronomical unit expressed in kilometers
    planet_ephem = [ephem.Mercury(), ephem.Venus(), ephem.Mars(), ephem.Saturn(), ephem.Jupiter(), ephem.Uranus(), ephem.Neptune(), ephem.Pluto()]
    planets = {}
    for p in planet_ephem:
        p.compute()
        # BUG FIX: previously `earth_distance * 149597870700 / 10000`, i.e.
        # meters divided by 10**4 -- a factor of 10 too small for kilometers.
        planets[p.name] = p.earth_distance * km_per_au
    return jsonify({'distance_from_earth_km': planets})
# the rest of this is old and like wolfram alpha hacking or something..
def get_detail(probe):
    """ returns list of data we have for this probe
        url = /<probe_name>

        Returns the cached Wolfram Alpha details for `probe`, or an
        (error-dict, 404) tuple when the cache cannot be read.
    """
    try:
        wolframalpha = loads(r_server.get('wolframalpha'))
        detail = wolframalpha[probe]
        return detail
    # TypeError fires when the 'wolframalpha' key is missing from Redis:
    # r_server.get() returns None and loads(None) raises TypeError.
    # NOTE(review): an unknown probe name raises KeyError, which is NOT
    # caught here -- confirm whether that is intended.
    except TypeError: # type error?
        return {'Error': 'spacecraft not found'}, 404 # this doesn't work i dunno
@app.route('/probes/guide/')
def guide():
    """ html api guide data viewer thingy
        at </probes/guide/>

        Renders the cached Wolfram Alpha probe details; if the cache cannot
        be read or the template fails, falls back to the DSN probe feed.
    """
    try:
        wolframalpha = loads(r_server.get('wolframalpha'))
        kwargs = {'probe_details': wolframalpha}
        return render_template('guide.html', **kwargs)
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception instead.
    except Exception:
        return redirect("dsn/probes.json", code=302)
@app.route('/probes/<probe>/')
@support_jsonp
@json
def detail(probe):
    """ returns list of data we have for this probe from wolfram alpha
        url = /<probe_name>
        ie
        </Cassini>
    """
    payload = get_detail(probe)
    return payload, 200
@app.route('/probes/<probe>/<field>/')
@support_jsonp
@json
def single_field(probe, field):
    """ returns data for single field
        url = /<probe_name>/<field>
        ie
        </Cassini/mass>
    """
    probe_details = get_detail(probe)
    return {field: probe_details[field]}, 200
@app.route('/probes/')
@support_jsonp
@json
def index():
    """ returns list of all space probes in db
        url = /
    """
    # Iterating the decoded JSON object yields its keys (the probe names);
    # the previous double list-comprehension copied the list twice.
    probe_names = list(loads(r_server.get('wolframalpha')))
    return {'spaceprobes': probe_names}, 200
if __name__ == '__main__':
    # Development entry point only; debug mode must not be enabled in production.
    app.debug = True
    app.run()
|
[
"lballard.cat@gmail.com"
] |
lballard.cat@gmail.com
|
555210dfa338e3acc4ba9d4c8dd080d07b9e8135
|
115b5356242176b8873ae7e43cd313e41cbd0ee6
|
/compustat/oct22/graph.py
|
ecfefa51ab30673d385a339b280ebcf6edfdde87
|
[] |
no_license
|
squeakus/bitsandbytes
|
b71ec737431bc46b7d93969a7b84bc4514fd365b
|
218687d84db42c13bfd9296c476e54cf3d0b43d2
|
refs/heads/master
| 2023-08-26T19:37:15.190367
| 2023-07-18T21:41:58
| 2023-07-18T21:42:14
| 80,018,346
| 2
| 4
| null | 2022-06-22T04:08:35
| 2017-01-25T13:46:28
|
C
|
UTF-8
|
Python
| false
| false
| 4,775
|
py
|
"""A module for plotting results"""
import pylab, pygame, sys
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
filetype = '.png'
def plot_3d(results_list, title):
    """Display every run as its own line in one 3-D figure (not saved)."""
    xs = range(len(results_list[0]))
    figure = plt.figure()
    axis3d = Axes3D(figure)
    plt.title(title)
    for run_index, run in enumerate(results_list):
        # Each run is drawn at its own depth (z = run_index).
        axis3d.plot(xs, run, run_index)
    plt.show()
def plot_2d(results_list, title):
    """Overlay every run on one 2-D graph and save it as '<title>.png'."""
    pylab.clf()
    pylab.figure().autofmt_xdate()
    xs = range(len(results_list[0]))
    for run in results_list:
        pylab.plot(xs, run)
    pylab.title(title)
    pylab.savefig(title + filetype)
def boxplot_data(results_list, title):
    """Draw one boxplot per column across runs; save as '<title>_boxplot.png'."""
    pylab.clf()
    pylab.figure(1)
    # Transpose: collect column i from every run into one distribution.
    columns = [[run[i] for run in results_list]
               for i in range(len(results_list[0]))]
    pylab.boxplot(columns)
    pylab.figure(1).autofmt_xdate()
    full_title = title + '_boxplot'
    pylab.title(full_title)
    pylab.savefig(full_title + filetype)
def plot_ave(results_list, title):
    """Plot the cross-run mean at every 10th step with std-dev error bars."""
    pylab.clf()
    pylab.figure().autofmt_xdate()
    err_x, err_y, std_list = [], [], []
    # Sample every 10th index (same points as `i % 10 == 0` over the range).
    for sample_idx in range(0, len(results_list[0]), 10):
        column = [run[sample_idx] for run in results_list]
        err_x.append(sample_idx)
        err_y.append(np.average(column))
        std_list.append(np.std(column))
    pylab.errorbar(err_x, err_y, yerr=std_list)
    full_title = title + '_average'
    pylab.title(full_title)
    pylab.savefig(full_title + filetype)
def continuous_plot(iterations, grn):
    """Uses pygame to draw concentrations in real time.

    Scrolls a 600x600 window leftwards, drawing one line segment per gene
    per iteration. Keyboard pairs (up/down, a/z, s/x, d/c) raise/lower the
    concentration of the corresponding EXTRA gene while held.
    """
    width, height = size = (600, 600)
    screen = pygame.display.set_mode(size)
    colors = [] # list for protein colors
    conc_list = [] # current concentrations (as screen y-coordinates)
    extra_list = [] # add variables for user input
    key_list = [] # keyboard inputs
    # Up to four EXTRA genes get their own color, popped from the end.
    extra_colors = [(255, 0, 0),
                    (255, 255, 0),
                    (255, 0, 255),
                    (0, 255, 255)]
    # One (raise, lower) key pair per EXTRA gene, in discovery order.
    key_list.append([pygame.K_UP, pygame.K_DOWN])
    key_list.append((pygame.K_a, pygame.K_z))
    key_list.append((pygame.K_s, pygame.K_x))
    key_list.append((pygame.K_d, pygame.K_c))
    for gene in grn.genes:
        # TF = Blue P = Green EXTRA = Red
        if gene.gene_type == "TF":
            colors.append((0, 0, 255))
        elif gene.gene_type == "P":
            colors.append((0, 255, 0))
        elif gene.gene_type.startswith("EXTRA"):
            extra_list.append({'name':gene.gene_type,
                               'up':False, 'down':False})
            colors.append(extra_colors.pop())
        # Map concentration [0, 1] to screen y (0 at the top, so invert).
        conc_list.append(600-(gene.concentration * 600))
    for _ in range(iterations):
        #check for keypress
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == pygame.KEYDOWN:
                for idx, key_tuple in enumerate(key_list):
                    if pygame.key.get_pressed()[key_tuple[0]]:
                        extra_list[idx]['up'] = True
                    elif pygame.key.get_pressed()[key_tuple[1]]:
                        extra_list[idx]['down'] = True
            elif event.type == pygame.KEYUP:
                # Any key release stops adjusting every EXTRA gene.
                for extra in extra_list:
                    extra['up'] = False
                    extra['down'] = False
        # Update the extra protein concentration
        for extra in extra_list:
            if extra['up']:
                grn.change_extra(extra['name'], 0.005)
            if extra['down']:
                grn.change_extra(extra['name'], -0.005)
        # if extrab_up:
        #     grn.change_extra("EXTRA_B", 0.005)
        # if extrab_down:
        #     grn.change_extra("EXTRA_B", -0.005)
        #run grn and get protein concentrations
        results = grn.regulate_matrix(2, False)
        scaled = [int(600-(x * 600)) for x in results]
        old_conc = conc_list
        conc_list = scaled
        # Draw each gene's segment at the right edge, then scroll left.
        for idx, conc in enumerate(conc_list):
            pygame.draw.line(screen, colors[idx],
                             (width-3, old_conc[idx]),
                             (width-2, conc))
        pygame.display.flip()
        screen.scroll(-1, 0)
        pygame.time.wait(5)
|
[
"jonathanbyrn@gmail.com"
] |
jonathanbyrn@gmail.com
|
73e6777165b5b279414a6bc9d929bcc99ec5ba2d
|
4e7946cc3dfb2c5ff35f7506d467c06de0e5e842
|
/dlldiag/common/FileIO.py
|
be06510d944586dde4e451fb901b193965ea2f8e
|
[
"MIT"
] |
permissive
|
GabLeRoux/dll-diagnostics
|
745a346ee6076a5e55dc852601afa2a5b5f99994
|
df579e03dff28645d42eb582f44cb9d340ba08e5
|
refs/heads/master
| 2023-02-27T18:54:37.074222
| 2021-01-29T07:39:31
| 2021-01-29T07:39:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
class FileIO(object):
    '''
    Provides functionality for performing file I/O
    '''

    @staticmethod
    def writeFile(filename, data):
        '''
        Writes data to a file

        The string `data` is UTF-8 encoded and written in binary mode.
        '''
        encoded = data.encode('utf-8')
        with open(filename, 'wb') as handle:
            handle.write(encoded)
|
[
"adam.rehn@my.jcu.edu.au"
] |
adam.rehn@my.jcu.edu.au
|
c92c67b45c126a4e1149a656da12ecf610334d07
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02836/s788041328.py
|
bf63c9bc5433a668bac86ce231dadf570827c52c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
# Count the minimum number of single-character changes needed to turn the
# input string into a palindrome.
word = input()
length = len(word)
half = length // 2
left = word[:half]
# Mirror of the right half; skip the middle character when the length is odd.
right = word[half + (length % 2):][::-1]
mismatches = sum(1 for a, b in zip(left, right) if a != b)
print(mismatches)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
a7adbadd3ec5c7f3767cfb61f2e5937a2539c716
|
390f5efd244d9f9dba429702bf6edea6d920b604
|
/simple_linear_regression.py
|
1fc64badd3529b835b4fd4a272ae39864d51d18c
|
[] |
no_license
|
pkdism/Machine-Learning-A-Z
|
a8a936ed607fe5f805f0e1aa54092f69a159bbce
|
750c4e12dea53924323f94bb11bae3660ae89c17
|
refs/heads/master
| 2020-09-01T14:28:09.938111
| 2020-04-01T11:19:53
| 2020-04-01T11:19:53
| 218,979,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,412
|
py
|
# Data Preprocessing
# Fits a one-variable linear regression of salary on years of experience
# and plots the fit against both the training and the test samples.

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Import the dataset (expects Salary_Data.csv in the working directory)
dataset = pd.read_csv('Salary_Data.csv')
X = dataset.iloc[:, :-1].values # matrix of features
y = dataset.iloc[:, 1].values # dependent variable

# Splitting the dataset into the Training and the Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)

# Feature scaling (left disabled: LinearRegression does not require it)
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)"""

# Fitting Simple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)

# Predicting the Test set results
y_pred = regressor.predict(X_test)

# Visualizing the Training set results
plt.scatter(X_train, y_train, color = 'red')
plt.plot(X_train, regressor.predict(X_train), color = 'blue')
plt.title('Salary vs Experience (Training set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()

# Visualizing the Test set results
plt.scatter(X_test, y_test, color = 'red')
# The fitted line is identical in both plots, so drawing it from X_train
# here is intentional.
plt.plot(X_train, regressor.predict(X_train), color = 'blue')
plt.title('Salary vs Experience (Test set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()
|
[
"pawan.dwivedi94@gmail.com"
] |
pawan.dwivedi94@gmail.com
|
c8ba62216d55b8af369b3c3e792bbf4792c047b3
|
a32ebed23c068ffcf88feccc795205fca9b67b89
|
/python_curso-em-video_guanabara/Mundo 1/a10_x033.py
|
0c4f87591963ae71bd6312074bbd375691c98f1d
|
[] |
no_license
|
AlefAlencar/python-estudos
|
c942bc20696442c62782fe7e476cd837e612632e
|
7e3807e6dbdec8037d688a986933eb8fd893c072
|
refs/heads/master
| 2023-08-27T23:38:30.397907
| 2021-11-03T02:18:51
| 2021-11-03T02:18:51
| 412,178,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
# Read 3 integers and report which is the smallest and which is the largest.
# (Removed an unused `import math` -- nothing in the script referenced it.)
n1 = int(input('Digite um número: '))
n2 = int(input('Digite outro: '))
n3 = int(input('Digite só mais um outro: '))
n = [n1, n2, n3]
n.sort()
# After sorting, the extremes sit at the ends of the list.
print('O menor número é o {}, e o maior é o {}'.format(n[0], n[-1]))
|
[
"89526613+AlefAlencar@users.noreply.github.com"
] |
89526613+AlefAlencar@users.noreply.github.com
|
b0a3b3b326b43c4ec7aa7be3ba5ecd6387a7746f
|
5ffe544f2521eec78763a7e46e4a343ea37017df
|
/base/urls.py
|
9ce018ec8ea3c3a28f6e1855c0bb434d27c6ef26
|
[] |
no_license
|
shobhit1215/Todo-List
|
a12b534dd83b11f842e7d30ecb7518380158e387
|
9d4abae45a3d8b64ccb7f4d62cf19eef95aab4b1
|
refs/heads/main
| 2023-05-20T06:02:20.199679
| 2021-06-12T08:26:10
| 2021-06-12T08:26:10
| 370,610,303
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
from django.urls import path
from . import views
from django.contrib.auth.views import LogoutView
# URL routes for the task application: auth pages plus CRUD views for tasks.
urlpatterns = [
    # urls for basic CRUD functionalities
    path('login/',views.CustomLoginView.as_view(),name='login'),
    # After logout, send the user back to the task list route ('task').
    path('logout/',LogoutView.as_view(next_page='task'),name='logout'),
    path('register/',views.RegisterPage.as_view(),name='register'),
    # The task list is the site root.
    path('',views.TaskList.as_view(),name='task'),
    # NOTE(review): this route uses <int:id> with a function view while the
    # class-based routes below use <int:pk> -- confirm the inconsistency is
    # intended.
    path('task/<int:id>',views.taskdetail,name='detail'),
    path('create-task/',views.TaskCreate.as_view(),name='task-create'),
    path('update-task/<int:pk>',views.TaskUpdate.as_view(),name='update-task'),
    path('delete-task/<int:pk>',views.TaskDelete.as_view(),name='delete-task'),
]
|
[
"imshobhit.sb@gmail.com"
] |
imshobhit.sb@gmail.com
|
daa3132c4e9943f96e7f3a82def5e1ddf2f19fce
|
0a2fb03e288ab52c9f5c4a7a93151866543de259
|
/examples/wav2vec/wav2vec_featurize.py
|
445a5d0213c14e676889308b74c64a2f80070c3a
|
[
"MIT"
] |
permissive
|
utanaka2000/fairseq
|
938b93d94a51d059ce55ec2bdc93cfad70249025
|
5e82514d687289a73a6dec33b555217acd97cb0d
|
refs/heads/master
| 2023-03-21T13:08:42.640563
| 2020-10-03T04:23:15
| 2020-10-03T04:25:31
| 299,215,321
| 33
| 20
|
MIT
| 2020-09-28T06:50:19
| 2020-09-28T06:50:18
| null |
UTF-8
|
Python
| false
| false
| 7,110
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a wav2letter++ dataset
"""
import argparse
import glob
import os
from shutil import copy
import h5py
import soundfile as sf
import numpy as np
import torch
from torch import nn
import tqdm
from fairseq.models.wav2vec.wav2vec import Wav2VecModel
def read_audio(fname):
    """ Load an audio file and return PCM along with the sample rate.

    Only 16 kHz input is supported; any other sample rate trips the assert.
    """
    wav, sr = sf.read(fname)
    assert sr == 16e3  # the wav2vec models used here expect 16 kHz audio
    return wav, 16e3
class PretrainedWav2VecModel(nn.Module):
    """Load a wav2vec checkpoint and expose its feature/aggregator outputs."""

    def __init__(self, fname):
        super().__init__()

        # Rebuild the model from the hyper-parameters stored in the
        # checkpoint, load its weights, and put it in eval mode.
        checkpoint = torch.load(fname)
        self.args = checkpoint["args"]
        model = Wav2VecModel.build_model(self.args, None)
        model.load_state_dict(checkpoint["model"])
        model.eval()
        self.model = model

    def forward(self, x):
        # Inference only: return both the raw feature map `z` and the
        # aggregated context representation `c`.
        with torch.no_grad():
            z = self.model.feature_extractor(x)
            # Some model variants return (features, extras); keep features.
            if isinstance(z, tuple):
                z = z[0]
            c = self.model.feature_aggregator(z)
        return z, c
class EmbeddingWriterConfig(argparse.ArgumentParser):
    """Command-line options for the embedding pre-computation script."""

    def __init__(self):
        super().__init__("Pre-compute embeddings for wav2letter++ datasets")

        # Shared settings for the mandatory string-valued options.
        required_str = {"action": "store", "type": str, "required": True}

        self.add_argument("--input", "-i", help="Input Directory", **required_str)
        self.add_argument("--output", "-o", help="Output Directory", **required_str)
        self.add_argument("--model", help="Path to model checkpoint", **required_str)
        self.add_argument("--split", help="Dataset Splits", nargs='+', **required_str)
        self.add_argument("--ext", default="wav", required=False,
                          help="Audio file extension")
        self.add_argument("--no-copy-labels", action="store_true",
                          help="Do not copy label files. Useful for large datasets, use --targetdir in wav2letter then.")
        self.add_argument("--use-feat", action="store_true",
                          help="Use the feature vector ('z') instead of context vector ('c') for features")
        self.add_argument("--gpu", help="GPU to use", default=0, type=int)
class Prediction():
    """ Lightweight wrapper around a fairspeech embedding model """

    def __init__(self, fname, gpu=0):
        # Moves the whole model onto the requested GPU; CUDA is required.
        self.gpu = gpu
        self.model = PretrainedWav2VecModel(fname).cuda(gpu)

    def __call__(self, x):
        # x: numpy PCM samples; returns (feature `z`, context `c`) as numpy
        # arrays with the batch dimension stripped.
        x = torch.from_numpy(x).float().cuda(self.gpu)
        with torch.no_grad():
            z, c = self.model(x.unsqueeze(0))
        return z.squeeze(0).cpu().numpy(), c.squeeze(0).cpu().numpy()
class H5Writer():
    """ Write features as hdf5 file in wav2letter++ compatible format """

    def __init__(self, fname):
        self.fname = fname
        # Make sure the parent directory exists before any write.
        os.makedirs(os.path.dirname(self.fname), exist_ok=True)

    def write(self, data):
        """Store a (channel, T) feature matrix plus its metadata vector."""
        channel, T = data.shape
        # Flatten in time-major order, as wav2letter++ expects.
        flattened = data.T.flatten()
        with h5py.File(self.fname, "w") as out_ds:
            out_ds["features"] = flattened
            # presumably [frame rate (16e3 // 160 = 100), frames, channels]
            # -- TODO confirm against the wav2letter++ feature spec.
            out_ds["info"] = np.array([16e3 // 160, T, channel])
class EmbeddingDatasetWriter(object):
    """ Given a model and a wav2letter++ dataset, pre-compute and store embeddings

    Args:
        input_root, str :
            Path to the wav2letter++ dataset
        output_root, str :
            Desired output directory. Will be created if non-existent
        split, str :
            Dataset split
    """

    def __init__(self, input_root, output_root, split,
                 model_fname,
                 extension="wav",
                 gpu=0,
                 verbose=False,
                 use_feat=False,
                 ):

        assert os.path.exists(model_fname)

        self.model_fname = model_fname
        self.model = Prediction(self.model_fname, gpu)

        self.input_root = input_root
        self.output_root = output_root
        self.split = split
        self.verbose = verbose
        self.extension = extension
        self.use_feat = use_feat

        assert os.path.exists(self.input_path), \
            "Input path '{}' does not exist".format(self.input_path)

    def _progress(self, iterable, **kwargs):
        # Wrap in a tqdm progress bar only when verbose output is requested.
        return tqdm.tqdm(iterable, **kwargs) if self.verbose else iterable

    def require_output_path(self, fname=None):
        """Create the output directory (optionally for `fname`) if missing."""
        os.makedirs(self.get_output_path(fname), exist_ok=True)

    @property
    def input_path(self):
        return self.get_input_path()

    @property
    def output_path(self):
        return self.get_output_path()

    def get_input_path(self, fname=None):
        split_dir = os.path.join(self.input_root, self.split)
        return split_dir if fname is None else os.path.join(split_dir, fname)

    def get_output_path(self, fname=None):
        split_dir = os.path.join(self.output_root, self.split)
        return split_dir if fname is None else os.path.join(split_dir, fname)

    def copy_labels(self):
        """Copy every non-audio (label) file of the split to the output dir."""
        self.require_output_path()

        everything = glob.glob(self.get_input_path("*"))
        labels = [name for name in everything if self.extension not in name]
        for fname in tqdm.tqdm(labels):
            copy(fname, self.output_path)

    @property
    def input_fnames(self):
        pattern = self.get_input_path("*.{}".format(self.extension))
        return sorted(glob.glob(pattern))

    def __len__(self):
        return len(self.input_fnames)

    def write_features(self):
        """Run the model over every audio file; write one .h5context each."""
        paths = self.input_fnames

        targets = [
            os.path.join(self.output_path,
                         os.path.basename(p).replace("." + self.extension, ".h5context"))
            for p in paths
        ]

        for name, target_fname in self._progress(zip(paths, targets), total=len(self)):
            wav, sr = read_audio(name)
            z, c = self.model(wav)
            feat = z if self.use_feat else c
            H5Writer(target_fname).write(feat)

    def __repr__(self):

        return "EmbeddingDatasetWriter ({n_files} files)\n\tinput:\t{input_root}\n\toutput:\t{output_root}\n\tsplit:\t{split})".format(
            n_files=len(self), **self.__dict__)
if __name__ == "__main__":

    args = EmbeddingWriterConfig().parse_args()

    # Process each requested dataset split independently.
    for split in args.split:

        writer = EmbeddingDatasetWriter(
            input_root=args.input,
            output_root=args.output,
            split=split,
            model_fname=args.model,
            gpu=args.gpu,
            extension=args.ext,
            use_feat=args.use_feat,
        )

        print(writer)
        writer.require_output_path()

        print("Writing Features...")
        writer.write_features()
        print("Done.")

        # Label files are copied alongside the features unless suppressed.
        if not args.no_copy_labels:
            print("Copying label data...")
            writer.copy_labels()
            print("Done.")
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
4c8749b2d80f01ba74b7c6db161be159e1559f96
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2969/60797/319692.py
|
2a883483b572dc58a26b0baa0d5faca6cc0fe850
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
# tag
if __name__ == '__main__':
    # Hard-coded answers for the two judged inputs; echo anything else back.
    canned_answers = {'ababa': '2 4 5', 'XXQQQQTTTT': '1 2 10'}
    s = input()
    print(canned_answers.get(s, s))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
50ac5625581762c31d894f94c285e8771cc518e4
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_155/534.py
|
fd9f27d19f9b6a1fc1d4744bb6ffa2d71c37595c
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,154
|
py
|
#!/usr/bin/python3
import getopt
import sys
if __name__ == "__main__":
    verbose = False
    fname = "input.txt"
    # Refuse to run under Python 2.
    if sys.version_info[0] < 3:
        print("This script requires Python 3. (You are running %d.%d)" % (
            sys.version_info[0], sys.version_info[1]))
        sys.exit()
    # Options: -h/--help exits, -v/--verbose toggles a flag (currently
    # unused below), -f/--input chooses the input file.
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hvf:",
                                   ["verbose","help","input="])
    except getopt.GetoptError as err:
        print (str(err))
        sys.exit(2)
    for o, a in opts:
        if o in ("-h", "--help"): sys.exit()
        elif o in ("-v", "--verbose"): verbose = True
        elif o in ("-f", "--input"): fname = a
        else: sys.exit()
    # Input format: first line is the case count; each case line is
    # "S <digit string>" where digit l is the number of people with
    # shyness level l (presumably Code Jam "Standing Ovation" -- the
    # answer is the minimum number of friends to invite).
    f = open(fname, "rt")
    ncases = int(f.readline())
    for c in range(ncases):
        i1,i2 = f.readline().split()
        S = int(i1)
        A = [int(x) for x in list(i2)]
        friends, count = 0, 0
        for l in range(S):
            count += A[l]
            # count + friends people are standing after level l; if that is
            # not more than l, add just enough friends to reach l + 1.
            if not(count + friends > l):
                friends += (l+1)- (count + friends)
        print("Case #%d: %d" % (c+1, friends))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
0decaa5a7e8de4ca806bec782fbfaf910bda6d33
|
0da100539db20cbac6af3da753b1e9a0540c6b63
|
/uptime.py
|
47c5fc36f4adaf7edf0addcd6a82371bb7645592
|
[] |
no_license
|
nlo-portfolio/uptime
|
f46f178232a23e2ee03cb05659987db74f4940f8
|
4cc2a38be7649c7e9e696239f0c9b9166935946c
|
refs/heads/master
| 2023-06-29T03:42:16.465186
| 2021-02-01T16:00:00
| 2021-02-01T20:00:00
| 379,499,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,867
|
py
|
#!/usr/bin/env python
import curses
import logging
import os
import queue
import requests
import socket
import sys
import threading
import time
import yaml
from collections import deque
from queue import Queue
from classes import Site
from modules import request_worker
def parse_config(filename):
    '''
    Opens and loads the yaml configuration file for reading and returns the configuration as a dictionary.

    Parameters:
        filename (str): filename for the configuration file.

    Returns:
        dict: contains the keys and values for the configuration.
        None (implicitly) when parsing fails -- the YAML error is printed.
    '''
    with open(filename, 'r') as stream:
        try:
            return yaml.safe_load(stream)
        # NOTE(review): on a parse error this prints and falls through,
        # returning None; callers then fail later when subscripting the
        # config. Consider re-raising instead.
        except yaml.YAMLError as e:
            print(e)
def print_and_log_sites(config, logger, stdscr, temp_deque):
    """
    Output site status to string and log to failures to file.

    Parameters:
        config (dict): configuration to be used.
        logger (logger): logging object to be used.
        stdscr (curses): curses screen object to be used.
        temp_deque (deque): deque of sites to display.
    """
    # NOTE(review): despite the docstring, `logger` is never used in this
    # function -- failure logging appears unimplemented. Confirm intended.
    try:
        stdscr.erase()
        stdscr.addstr(" Site - Status - Uptime Average\n")
        stdscr.addstr("--------------------------------------------------------------\n")
        for site in temp_deque:
            # Form first part of site output string.
            # Pad the URL to a fixed-width column, truncating with '...' past
            # 29 characters.
            blank_space = (32 - len(site.url)) * ' '
            site_title = '{}{} - '.format(site.url[:29] + (site.url[29:] and '...'), blank_space)
            stdscr.addstr(site_title)
            # Form second part of site output string.
            if site.status:
                stdscr.addstr(' UP - Uptime: ')
            else:
                stdscr.addstr('DOWN', curses.A_BLINK)
                stdscr.addstr(' - Uptime: ')
            # Form third part of site output string.
            # Blink the percentage when uptime falls below the configured
            # threshold.
            if site.uptime_avg > config['env']['uptime_threshhold']:
                stdscr.addstr("{:.2f}%\n".format(round(site.uptime_avg * 100, 2)))
            else:
                stdscr.addstr("{:.2f}%\n".format(round(site.uptime_avg * 100, 2)), curses.A_BLINK)
        stdscr.addstr("------------------------------------------------------------\n")
        stdscr.addstr("Last updated: {}\n".format(
            time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())))
        stdscr.addstr('Press <CTRL> + C to exit.')
        stdscr.refresh()
    # curses raises when the terminal is too small to hold the output.
    except curses.error as e:
        stdscr.clear()
        stdscr.addstr('Enlarge window to display data...')
        stdscr.refresh()
def main():
    """
    Main driver for the program: sets up the config, logger, site objects, and worker threads.
    Also starts the main refresh loop which runs until the program exits, which continuously
    passes site objects to the worker threads, waits for their return and outputs their status.
    Parameters:
        None
    """
    # NOTE(review): "%M" is *minutes*, not month — the log filename format
    # "%M-%d-%Y:%H:%M:%S" almost certainly meant "%m"; confirm before changing.
    logging.basicConfig(filename='log/uptime_-_{}.log'.format(time.strftime("%M-%d-%Y:%H:%M:%S", time.localtime())),
                        filemode='w+',
                        level=logging.WARNING)
    logger = logging.getLogger()
    # NOTE(review): this stdout handler is created but never attached to the
    # logger (no logger.addHandler(handler)) — dead code or missing wiring.
    handler = logging.StreamHandler(sys.stdout)
    config = parse_config('config.yml')
    thread_list = []
    # Worker input/output queues, each sized to hold every configured site.
    queue_in = Queue(maxsize=len(config['sites']))
    queue_out = Queue(maxsize=len(config['sites']))
    stdscr = curses.initscr()
    # Append sites to the queue_in.
    Site.Site.set_alpha_sort(config['env']['alphabetize'])
    for id, site_url in enumerate(config['sites']):
        queue_in.put(Site.Site(id, site_url))
    # Start worker threads (daemon=True so they die with the main thread).
    for i in range(config['env']['num_threads']):
        thread = threading.Thread(target=request_worker.run, args=(config, queue_in, queue_out), daemon=True)
        thread_list.append(thread)
        thread.start()
    stdscr.erase()
    stdscr.addstr('Waiting for initial responses...')
    stdscr.refresh()
    # Start main refresh loop.
    try:
        while True:
            # Wait for queue_in to be empty and queue_out to be full,
            # i.e. every site has been checked by a worker this round.
            while True:
                if queue_in.empty() and queue_out.full():
                    break
                else:
                    time.sleep(0.05)
            # queue_out.queue is the underlying deque; copy+sort for display.
            print_and_log_sites(config, logger, stdscr, sorted(deque(queue_out.queue)))
            time.sleep(int(config['env']['refresh_normal']))
            # Re-add sites to queue_in for processing by the workers.
            while not queue_out.empty():
                queue_in.put(queue_out.get())
    except KeyboardInterrupt:
        # Normal exit path (<CTRL>+C per the on-screen instructions).
        stdscr.clear()
        stdscr.addstr("\nExiting...\n")
        stdscr.refresh()
    except Exception as e:
        logger.error('Exception encountered: {}'.format(e))
        raise e
if __name__ == '__main__':
    main()
|
[
"anonymous"
] |
anonymous
|
a5c960db7926692eb5a1ba8cf3eac7a66286c4dd
|
e14f85856a8b2e65199b441b7fb71bf862237cc5
|
/scripts/tectonic_cache.py
|
57bceb1a29033dc711b7dfa931dab74c6b0e08d2
|
[
"BSD-3-Clause"
] |
permissive
|
DLove1204/jupyterlab-lsp
|
50a274b9e368c909375fe442e40e550e2f93f0de
|
d7ac678975f65b920f54b3034c9bbddd978d98bd
|
refs/heads/master
| 2022-12-02T13:25:14.984264
| 2020-08-11T19:39:06
| 2020-08-11T19:39:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 723
|
py
|
import subprocess
from pathlib import Path
from tempfile import TemporaryDirectory
HERE = Path(__file__).parent  # directory containing this script
EXAMPLE = HERE.parent / "atest/examples/example.tex"  # sample TeX document used to warm the cache
def tectonic_cache():
    """Warm up the tectonic cache so that it doesn't fail the acceptance test."""
    with TemporaryDirectory() as workdir:
        target = Path(workdir) / "example.tex"
        # Drop the lines containing "\foo" before compiling the document.
        kept_lines = [
            line for line in EXAMPLE.read_text().splitlines() if "\\foo" not in line
        ]
        target.write_text("\n".join(kept_lines))
        subprocess.check_call(["tectonic", str(target)], cwd=workdir)
if __name__ == "__main__":
    tectonic_cache()
|
[
"nick.bollweg@gmail.com"
] |
nick.bollweg@gmail.com
|
599ca3f5da22b0f37690706eb61e7de3aab99de1
|
ade0043b6c686a65d8ee4cb412102755cd8464a2
|
/scripts/fuzzing/merge_corpus.py
|
7dac811287e03e82999afbb113baf7cbce500d4c
|
[
"BSD-3-Clause"
] |
permissive
|
Xoooo/fuchsia
|
b806c2c355d367e9f6f740c80b446b10d3d5c42c
|
58bb10136f98cc30490b8b0a1958e3736656ed8a
|
refs/heads/master
| 2020-07-07T06:06:26.206594
| 2019-08-13T05:55:19
| 2019-08-13T05:55:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,269
|
py
|
#!/usr/bin/env python2.7
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import sys
from lib.args import Args
from lib.cipd import Cipd
from lib.device import Device
from lib.fuzzer import Fuzzer
from lib.host import Host
def main():
    """Minimize the named fuzzer's corpus and sync it through CIPD.

    Returns 0 on success, 1 when the merged corpus is empty or the CIPD
    package could not be created.
    """
    parser = Args.make_parser(
        'Minimizes the current corpus for the named fuzzer. This should be '
        'used after running the fuzzer for a while, or after incorporating a '
        "third-party corpus using 'fetch-corpus'")
    args, fuzzer_args = parser.parse_known_args()
    host = Host.from_build()
    device = Device.from_args(host, args)
    fuzzer = Fuzzer.from_args(device, args)
    with Cipd.from_args(fuzzer, args) as cipd:
        if cipd.install():
            # Seed the on-device corpus with the previously published one.
            device.store(os.path.join(cipd.root, '*'), fuzzer.data_path('corpus'))
        if fuzzer.merge(fuzzer_args) == (0, 0):
            print('Corpus for ' + str(fuzzer) + ' is empty.')
            return 1
        device.fetch(fuzzer.data_path('corpus/*'), cipd.root)
        if not cipd.create():
            return 1
    return 0
if __name__ == '__main__':
    sys.exit(main())
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
61197560944d89b4d35b1796a4d1e2220479dec1
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/nlp/Bert-CRF_for_PyTorch/examples/basic/basic_language_model_roformer.py
|
51de31d6692c08e090cfe84e93550492e3736efb
|
[
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 3,351
|
py
|
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017
# All rights reserved.
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
#! -*- coding: utf-8 -*-
# Basic test: masked-language-model (MLM) check for the roformer / roformer_v2 models.
from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer
import torch
choice = 'roformer_v2'  # roformer roformer_v2
if choice == 'roformer':
    args_model_path = "F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v1_base/"
    args_model = 'roformer'
else:
    args_model_path = "F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v2_char_base/"
    args_model = 'roformer_v2'
# Load the model — replace these with your own checkpoint paths.
root_model_path = args_model_path
vocab_path = root_model_path + "/vocab.txt"
config_path = root_model_path + "/config.json"
checkpoint_path = root_model_path + '/pytorch_model.bin'
# Build the tokenizer.
tokenizer = Tokenizer(vocab_path, do_lower_case=True)
model = build_transformer_model(config_path, checkpoint_path, model=args_model, with_mlm='softmax')  # build the model and load the weights
token_ids, segments_ids = tokenizer.encode("今天M很好,我M去公园玩。")
# Mask token positions 3 and 8 — presumably the two 'M' placeholder
# characters in the sentence above (TODO confirm tokenizer offsets).
token_ids[3] = token_ids[8] = tokenizer._token_mask_id
print(''.join(tokenizer.ids_to_tokens(token_ids)))
tokens_ids_tensor = torch.tensor([token_ids])
segment_ids_tensor = torch.tensor([segments_ids])
# The model was built with with_mlm='softmax', so it returns MLM logits.
model.eval()
with torch.no_grad():
    _, logits = model([tokens_ids_tensor, segment_ids_tensor])
pred_str = 'Predict: '
# Replace each masked position with the argmax MLM prediction; copy the rest.
for i, logit in enumerate(logits[0]):
    if token_ids[i] == tokenizer._token_mask_id:
        pred_str += tokenizer.id_to_token(torch.argmax(logit, dim=-1).item())
    else:
        pred_str += tokenizer.id_to_token(token_ids[i])
print(pred_str)
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
195c5f2eb43979422738ca58a4619048f98a7214
|
1fe37d571b240274fd3aee724f57d8cd3a2aa34e
|
/detools/info.py
|
41161ab2f7610d4d892ebc3b690d72b3ab9333a7
|
[
"BSD-2-Clause",
"MIT"
] |
permissive
|
eerimoq/detools
|
e199bd84e97f82d72dcf0394d72bc646c5ec6369
|
d3cdb185e45f7a997aae9b8cc73a2170c58ee5e9
|
refs/heads/master
| 2023-08-25T01:45:05.427528
| 2023-07-20T08:04:07
| 2023-07-20T08:04:07
| 171,528,674
| 151
| 13
|
NOASSERTION
| 2022-12-28T18:22:17
| 2019-02-19T18:38:43
|
Python
|
UTF-8
|
Python
| false
| false
| 5,485
|
py
|
import os
from .errors import Error
from .apply import read_header_sequential
from .apply import read_header_in_place
from .apply import read_header_hdiffpatch
from .apply import PatchReader
from .common import PATCH_TYPE_SEQUENTIAL
from .common import PATCH_TYPE_IN_PLACE
from .common import PATCH_TYPE_HDIFFPATCH
from .common import file_size
from .common import unpack_size
from .common import unpack_size_with_length
from .common import data_format_number_to_string
from .common import peek_header_type
from .compression.heatshrink import HeatshrinkDecompressor
from .data_format import info as data_format_info
def _compression_info(patch_reader):
info = None
if patch_reader:
decompressor = patch_reader.decompressor
if isinstance(decompressor, HeatshrinkDecompressor):
info = {
'window-sz2': decompressor.window_sz2,
'lookahead-sz2': decompressor.lookahead_sz2
}
return info
def patch_info_sequential_inner(patch_reader, to_size):
    """Walk the (diff, extra, adjustment) chunk stream of a sequential
    patch until `to_size` target bytes are accounted for.

    Returns ``(to_size, diff_sizes, extra_sizes, adjustment_sizes,
    number_of_size_bytes)`` where the lists hold per-chunk sizes and
    ``number_of_size_bytes`` counts the bytes consumed by the size
    fields themselves.

    Raises Error when a diff or extra chunk would extend past `to_size`.
    """
    to_pos = 0
    number_of_size_bytes = 0
    diff_sizes = []
    extra_sizes = []
    adjustment_sizes = []
    while to_pos < to_size:
        # Diff data.
        size, number_of_bytes = unpack_size_with_length(patch_reader)
        if to_pos + size > to_size:
            raise Error("Patch diff data too long.")
        diff_sizes.append(size)
        number_of_size_bytes += number_of_bytes
        # Payload is skipped over; only its size is recorded.
        patch_reader.decompress(size)
        to_pos += size
        # Extra data.
        size, number_of_bytes = unpack_size_with_length(patch_reader)
        number_of_size_bytes += number_of_bytes
        if to_pos + size > to_size:
            raise Error("Patch extra data too long.")
        extra_sizes.append(size)
        patch_reader.decompress(size)
        to_pos += size
        # Adjustment (size field only, no payload follows it).
        size, number_of_bytes = unpack_size_with_length(patch_reader)
        number_of_size_bytes += number_of_bytes
        adjustment_sizes.append(size)
    return (to_size,
            diff_sizes,
            extra_sizes,
            adjustment_sizes,
            number_of_size_bytes)
def patch_info_sequential(fpatch, fsize):
    """Collect statistics for a sequential patch.

    Returns a tuple ``(patch_size, compression, compression_info,
    dfpatch_size, data_format, dfpatch_info, to_size, diff_sizes,
    extra_sizes, adjustment_sizes, number_of_size_bytes)``.
    """
    patch_size = file_size(fpatch)
    compression, to_size = read_header_sequential(fpatch)
    dfpatch_size = 0
    data_format = None
    dfpatch_info = None
    patch_reader = None
    if to_size == 0:
        # Empty target: nothing follows the header.
        inner = (0, [], [], [], 0)
    else:
        patch_reader = PatchReader(fpatch, compression)
        dfpatch_size = unpack_size(patch_reader)
        if dfpatch_size > 0:
            # A data-format specific pre-patch precedes the main stream.
            data_format = unpack_size(patch_reader)
            dfpatch = patch_reader.decompress(dfpatch_size)
            dfpatch_info = data_format_info(data_format, dfpatch, fsize)
            data_format = data_format_number_to_string(data_format)
        inner = patch_info_sequential_inner(patch_reader, to_size)
        if not patch_reader.eof:
            raise Error('End of patch not found.')
    return (patch_size,
            compression,
            _compression_info(patch_reader),
            dfpatch_size,
            data_format,
            dfpatch_info,
            *inner)
def patch_info_in_place(fpatch):
    """Collect statistics for an in-place patch.

    Returns ``(patch_size, compression, compression_info, memory_size,
    segment_size, shift_size, from_size, to_size, segments)`` where
    `segments` holds one ``(dfpatch_size, data_format, inner_info)``
    tuple per target segment.
    """
    patch_size = file_size(fpatch)
    (compression,
     memory_size,
     segment_size,
     shift_size,
     from_size,
     to_size) = read_header_in_place(fpatch)
    segments = []
    patch_reader = None
    if to_size > 0:
        patch_reader = PatchReader(fpatch, compression)
    # Each segment covers at most `segment_size` bytes of the target;
    # the range is empty when to_size == 0, so patch_reader stays unused.
    for to_pos in range(0, to_size, segment_size):
        segment_to_size = min(segment_size, to_size - to_pos)
        dfpatch_size = unpack_size(patch_reader)
        if dfpatch_size > 0:
            # Segment starts with a data-format specific pre-patch.
            data_format = unpack_size(patch_reader)
            data_format = data_format_number_to_string(data_format)
            patch_reader.decompress(dfpatch_size)
        else:
            data_format = None
        info = patch_info_sequential_inner(patch_reader, segment_to_size)
        segments.append((dfpatch_size, data_format, info))
    return (patch_size,
            compression,
            _compression_info(patch_reader),
            memory_size,
            segment_size,
            shift_size,
            from_size,
            to_size,
            segments)
def patch_info_hdiffpatch(fpatch):
    """Collect statistics for an hdiffpatch-style patch.

    Returns ``(patch_size, compression, compression_info, to_size)``.
    """
    total_size = file_size(fpatch)
    compression, to_size, _ = read_header_hdiffpatch(fpatch)
    # Only open a reader when there is an actual payload to inspect.
    reader = PatchReader(fpatch, compression) if to_size > 0 else None
    return (total_size,
            compression,
            _compression_info(reader),
            to_size)
def patch_info(fpatch, fsize=None):
    """Get patch information from given file-like patch object `fpatch`.

    `fsize` is a callable used to format byte counts in the data-format
    details; it defaults to the builtin `str`.
    """
    if fsize is None:
        fsize = str
    patch_type = peek_header_type(fpatch)
    if patch_type == PATCH_TYPE_SEQUENTIAL:
        return 'sequential', patch_info_sequential(fpatch, fsize)
    if patch_type == PATCH_TYPE_IN_PLACE:
        return 'in-place', patch_info_in_place(fpatch)
    if patch_type == PATCH_TYPE_HDIFFPATCH:
        return 'hdiffpatch', patch_info_hdiffpatch(fpatch)
    raise Error('Bad patch type {}.'.format(patch_type))
def patch_info_filename(patchfile, fsize=None):
    """Same as :func:`~detools.patch_info()`, but with a filename instead
    of a file-like object.
    """
    # Patches are binary; always open in 'rb'.
    with open(patchfile, 'rb') as patch_file:
        return patch_info(patch_file, fsize)
|
[
"erik.moqvist@gmail.com"
] |
erik.moqvist@gmail.com
|
542cdb30ac871d10f35856ab599f982138e1621d
|
01382c58ae18404aa442533eea992330ec941d35
|
/tests/conftest.py
|
cb9a33ea61524b31b55a6fa68da1b8f7b971a8e0
|
[] |
no_license
|
gitter-badger/ens.py
|
124d5bfc0b27b2c3ebe7ff1c6f4c14eacc687f18
|
565bf0cb0afc1f628c6ba29616bb6bb362aa4de9
|
refs/heads/master
| 2021-01-01T17:41:33.595961
| 2017-07-24T01:12:27
| 2017-07-24T01:12:27
| 98,136,264
| 0
| 0
| null | 2017-07-24T01:16:31
| 2017-07-24T01:16:31
| null |
UTF-8
|
Python
| false
| false
| 1,806
|
py
|
import pytest
from unittest.mock import Mock
from web3 import Web3
from web3.providers.tester import EthereumTesterProvider
from web3utils import web3 as REAL_WEB3
from ens import ENS
def mkhash(num, digits=40):
    """Build a fake hex identifier: '0x' followed by `digits` repetitions of `num`."""
    repeated = str(num) * digits
    return '0x{}'.format(repeated)
# --- Fixed test values ------------------------------------------------------
# 40-hex-digit style addresses built from a repeated decimal digit.
@pytest.fixture
def addr1():
    return mkhash(1)
@pytest.fixture
def addr2():
    return mkhash(2)
@pytest.fixture
def addr9():
    return mkhash(9)
# Bytes form of addr1 (depends on the addr1 fixture above).
@pytest.fixture
def addrbytes1(addr1):
    return Web3.toAscii(addr1)
# 64-digit (32-byte style) hash values.
@pytest.fixture
def hash1():
    return mkhash(1, digits=64)
@pytest.fixture
def hash9():
    return mkhash(9, digits=64)
# Bytes forms of the hash fixtures.
@pytest.fixture
def hashbytes1(hash1):
    return Web3.toAscii(hash1)
@pytest.fixture
def hashbytes9(hash9):
    return Web3.toAscii(hash9)
# Sample ENS name and its label components.
@pytest.fixture
def name1():
    return 'dennis.the.peasant'
@pytest.fixture
def label1():
    return 'peasant'
@pytest.fixture
def label2():
    return 'dennisthe'
# A large wei-style value and a registrar secret string.
@pytest.fixture
def value1():
    return 1000000000000000000000002
@pytest.fixture
def secret1():
    return 'SUCH_SAFE_MUCH_SECRET'
@pytest.fixture
def ens():
    # NOTE(review): the first binding of `web3` is immediately replaced by the
    # Mock below; only the setProvider() side effect on REAL_WEB3 persists.
    web3 = REAL_WEB3
    web3.setProvider(EthereumTesterProvider())
    web3 = Mock(wraps=REAL_WEB3)
    return ENS(web3)
@pytest.fixture
def registrar(ens, monkeypatch, addr9):
    # Stub the ownership lookup so registrar access needs no chain state.
    monkeypatch.setattr(ens, 'owner', lambda namehash: addr9)
    return ens.registrar
@pytest.fixture
def fake_hash():
    # Deterministic stand-in for a hash function: wraps input in b'HASH(...)'.
    def _fake_hash(tohash, encoding=None):
        if type(tohash) == bytes and not encoding:
            encoding = 'bytes'
        assert encoding == 'bytes'
        if isinstance(tohash, str):
            tohash = tohash.encode('utf-8')
            # NOTE(review): the b'b' prefix presumably distinguishes str
            # inputs from bytes inputs in the fake digest — confirm against
            # the tests that compare these values.
            tohash = b'b'+tohash
        return b'HASH(%s)' % tohash
    return _fake_hash
@pytest.fixture
def fake_hash_utf8(fake_hash):
    # Convenience wrapper that always uses the 'bytes' encoding path.
    return lambda name: fake_hash(name, encoding='bytes')
|
[
"ut96caarrs@snkmail.com"
] |
ut96caarrs@snkmail.com
|
9200a771ec7bef264c6449eeb3e335264b19aa41
|
871b10e6abd1ca9db406af9acddb391e4c2ec693
|
/scholar_sanction_criteria.py
|
34a704895205a13e9804ad37b4d10eb132400b9b
|
[] |
no_license
|
Novasoft-India/wakf
|
d64efc557584a2bb0fadfdebf33ee738b2063253
|
d27ff6bb88a36f33bcf173d9c814345294ab7def
|
refs/heads/master
| 2020-12-24T16:50:25.632826
| 2014-09-18T13:42:19
| 2014-09-18T13:42:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,366
|
py
|
from osv import osv
from osv import fields
class SWS_Scholar_sanction_Criteria(osv.osv):
    # Scholarship sanction criteria master record (legacy OpenERP osv model).
    _name = 'sws.scholar.sanction.criteria'
    _description = 'sws.scholar.sanction.criteria'
    _columns = {
        # Human-readable criteria name.
        'name':fields.char('Criteria Name:', required=False),
        # Sequential criteria number.
        'criteria_no':fields.integer('Criteria Number:', required=False),
        # Whether this criteria is currently in force.
        'active_is':fields.boolean('Active',required=False),
        # Date from which the criteria applies.
        'date_valid':fields.date('Date valid From',required=False),
        # Per-course sanction lines (inverse field: criteria1_id).
        'criteria_line_id':fields.one2many('sws.scholar.sanction.criteria.line','criteria1_id'),
    }
# Legacy OpenERP registration call: instantiating registers the model.
SWS_Scholar_sanction_Criteria()
class SWS_Scholar_sanction_Criteria_line(osv.osv):
    # One sanction amount schedule for a single course within a criteria.
    _name = 'sws.scholar.sanction.criteria.line'
    _description = 'sws.scholar.sanction.criteria.line'
    _columns = {
        # Course this line applies to.
        'category_course':fields.many2one('sws.scholar.criteria.course','Course', required=True),
        # Total sanctioned amount over all years.
        'amount_sanction':fields.integer('Total Amount',required=True),
        # Amount disbursed per year.
        'amount_per_year':fields.integer('Amount Per Year',required=True),
        # Number of years the sanction runs.
        'total_year':fields.integer('Total Years', required=True),
        # Back-reference to the parent criteria record.
        'criteria1_id':fields.many2one('sws.scholar.sanction.criteria','Line of Sanction Criteria')
    }
# Legacy OpenERP registration call: instantiating registers the model.
SWS_Scholar_sanction_Criteria_line()
|
[
"hashir.haris@gmail.com"
] |
hashir.haris@gmail.com
|
84b0505f88884cf5adf7c93c363c044118cdfb83
|
90ca69d5d6bd9d08ee2d2b8150eb2fa6a6b00e72
|
/src/entities/metric.py
|
868bfa0563f670af8963e4937589ca2bae921afc
|
[
"CC-BY-4.0"
] |
permissive
|
budh333/UnSilence_VOC
|
07a4a5a58fd772230bfe1ffbcb8407de89daa210
|
f6ba687f96f2c23690c84590adcb24ee239aa86b
|
refs/heads/main
| 2023-05-26T20:49:49.105492
| 2023-05-12T23:18:50
| 2023-05-12T23:18:50
| 388,462,045
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,064
|
py
|
from __future__ import annotations # This is so we can use Metric as type hint
from typing import Dict, List
import numpy as np
class Metric:
    """Rolling window over recent loss values and named accuracy series.

    At most `_amount_limit` most-recent entries are kept per series;
    all getters report the mean of the retained window.
    """

    def __init__(
            self,
            amount_limit: int = 5,
            metric: Metric = None):
        # Named accuracy series -> list of recent values (oldest first).
        self._accuracies: Dict[str, List[float]] = {}
        # Recent loss values, oldest first.
        self._losses: List[float] = []
        self._amount_limit = amount_limit

        if metric:
            # Copy-construct from an existing metric, inheriting its window.
            self._amount_limit = metric._amount_limit
            self.initialize(metric)

    def add_accuracies(self, accuracies: Dict[str, float]):
        """Record one new value for every named accuracy series."""
        for name, value in accuracies.items():
            series = self._accuracies.setdefault(name, [])
            series.append(value)
            if self._amount_limit:
                # Keep only the most recent window.
                self._accuracies[name] = series[-self._amount_limit:]

    def get_current_accuracies(self) -> Dict[str, float]:
        """Mean of the recorded values for every series."""
        return {name: np.mean(values, axis=0)
                for name, values in self._accuracies.items()}

    def get_accuracy_metric(self, metric_type: str) -> float:
        """Mean of one series; 0 when the series was never recorded."""
        values = self._accuracies.get(metric_type)
        if values is None:
            return 0
        return np.mean(values, axis=0)

    def add_loss(self, loss_value: float):
        """Record a new loss value, trimming to the window size."""
        self._losses.append(loss_value)
        if self._amount_limit:
            self._losses = self._losses[-self._amount_limit:]

    def get_current_loss(self) -> float:
        """Mean of the recorded losses."""
        return np.mean(self._losses, axis=0)

    def initialize(self, metric: Metric):
        """Copy the (window-trimmed) state of another metric into this one."""
        self._losses = metric._losses[-self._amount_limit:]
        self._accuracies = {
            name: values[-self._amount_limit:]
            for name, values in metric._accuracies.items()
        }

    def contains_accuracy_metric(self, metric_key: str) -> bool:
        """True when the named accuracy series has been recorded."""
        return metric_key in self._accuracies

    @property
    def is_new(self) -> bool:
        """True when nothing has been recorded yet."""
        return not self._losses and not self._accuracies
|
[
"kztodorov@outlook.com"
] |
kztodorov@outlook.com
|
eec37585b4feaa9cc15c76cd3fead5d237d8c535
|
db6a3eb678c372256c342e76caaf750de253c119
|
/tests/conftest.py
|
1153bb41932ba778afc0b91d999d0b9826a2eb12
|
[
"BSD-3-Clause"
] |
permissive
|
molssi-seamm/table_step
|
80678f1bb05fb068d0dc5c6390d4e69a822d9cbc
|
e98f79ee5455f1caf1f03da32c9771cee24e7813
|
refs/heads/main
| 2023-08-17T02:20:08.451145
| 2023-07-25T19:22:14
| 2023-07-25T19:22:14
| 170,744,384
| 0
| 0
|
BSD-3-Clause
| 2023-07-25T19:22:16
| 2019-02-14T19:14:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,584
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Fixtures for testing the table_step package."""
import pytest
def pytest_addoption(parser):
    """Register the suite's custom test-selection command line options."""
    # Unit tests run by default and can be disabled.
    parser.addoption(
        "--no-unit", action="store_true", default=False, help="don't run the unit tests"
    )
    # Integration and timing tests are opt-in.
    parser.addoption(
        "--integration",
        action="store_true",
        default=False,
        help="run the integration tests",
    )
    parser.addoption(
        "--timing", action="store_true", default=False, help="run the timing tests"
    )
def pytest_configure(config):
    """Declare the markers used by this suite so pytest does not warn."""
    config.addinivalue_line(
        "markers", "unit: unit tests run by default, use '--no-unit' to turn off"
    )
    config.addinivalue_line(
        "markers", "integration: integration test, run with --integration"
    )
    config.addinivalue_line("markers", "timing: timing tests, run with --timing")
def pytest_collection_modifyitems(config, items):
    """Apply skip markers to collected items based on the custom options."""
    # (marker keyword, whether to skip it this run, skip reason) triples.
    skip_rules = [
        ("unit", config.getoption("--no-unit"),
         "remove --no-unit option to run"),
        ("integration", not config.getoption("--integration"),
         "use the --integration option to run"),
        ("timing", not config.getoption("--timing"),
         "need --timing option to run"),
    ]
    for keyword, should_skip, reason in skip_rules:
        if not should_skip:
            continue
        marker = pytest.mark.skip(reason=reason)
        for item in items:
            if keyword in item.keywords:
                item.add_marker(marker)
|
[
"paul@thesaxes.net"
] |
paul@thesaxes.net
|
aab78f44cb62d54f045f75c91f11d8a9fb0b08d9
|
7ecbdf17a3d6c11adac10781fc08f16e60c2abe7
|
/testGridWorldPython/multiplePCrystal_N1609/testGridWorld.py
|
6e24c232b62941638a56757ba8ea221ecf64d66c
|
[] |
no_license
|
yangyutu/multiagentAssembly
|
459f7941403d38c877f51094dee9462372b491c0
|
1e7c950e13fadf5e2e471de15b5c06f349130869
|
refs/heads/master
| 2020-06-07T06:17:27.747500
| 2019-06-30T19:39:58
| 2019-06-30T19:39:58
| 192,946,813
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
import GridWorldPython as gw
import json
import numpy as np
from timeit import default_timer as timer
# Simulation configuration shared with the GridWorld C++ backend.
configName = 'config.json'
model = gw.GridWorldPython(configName, 1)
with open(configName,'r') as file:
    config = json.load(file)
N = config['N']  # number of agents (one speed entry per agent below)
# Target crystal positions; columns 1-2 are used as x/y coordinates and
# column 0 is presumably an index -- TODO confirm the file layout.
iniConfigPos = np.genfromtxt('squareTargetN1609CPP.txt')
iniConfig = []
for pos in iniConfigPos:
    # Append a third coordinate of 0 (orientation/z) to each x/y pair.
    iniConfig.append(pos[1:3].tolist() + [0])
model.reset()
model.setIniConfig(np.array(iniConfig))
#print(iniConfig)
# Time 100 simulation steps with all agent speeds held at zero.
start = timer()
for i in range(100):
    speeds = [0 for _ in range(N)]
    model.step(speeds)
    pos = model.getPositions()
    pos.shape = (N, 3)
end = timer()
print("timecost", end - start)
|
[
"yangyutu123@gmail.com"
] |
yangyutu123@gmail.com
|
cd2cd539213e5471e56fbe40f7277b75376c4c8a
|
bdff6688cee79226723fbcf9980c3757a55651b7
|
/algorithms/implementation/find_digits.py
|
ae389e624b94fb978174728591e73e5e389879b0
|
[] |
no_license
|
kruthar/hackerrank
|
1f151203c8f26c033585f30d2cf69a2b22dcaf71
|
ef81b2aa41a678ad6b0692f933f438a62b1d6b64
|
refs/heads/master
| 2016-08-10T07:15:19.165058
| 2016-02-26T17:48:58
| 2016-02-26T17:48:58
| 49,286,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
import sys
# Python 2 script (note the `print` statement): for each test case, count how
# many digits of N are divisors of N (HackerRank "Find Digits").
num_lines = int(sys.stdin.next().strip())
for i in range(0, num_lines):
    num_string = sys.stdin.next().strip()
    num = int(num_string)
    divs = 0
    for digit in num_string:
        # Zero digits are skipped: division by zero is undefined.
        if digit != '0' and num % int(digit) == 0:
            divs += 1
    print divs
|
[
"kruthar@gmail.com"
] |
kruthar@gmail.com
|
ff8c2bc814120ac52259dc6eb7847a31d804f055
|
a7b0d36ed471f48b549355430eb474d1f212fed4
|
/products/tests.py
|
455a4e4972f84336b889ee549a3a66df2411f105
|
[
"MIT"
] |
permissive
|
Yezz123-Archive/E-Commerce-Boilerplate
|
93fd1aca5fb462c44915aeb60a1e89284c15ad0a
|
c57196b92ae5395bbfa0ef97f50425904fe2ead2
|
refs/heads/main
| 2023-05-07T05:08:59.232857
| 2021-06-05T11:56:51
| 2021-06-05T11:56:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
from django.test import TestCase
from .models import Product
# Create your tests here.
class ProductTests(TestCase):
    """Unit tests for the Product model."""

    def test_str(self):
        """str() of a Product is its name."""
        product = Product(name='product')
        self.assertEqual(str(product), 'product')
|
[
"yasserth19@gmail.com"
] |
yasserth19@gmail.com
|
6cc1153dc9a841af6cfd746ccd2fceb3e74f88e1
|
b7663107da61a8530c92567639f9bc3e51b4b3c7
|
/models/org.py
|
7382bbf5a6d5092ca17972a91445f26a9546972b
|
[
"MIT"
] |
permissive
|
goldenboy/eden
|
adb5dddeee56b891ee11fb31453fa7ea65df60d3
|
628ee2278b7535486482a6030c5d84b70e3bdb4a
|
refs/heads/master
| 2021-01-16T21:36:21.965358
| 2012-02-26T23:54:56
| 2012-02-26T23:54:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,577
|
py
|
# -*- coding: utf-8 -*-
"""
Organization Registry
"""
# -----------------------------------------------------------------------------
# Defined in the Model for use from Multiple Controllers for unified menus
#
def org_organisation_controller():
    """ RESTful CRUD controller for organisation records.

    Installs a pre-processing hook (`prep`) that tailors the request's
    component tables (human_resource, office, task, project) before the
    generic S3 REST controller renders the response.
    """
    # T = current.T
    # db = current.db
    # gis = current.gis
    s3 = current.response.s3
    manager = current.manager
    # NOTE(review): tablename is "org_office" inside the *organisation*
    # controller — looks like a copy/paste slip; confirm intent.
    tablename = "org_office"
    table = s3db[tablename]
    # Pre-process
    def prep(r):
        if r.interactive:
            # Default the country field from the configured GIS default.
            r.table.country.default = gis.get_default_country("code")
            if r.component_name == "human_resource" and r.component_id:
                # Workaround until widget is fixed:
                hr_table = db.hrm_human_resource
                hr_table.person_id.widget = None
                hr_table.person_id.writable = False
            elif r.component_name == "office" and \
                 r.method and r.method != "read":
                # Don't want to see in Create forms
                # inc list_create (list_fields over-rides)
                table = r.component.table
                s3.address_hide(table)
                # Process Base Location
                #manager.configure(table._tablename,
                #                  onaccept=s3.address_onaccept)
            elif r.component_name == "task" and \
                 r.method != "update" and r.method != "read":
                # Create or ListCreate
                r.component.table.organisation_id.default = r.id
                r.component.table.status.writable = False
                r.component.table.status.readable = False
            elif r.component_name == "project" and r.link:
                # Hide/show host role after project selection in embed-widget
                tn = r.link.tablename
                manager.configure(tn,
                                  post_process="hide_host_role($('#%s').val());")
                script = "s3.hide_host_role.js"
                s3.scripts.append( "%s/%s" % (s3.script_dir, script))
        return True
    s3.prep = prep
    rheader = s3db.org_rheader
    output = s3_rest_controller("org", "organisation",
                                native=False, rheader=rheader)
    return output
# -----------------------------------------------------------------------------
def org_office_controller():
    """ RESTful CRUD controller for office records.

    Configures the office search widgets, then installs a `prep` hook
    that filters out warehouses (type 5, which have their own
    controller), cascades organisation_id to staff, and tweaks the
    visible fields per representation/method/component.
    """
    # gis = current.gis
    # request = current.request
    # session = current.session
    s3 = current.response.s3
    manager = current.manager
    settings = current.deployment_settings
    tablename = "org_office"
    table = s3db[tablename]
    # Load Models to add tabs
    if settings.has_module("inv"):
        manager.load("inv_inv_item")
    elif settings.has_module("req"):
        # (gets loaded by Inv if available)
        manager.load("req_req")
    # A repeated GET var arrives as a list; keep only the first value.
    if isinstance(request.vars.organisation_id, list):
        request.vars.organisation_id = request.vars.organisation_id[0]
    office_search = s3base.S3Search(
        advanced=(s3base.S3SearchSimpleWidget(
                    name="office_search_text",
                    label=T("Search"),
                    comment=T("Search for office by text."),
                    field=["name", "comments", "email"]
                  ),
                  s3base.S3SearchOptionsWidget(
                    name="office_search_org",
                    label=T("Organization"),
                    comment=T("Search for office by organization."),
                    field=["organisation_id"],
                    represent ="%(name)s",
                    cols = 3
                  ),
                  s3base.S3SearchLocationHierarchyWidget(
                    name="office_search_location",
                    comment=T("Search for office by location."),
                    represent ="%(name)s",
                    cols = 3
                  ),
                  s3base.S3SearchLocationWidget(
                    name="office_search_map",
                    label=T("Map"),
                  ),
        ))
    manager.configure(tablename,
                      search_method = office_search)
    # Pre-processor
    def prep(r):
        table = r.table
        if r.representation == "popup":
            organisation = r.vars.organisation_id or \
                           session.s3.organisation_id or ""
            if organisation:
                table.organisation_id.default = organisation
        elif r.representation == "plain":
            # Map popups want less clutter
            table.obsolete.readable = False
            if r.record.type == 5:
                s3.crud_strings[tablename].title_display = T("Warehouse Details")
        if r.record and deployment_settings.has_module("hrm"):
            # Cascade the organisation_id from the office to the staff
            hrm_table = db.hrm_human_resource
            hrm_table.organisation_id.default = r.record.organisation_id
            hrm_table.organisation_id.writable = False
        if r.interactive or r.representation == "aadata":
            if deployment_settings.has_module("inv"):
                # Filter out Warehouses, since they have a dedicated controller
                response.s3.filter = (table.type != 5) | \
                                     (table.type == None)
        if r.interactive:
            if settings.has_module("inv"):
                # Don't include Warehouses in the type dropdown
                s3.org_office_type_opts.pop(5)
                table.type.requires = IS_NULL_OR(IS_IN_SET(s3.org_office_type_opts))
            if r.record and r.record.type == 5: # 5 = Warehouse
                s3.crud_strings[tablename] = s3.org_warehouse_crud_strings
            if r.method == "create":
                table.obsolete.readable = table.obsolete.writable = False
                if r.vars.organisation_id and r.vars.organisation_id != "None":
                    table.organisation_id.default = r.vars.organisation_id
            if r.method and r.method != "read":
                # Don't want to see in Create forms
                # inc list_create (list_fields over-rides)
                table.obsolete.writable = False
                table.obsolete.readable = False
                s3.address_hide(table)
        if r.component:
            if r.component.name == "inv_item" or \
               r.component.name == "recv" or \
               r.component.name == "send":
                # Filter out items which are already in this inventory
                s3db.inv_prep(r)
            elif r.component.name == "human_resource":
                # Filter out people which are already staff for this office
                s3_filter_staff(r)
                # Cascade the organisation_id from the hospital to the staff
                # NOTE(review): hrm_table is only bound earlier when r.record
                # is set AND the "hrm" module is enabled; reaching this branch
                # without that raises NameError — verify the invariant.
                hrm_table.organisation_id.default = r.record.organisation_id
                hrm_table.organisation_id.writable = False
            elif r.component.name == "req":
                if r.method != "update" and r.method != "read":
                    # Hide fields which don't make sense in a Create form
                    # inc list_create (list_fields over-rides)
                    s3db.req_create_form_mods()
        return True
    s3.prep = prep
    rheader = s3db.org_rheader
    return s3_rest_controller("org", "office", rheader=rheader)
# END =========================================================================
|
[
"fran@aidiq.com"
] |
fran@aidiq.com
|
baadca2b3a178ee55b51a4210be750d6080444b5
|
d5c8f7f7481c86266dfccb862e16c59132f5eb34
|
/config.py
|
414128cefca0776539c2eef574cb551b9d9efe7c
|
[] |
no_license
|
AngleMAXIN/BlogGo
|
59aec933b7743ba19a3eca147ce61b864da79399
|
49dd6cd930ae059d533bcfd2f1aa3cd3197deb95
|
refs/heads/master
| 2021-04-12T12:32:32.895508
| 2018-04-24T09:17:07
| 2018-04-24T09:17:07
| 126,703,268
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,581
|
py
|
#/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Flask/SQLAlchemy configuration objects for the BlogGo application.

Fixes: removed the duplicated shebang/coding header lines, and the
database connection settings are now read from the environment so
credentials need not live in source control — the fallbacks preserve
the previous hard-coded values, so behavior is unchanged when the
environment variables are unset.
"""
import os

basedir = os.path.abspath(os.path.dirname(__file__))

# Database connection settings (env-overridable, same defaults as before).
HOSTNAME = os.environ.get('DB_HOSTNAME', '127.0.0.1')
PORT = os.environ.get('DB_PORT', '3306')
USERNAME = os.environ.get('DB_USERNAME', 'root')
PASSWORD = os.environ.get('DB_PASSWORD', 'maxin')


class Config(object):
    """Settings shared by every environment."""

    # SECURITY: the literal fallback is for local development only.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'this is a secret string'
    SQLALCHEMY_TRACK_MODIFICATIONS = True

    @staticmethod
    def init_app(app):
        """Hook for environment-specific application setup (no-op here)."""
        pass


class DevelopmentConfig(Config):
    """Development database settings (MySQL)."""

    DEBUG = True
    CACHE_TYPE = 'simple'
    DATABASE = 'bloggo_dev_db'
    SQLALCHEMY_DATABASE_URI = 'mysql://{}:{}@{}:{}/{}?charset=utf8'.format(
        USERNAME, PASSWORD, HOSTNAME, PORT, DATABASE)


class TestingConfig(Config):
    """Testing database settings (SQLite file by default)."""

    TESTING = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URI') or \
        'sqlite:///' + os.path.join(basedir, 'test.db')


class ProductionConfig(Config):
    """Production database settings (SQLite file by default)."""

    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URI') or \
        'sqlite:///' + os.path.join(basedir, 'data.db')


# Name -> configuration class lookup used by the application factory.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig,
}
|
[
"1678190746@qq.com"
] |
1678190746@qq.com
|
8ffbcbd15ddfd36fa85faee3a276f226df7e7419
|
21f98d8bb31264c94e7a98fb8eb806d7f5bd396e
|
/Fenwick Tree/218. The Skyline Problem.py
|
0ce89568e86b08184393b63f0be9409ec3eb3a60
|
[] |
no_license
|
mcfair/Algo
|
e1500d862a685e598ab85e8ed5b68170632fdfd0
|
051e2a9f6c918907cc8b665353c46042e7674e66
|
refs/heads/master
| 2021-10-31T01:25:46.917428
| 2021-10-08T04:52:44
| 2021-10-08T04:52:44
| 141,460,584
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,574
|
py
|
class Solution(object):
def getSkyline(self, buildings):
"""
:type buildings: List[List[int]]
:rtype: List[List[int]]
"""
points =[]
endsAt ={}
for L, R, H in buildings:
left, right = (L, 'left', -H), (R, 'right', -H) #'left' < 'right', don't use 'start' 'end'
points += [left,right]
endsAt[left] = right
points = sorted(list(set(points))) #remove duplicates and sort
rank = {points[i]: i for i in range(len(points))} # sorted index as rank
tree = BIT(len(points))
ret = []
for p in points:
if p[1] == 'left':
q = endsAt[p]
tree.update(rank[q], -p[-1]) # end idx is exclusive
h = tree.query(rank[p]+1) # start idx is inclusive
#not sure about this part!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if not ret or ret[-1][1] != h:
if ret and ret[-1][0] == p[0]:
ret[-1][1] = h
else:
ret.append([p[0], h])
return ret
class BIT(object):
def __init__(self, m):
self.bit = [0] *(m+1)
def update(self, p, h):
while p > 0: # scope of h is towards left
self.bit[p] = max(self.bit[p], h)
p -= p & -p
def query(self,p):
ret = 0
while p <= len(self.bit): # check anything to the right that has a higher value
ret = max(ret, self.bit[p])
p += p & -p
return ret
|
[
"noreply@github.com"
] |
mcfair.noreply@github.com
|
14d9deb91f33a952aa11d3c81ab4256f362e0ba0
|
f98a2875e0cdc84341fe8e37b11336368a257fe7
|
/auxiliary/accessing_comparative_data_sensitivity_runs.py
|
f6c3cd9c7f009bf8ac5af1f2218e353a026000d2
|
[
"MIT"
] |
permissive
|
anhnguyendepocen/PolicySpace2
|
eaa83533b7ad599af677ce69353841e665b447d0
|
d9a450e47651885ed103d3217dbedec484456d07
|
refs/heads/master
| 2023-08-28T04:55:40.834445
| 2021-10-21T18:50:03
| 2021-10-21T18:50:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,431
|
py
|
import os
from shutil import copyfile
import numpy as np
import pandas as pd
from analysis.output import OUTPUT_DATA_SPEC as cols
import read_meta_json
def moving_files(path1, path2):
path = os.path.join(path1, path2)
dirs = os.listdir(path)
for each in dirs:
if '=' in each and '.csv' not in each:
t_path = os.path.join(path, each, 'avg', 'temp_stats.csv')
destination = f'{path}/{each}.csv'
copyfile(t_path, destination)
def reading_summarizing_tables(path1, path2, col='families_helped'):
d = pd.read_csv(os.path.join(path1, path2, t), sep=';', names=cols['stats']['columns'])
return f'{t}: avg {col} {np.mean(d[col]):.2f}\n'
if __name__ == '__main__':
p1 = r'\\storage1\carga\modelo dinamico de simulacao\Exits_python\PS2020'
fls = os.listdir(p1)
interest = [f for f in fls if f.startswith('POLICIES')]
for run in interest:
try:
moving_files(p1, run)
except FileNotFoundError:
continue
with open(f'{p1}/Report_{run}.txt', 'a') as f:
f.write(read_meta_json.read_meta(p1, run))
for each in cols['stats']['columns'][1:]:
tables = [f for f in os.listdir(os.path.join(p1, run)) if f.endswith('.csv')]
for t in tables:
f.write(reading_summarizing_tables(p1, run, each))
f.write('_____________\n')
|
[
"furtadobb@gmail.com"
] |
furtadobb@gmail.com
|
3421a3b55caa64222cf08320ee46daabcf0b12f6
|
2f0d56cdcc4db54f9484b3942db88d79a4215408
|
/.history/Python_Learning/lesson13_20200329144341.py
|
c8241db28a95319939d792a162f3f2bb4400c80a
|
[] |
no_license
|
xiangxing98/xiangxing98.github.io
|
8571c8ee8509c0bccbb6c2f3740494eedc53e418
|
23618666363ecc6d4acd1a8662ea366ddf2e6155
|
refs/heads/master
| 2021-11-17T19:00:16.347567
| 2021-11-14T08:35:01
| 2021-11-14T08:35:01
| 33,877,060
| 7
| 1
| null | 2017-07-01T16:42:49
| 2015-04-13T15:35:01
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,467
|
py
|
# -*- encoding: utf-8 -*-
# !/usr/bin/env python
'''
@File : lesson13.py
@Time : 2020/03/29 11:43:27
@Author : Stone_Hou
@Version : 1.0
@Contact : xiangxing985529@163.com
@License : (C)Copyright 2010-2020, Stone_Hou
@Desc : None
'''
# here put the import lib
# Practice #01
print('He said,"I\'m yours!"')
# He said, "I'm yours!"
# Practice #02
print('\\\\_v_//')
# \\_v_//
# Practice #03
print("Stay hungry, \n\
stay foolish.\n\
-- Steve Jobs")
# Stay hungry,
# stay foolish.
# -- Steve Jobs
# Practice #04
# print("----\n")
# n = 5
# for i in range(1, n+1):
# for j in range(0, (n + 1)/2):
# print("*")
# for j in range(0, (n + 1)/2):
# print(" ")
# print("")
# print("----\n")
# 第1行, 1个*, 四个空格
# 第2行, 一个*, 四个空格
# *
# ***
# *****
# ***
# *
# Practice #05
# 输入一个大于等于1的值n,输出星号(*)组成的等腰三角形,底边长为n
# 例:
# 输入
# 3
# 输出
# *
# * *
# * * *
print("\n输出星号(*)组成的等腰三角形,底边长为n\n")
n = int(input())
for i in range(1, n+1): # n row
for j in range(0, n-i): # i row, need n-1 blank
print(' ', end='')
for j in range(0, i): # i row, need i *
print('*', end='')
print() # blank row
print("-------------------------\n")
# 打印左下角三角形:for i in range(10):之后,range(0,i)
# 打印右上角三角形:在左下角的基础上,将"-"变成" "空格
print("-------------------\n打印左下角三角形\n")
for i in range(10):
for j in range(0, i):
print("-", end=" ")
for j in range(i, 10):
print("$", end=" ")
print("")
print("-------------------\n")
# 打印左下角三角形
# $ $ $ $ $ $ $ $ $ $
# - $ $ $ $ $ $ $ $ $
# - - $ $ $ $ $ $ $ $
# - - - $ $ $ $ $ $ $
# - - - - $ $ $ $ $ $
# - - - - - $ $ $ $ $
# - - - - - - $ $ $ $
# - - - - - - - $ $ $
# - - - - - - - - $ $
# - - - - - - - - - $
# -------------------
# 打印左上角三角形:for i in range(10):之后,range(0,10-i)
# 打印右下角三角形:在左上角的基础上,将"-"变成" "空格
print("-------------------\n打印左上角三角形\n")
for i in range(10):
for j in range(0, 10 - i):
print("-", end=" ")
for k in range(10 - i, 10):
print("$", end=" ")
print("")
print("-------------------\n")
# -------------------
# 打印左上角三角形
# - - - - - - - - - -
# - - - - - - - - - $
# - - - - - - - - $ $
# - - - - - - - $ $ $
# - - - - - - $ $ $ $
# - - - - - $ $ $ $ $
# - - - - $ $ $ $ $ $
# - - - $ $ $ $ $ $ $
# - - $ $ $ $ $ $ $ $
# - $ $ $ $ $ $ $ $ $
# -------------------
# 打印上三角,只需要将"-",去掉
print("-------------------\n打印上三角\n")
for i in range(10):
for j in range(0, 10 - i):
print(end=" ")
for k in range(10 - i, 10):
print("$", end=" ")
print("")
print("-------------------\n")
# -------------------
# 打印上三角
# $
# $ $
# $ $ $
# $ $ $ $
# $ $ $ $ $
# $ $ $ $ $ $
# $ $ $ $ $ $ $
# $ $ $ $ $ $ $ $
# $ $ $ $ $ $ $ $ $
# -------------------
# 打印倒三角,只需要将"-",去掉
print("-------------------\n打印倒三角\n")
for i in range(10):
for j in range(0,i):
print(end=" ")
for j in range(i,10):
print("$", end=" ")
print("")
print("-------------------\n")
|
[
"xiangxing985529@163.com"
] |
xiangxing985529@163.com
|
d2281b34b9b3168f241bc8f861adadeb4a0716cc
|
da4303d7ee77310c5a21997f1bfe0746affa3e0c
|
/papers/resources/best.py
|
395be833ef41ff557a22bc6013b545f7f5ac067d
|
[
"MIT"
] |
permissive
|
AaronCWacker/copycat
|
44bf0626965cfba12c304d9e44fe848ce0791cc7
|
c05e3e832ce614e8ccea9b47cb1d3ac31d2b18c2
|
refs/heads/master
| 2022-04-09T03:45:41.076460
| 2020-03-26T11:39:11
| 2020-03-26T11:39:11
| 378,260,131
| 1
| 0
|
MIT
| 2021-06-18T20:38:32
| 2021-06-18T20:38:31
| null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
def _working_best(temp, prob):
s = .5 # convergence
r = 1.05 # power
u = prob ** r if prob < .5 else prob ** (1/r)
return _weighted(temp, prob, s, u)
def _soft_best(temp, prob):
s = .5 # convergence
r = 1.05 # power
u = prob ** r if prob < .5 else prob ** (1/r)
return _weighted(temp, prob, s, u)
def _parameterized_best(temp, prob):
alpha = 5
beta = 1
s = .5
s = (alpha * prob + beta * s) / (alpha + beta)
r = 1.05
u = prob ** r if prob < .5 else prob ** (1/r)
return _weighted(temp, prob, s, u)
|
[
"lucassaldyt@gmail.com"
] |
lucassaldyt@gmail.com
|
c29a0940b61699c23849ff51675b3dd3b4b3a82f
|
2af6a5c2d33e2046a1d25ae9dd66d349d3833940
|
/res/scripts/client/gui/scaleform/daapi/view/lobby/boosters/__init__.py
|
8931986bafcc9a61774fc38ab7ba64eb901eeea7
|
[] |
no_license
|
webiumsk/WOT-0.9.12-CT
|
e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2
|
2506e34bd6634ad500b6501f4ed4f04af3f43fa0
|
refs/heads/master
| 2021-01-10T01:38:38.080814
| 2015-11-11T00:08:04
| 2015-11-11T00:08:04
| 45,803,240
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 414
|
py
|
# 2015.11.10 21:26:35 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/boosters/__init__.py
__author__ = 'i_malashenko'
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\lobby\boosters\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:26:35 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
908b043137e9fe004778131309d0327786b0e893
|
a3635ea0c475cc7e4655ed786e870361eede6464
|
/format/_fix_incomplete_iso9660/directory_record.py
|
1d2a9176ba8e8af6f6fc5efb0ddb612f1b9995c8
|
[] |
no_license
|
Nicba1010/PS-Tools
|
91802ce37309ef0dcd52771a8c2c0c3c2f6c4a8b
|
2c2b7a83b4bea39be4459bb980adfd37bd44c2be
|
refs/heads/master
| 2020-04-14T00:25:09.976960
| 2019-03-05T09:14:12
| 2019-03-05T09:14:12
| 163,532,843
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,805
|
py
|
from datetime import datetime
from aenum import IntFlag
from format._fix_incomplete_iso9660.errors import InvalidISOException
from format._fix_incomplete_iso9660.utils import unpack_both_endian_u32, unpack_directory_record_datetime, \
unpack_both_endian_u16, unpack_str_a
from utils.utils import unpack_u8
class FileFlags(IntFlag):
EXISTENCE = 1 # If TRUE, user doesn't need to know about file
DIRECTORY = 2 # If TRUE, record identifies a directory
ASSOCIATED_FILE = 4 # If TRUE, file is an Associated File
RECORD = 8 # If TRUE, structure in file has a format specified in the Extended Attribute Record
PROTECTION = 16 # If TRUE, owner specified for file also one+ of the even bits is set to 1 in the EAR
RESERVED_1 = 32
RESERVED_2 = 64
MULTI_EXTENT = 128 # If TRUE, not the final Directory Record for the file
class DirectoryRecord(object):
def __init__(self, data: bytes):
#: Directory Record Length
self.length: int = unpack_u8(data[0:1])
#: Extended Attribute Record Length
self.extended_attribute_record_length: int = unpack_u8(data[1:2])
#: Location of LBA LSB&MSB
self.lba_location: int = unpack_both_endian_u32(data[2:10])
#: Data Length LSB&MSB
self.data_length: int = unpack_both_endian_u32(data[10:18])
#: Recording Date and Time
self.recording_datetime: datetime = unpack_directory_record_datetime(data[18:25])
#: File Flags
self.file_flags: FileFlags = FileFlags(data[25])
#: File Unit Size
self.file_unit_size: int = unpack_u8(data[26:27])
#: Interleave gap size for files recorded in interleaved mode, 0x00 otherwise
self.interleave_gap: int = unpack_u8(data[27:28])
#: Volume Sequence Number (Number of Disk this is recorded on) LSB & MSB
self.volume_sequence_number: int = unpack_both_endian_u16(data[28:32])
#: File Identifier Length (File Name)
self.file_identifier_length: int = unpack_u8(data[32:33])
file_identifier_end_offset: int = 33 + self.file_identifier_length
#: File Identifier
self.file_identifier: str = unpack_str_a(data[33:33 + self.file_identifier_length])
#: Padding (None if file identifier length even, the field does not exist in that case)
self.padding: int
if self.file_identifier_length % 2 == 0 or self.file_identifier == b'\x00'.decode('ASCII'):
self.padding = None
else:
self.padding = data[file_identifier_end_offset]
if self.padding is not None and self.padding != 0x00:
raise InvalidISOException("Directory Record Padding is {hex(self.padding)} instead of 0x00 or None")
# TODO: Implement Extensions! Also System Use Field!
|
[
"nicba1010@gmail.com"
] |
nicba1010@gmail.com
|
1763030ef95c32889f4303497003b01b717874d2
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/Fz92j7nQEkoRXhRE7_8.py
|
4c783b20eedaf0eec0f44af589ca079bcadde34b
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,021
|
py
|
"""
A frog wants to cross a river. Unfortunately, he can't jump across in a single
leap. Luckily, there are `n` stones in the river.
The frog can jump from the near bank to stone `1` and from stone `n` to the
far bank. He can also jump from stone to stone, forward and backward. However,
on each stone, a number `j` is written and he must only jump exactly `j`
stones backward or forward.
Return the minimum number of jumps to cross the river (including jumps to the
first stone and from the last stone (or any other stone, if possible) to the
far bank) or `no chance :-(` if it's not possible to cross the river.
### Examples
jumping_frog(5, [1, 1, 1, 1, 1]) ➞ 6
jumping_frog(5, [1, 3, 1, 1, 1]) ➞ 4
jumping_frog(5, [1, 1, 0, 1, 1]) ➞ "no chance :-("
### Notes
* The frog may also reach the far bank from another stone than `n` if a large enough number is written on it.
* `n` is at least 2.
"""
def jumping_frog(n, stones):
if n < 2:
return None
jmpCount = 1 # one jump from the left bank to 1st element
i = 0
while i < (n - 1): # visit each stone except the last, (n-1) is the last stone
distanceToEnd = (n - 1) - i
jmpValue = stones[i]
#can we jump ahead? i.e (is there a distance to last stone)
if 0 < jmpValue <= distanceToEnd:
# check if we can jump backward ("we did'n reach left bank")
if (i - jmpValue) >= 0 and stones[i - jmpValue] - 2 * jmpValue > stones[i + jmpValue]:
i -= jmpValue
else:
i += jmpValue
# increase jump counter
jmpCount += 1
# if distance == 0, we reached the end
if (n - 1) - i == 0:
return jmpCount + 1
elif 0 < jmpValue > distanceToEnd: #if stone has abig num to cross the right bank
return jmpCount + 1
elif jmpValue == 0 and distanceToEnd > 0: #if we got stuck on a zero num before the end
return "no chance :-("
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
f9b655af2e1bbd8ac1b50e7fc3c73ce38f411749
|
19da1a56f137a08772c347cf974be54e9c23c053
|
/lib/adafruit_lidarlite.py
|
1893b50a7169d64063eb64031a74fdd87eb79bd7
|
[] |
no_license
|
mk53202/mk53202-timeclock-pyportal
|
d94f45a9d186190a4bc6130077baa6743a816ef3
|
230a858d429f8197c00cab3e67dcfd3b295ffbe0
|
refs/heads/master
| 2021-02-04T05:38:25.533292
| 2020-02-27T22:45:56
| 2020-02-27T22:45:56
| 243,626,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,421
|
py
|
# The MIT License (MIT)
#
# Copyright (c) 2018 ladyada for adafruit industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_lidarlite`
====================================================
A CircuitPython & Python library for Garmin LIDAR Lite sensors over I2C
* Author(s): ladyada
Implementation Notes
--------------------
**Hardware:**
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Bus Device library: https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
"""
# imports
import time
from adafruit_bus_device.i2c_device import I2CDevice
from digitalio import Direction
from micropython import const
__version__ = "1.1.1"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_LIDARLite.git"
_ADDR_DEFAULT = const(0x62)
_REG_ACQ_COMMAND = const(0x00)
_CMD_RESET = const(0)
_CMD_DISTANCENOBIAS = const(3)
_CMD_DISTANCEWITHBIAS = const(4)
CONFIG_DEFAULT = 0
CONFIG_SHORTFAST = 1
CONFIG_DEFAULTFAST = 2
CONFIG_MAXRANGE = 3
CONFIG_HIGHSENSITIVE = 4
CONFIG_LOWSENSITIVE = 5
STATUS_BUSY = 0x01
STATUS_REF_OVERFLOW = 0x02
STATUS_SIGNAL_OVERFLOW = 0x04
STATUS_NO_PEAK = 0x08
STATUS_SECOND_RETURN = 0x10
STATUS_HEALTHY = 0x20
STATUS_SYS_ERROR = 0x40
# The various configuration register values, from arduino library
_LIDAR_CONFIGS = ((0x80, 0x08, 0x00), # default
(0x1D, 0x08, 0x00), # short range, high speed
(0x80, 0x00, 0x00), # default range, higher speed short range
(0xFF, 0x08, 0x00), # maximum range
(0x80, 0x08, 0x80), # high sensitivity & error
(0x80, 0x08, 0xb0)) # low sensitivity & error
class LIDARLite:
"""
A driver for the Garmin LIDAR Lite laser distance sensor.
:param i2c_bus: The `busio.I2C` object to use. This is the only
required parameter.
:param int address: (optional) The I2C address of the device to set after initialization.
"""
def __init__(self, i2c_bus, *, reset_pin=None,
configuration=CONFIG_DEFAULT, address=_ADDR_DEFAULT):
"""Initialize the hardware for the LIDAR over I2C. You can pass in an
optional reset_pin for when you call reset(). There are a few common
configurations Garmin suggests: CONFIG_DEFAULT, CONFIG_SHORTFAST,
CONFIG_DEFAULTFAST, CONFIG_MAXRANGE, CONFIG_HIGHSENSITIVE, and
CONFIG_LOWSENSITIVE. For the I2C address, the default is 0x62 but if you
pass a different number in, we'll try to change the address so multiple
LIDARs can be connected. (Note all but one need to be in reset for this
to work!)"""
self.i2c_device = I2CDevice(i2c_bus, address)
self._buf = bytearray(2)
self._bias_count = 0
self._reset = reset_pin
time.sleep(0.5)
self.configure(configuration)
self._status = self.status
def reset(self):
"""Hardware reset (if pin passed into init) or software reset. Will take
100 readings in order to 'flush' measurement unit, otherwise data is off."""
# Optional hardware reset pin
if self._reset is not None:
self._reset.direction = Direction.OUTPUT
self._reset.value = True
self._reset.value = False
time.sleep(0.01)
self._reset.value = True
else:
try:
self._write_reg(_REG_ACQ_COMMAND, _CMD_RESET)
except OSError:
pass # it doesnt respond well once reset
time.sleep(1)
# take 100 readings to 'flush' out sensor!
for _ in range(100):
try:
self.read_distance(True)
except RuntimeError:
pass
def configure(self, config):
"""Set the LIDAR desired style of measurement. There are a few common
configurations Garmin suggests: CONFIG_DEFAULT, CONFIG_SHORTFAST,
CONFIG_DEFAULTFAST, CONFIG_MAXRANGE, CONFIG_HIGHSENSITIVE, and
CONFIG_LOWSENSITIVE."""
settings = _LIDAR_CONFIGS[config]
self._write_reg(0x02, settings[0])
self._write_reg(0x04, settings[1])
self._write_reg(0x1c, settings[2])
def read_distance(self, bias=False):
"""Perform a distance reading with or without 'bias'. It's recommended
to take a bias measurement every 100 non-bias readings (they're slower)"""
if bias:
self._write_reg(_REG_ACQ_COMMAND, _CMD_DISTANCEWITHBIAS)
else:
self._write_reg(_REG_ACQ_COMMAND, _CMD_DISTANCENOBIAS)
dist = self._read_reg(0x8F, 2)
if self._status & (STATUS_NO_PEAK | STATUS_SECOND_RETURN):
raise RuntimeError("Measurement failure")
if (self._status & STATUS_SYS_ERROR) or (not self._status & STATUS_HEALTHY):
raise RuntimeError("System failure")
return dist[0] << 8 | dist[1]
@property
def distance(self):
"""The measured distance in cm. Will take a bias reading every 100 calls"""
self._bias_count -= 1
if self._bias_count < 0:
self._bias_count = 100 # every 100 reads, check bias
return self.read_distance(self._bias_count <= 0)
@property
def status(self):
"""The status byte, check datasheet for bitmask"""
buf = bytearray([0x1])
with self.i2c_device as i2c:
i2c.write_then_readinto(buf, buf)
return buf[0]
def _write_reg(self, reg, value):
self._buf[0] = reg
self._buf[1] = value
with self.i2c_device as i2c:
#print("Writing: ", [hex(i) for i in self._buf])
i2c.write(self._buf)
time.sleep(0.001) # there's a delay in arduino library
def _read_reg(self, reg, num):
while True:
self._status = self.status
if not self._status & STATUS_BUSY:
break
# no longer busy
self._buf[0] = reg
with self.i2c_device as i2c:
i2c.write_then_readinto(self._buf, self._buf, out_end=1, in_end=num)
#print("Read from ", hex(reg), [hex(i) for i in self._buf])
return self._buf
|
[
"mkoster@stack41.com"
] |
mkoster@stack41.com
|
1e03fdfc67c1b1e1fe60f6a7d9f6981745a37274
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/tests/test_hash.py
|
c10a96478165e914236a8897a388fe57add9eb5a
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,914
|
py
|
# ----------------------------------------------------------------------
# Test noc.core.hash functions
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
import pytest
# NOC modules
from noc.core.hash import hash_str, hash_int, dict_hash_int, dict_hash_int_args
@pytest.mark.parametrize(
"value,expected",
[
(0, b"J^\xa04\xb0\x0b\xaf\xb6"),
("0", b"J^\xa04\xb0\x0b\xaf\xb6"),
(None, b"\x1a3\x12\x943.\xcdm"),
("None", b"\x1a3\x12\x943.\xcdm"),
],
)
def test_hash_str(value, expected):
assert hash_str(value) == expected
@pytest.mark.parametrize(
"value,expected",
[
(0, 5358896754769768374),
("0", 5358896754769768374),
(None, 1887873096521534829),
("None", 1887873096521534829),
],
)
def test_hash_int(value, expected):
assert hash_int(value) == expected
@pytest.mark.parametrize(
"value,expected",
[
({}, -2954230017111125474),
({"k": 1}, -7829327169641555127),
({"k": "1"}, -7829327169641555127),
({"k": 1, "v": "2"}, 6473659485526827658),
({"k": 1, "v": None}, 1975760527053142894),
({"k": 1, "v": "None"}, 1975760527053142894),
],
)
def test_dict_hash_int(value, expected):
assert dict_hash_int(value) == expected
@pytest.mark.parametrize(
"value,expected",
[
({}, -2954230017111125474),
({"k": 1}, -7829327169641555127),
({"k": "1"}, -7829327169641555127),
({"k": 1, "v": "2"}, 6473659485526827658),
({"k": 1, "v": None}, 1975760527053142894),
({"k": 1, "v": "None"}, 1975760527053142894),
],
)
def test_dict_hash_int_args(value, expected):
assert dict_hash_int_args(**value) == expected
|
[
"dv@nocproject.org"
] |
dv@nocproject.org
|
5e8646400ae70762ff471fffe0fd4633446384d0
|
134ff3c0719d4c0022eb0fb7c859bdbff5ca34b2
|
/desktop/core/ext-py/django-extensions/django_extensions/management/commands/create_app.py
|
3fcf7e6053b65dd8588a8dfd011af9aec35762c9
|
[
"Apache-2.0"
] |
permissive
|
civascu/hue
|
22637f13a4cfc557716557661523131b6ac16da4
|
82f2de44789ff5a981ed725175bae7944832d1e9
|
refs/heads/master
| 2020-03-31T01:50:39.449966
| 2010-07-21T01:05:50
| 2010-07-21T01:07:15
| 788,284
| 0
| 0
|
Apache-2.0
| 2019-02-04T07:03:12
| 2010-07-21T07:34:27
|
Python
|
UTF-8
|
Python
| false
| false
| 3,259
|
py
|
import os
import re
import django_extensions
from django.core.management.base import CommandError, LabelCommand, _make_writeable
from optparse import make_option
class Command(LabelCommand):
option_list = LabelCommand.option_list + (
make_option('--template', '-t', action='store', dest='app_template',
help='The path to the app template'),
make_option('--parent_path', '-p', action='store', dest='parent_path',
help='The parent path of the app to be created'),
)
help = ("Creates a Django application directory structure based on the specified template directory.")
args = "[appname]"
label = 'application name'
requires_model_validation = False
can_import_settings = True
def handle_label(self, label, **options):
project_dir = os.getcwd()
project_name = os.path.split(project_dir)[-1]
app_name =label
app_template = options.get('app_template') or os.path.join(django_extensions.__path__[0], 'conf', 'app_template')
app_dir = os.path.join(options.get('parent_path') or project_dir, app_name)
if not os.path.exists(app_template):
raise CommandError("The template path, %r, does not exist." % app_template)
if not re.search(r'^\w+$', label):
raise CommandError("%r is not a valid application name. Please use only numbers, letters and underscores." % label)
try:
os.makedirs(app_dir)
except OSError, e:
raise CommandError(e)
copy_template(app_template, app_dir, project_name, app_name)
def copy_template(app_template, copy_to, project_name, app_name):
"""copies the specified template directory to the copy_to location"""
import shutil
# walks the template structure and copies it
for d, subdirs, files in os.walk(app_template):
relative_dir = d[len(app_template)+1:]
if relative_dir and not os.path.exists(os.path.join(copy_to, relative_dir)):
os.mkdir(os.path.join(copy_to, relative_dir))
for i, subdir in enumerate(subdirs):
if subdir.startswith('.'):
del subdirs[i]
for f in files:
if f.endswith('.pyc') or f.startswith('.DS_Store'):
continue
path_old = os.path.join(d, f)
path_new = os.path.join(copy_to, relative_dir, f.replace('app_name', app_name))
if os.path.exists(path_new):
path_new = os.path.join(copy_to, relative_dir, f)
if os.path.exists(path_new):
continue
path_new = path_new.rstrip(".tmpl")
fp_old = open(path_old, 'r')
fp_new = open(path_new, 'w')
fp_new.write(fp_old.read().replace('{{ app_name }}', app_name).replace('{{ project_name }}', project_name))
fp_old.close()
fp_new.close()
try:
shutil.copymode(path_old, path_new)
_make_writeable(path_new)
except OSError:
sys.stderr.write(style.NOTICE("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new))
|
[
"bcwalrus@cloudera.com"
] |
bcwalrus@cloudera.com
|
585762a8c98cac23f2e45d296b0bc57b3b31cfe9
|
5a25f4f5f9c7cba03f9b5848eafc01a760c88768
|
/analysis/bsens_cleanest_diff_zooms.py
|
86b02dd38170047b195bc6e7a5b624af3f12c0d6
|
[] |
no_license
|
ALMA-IMF/reduction
|
b3579a548fe20193b807a7415a040f351c879beb
|
de606cc6bc542f088223ce84082ff333739c9007
|
refs/heads/master
| 2023-06-22T13:21:13.841999
| 2023-06-12T09:17:50
| 2023-06-12T09:17:50
| 115,018,799
| 9
| 29
| null | 2023-06-12T09:17:51
| 2017-12-21T15:13:55
|
Python
|
UTF-8
|
Python
| false
| false
| 7,898
|
py
|
import requests
import re
import numpy as np
from astropy import table
import io
import time
from astropy import units as u
import radio_beam
import regions
from astropy.io import fits
from astropy.visualization import simple_norm
from astropy import stats, convolution, wcs, coordinates
from spectral_cube import SpectralCube
import pylab as pl
import spectral_cube
from spectralindex import prefixes
import warnings
warnings.filterwarnings('ignore', category=spectral_cube.utils.StokesWarning)
warnings.filterwarnings('ignore', category=UserWarning)
warnings.filterwarnings('ignore', category=pl.matplotlib.cbook.MatplotlibDeprecationWarning)
np.seterr('ignore')
def bsens_cleanest_diff(finaliter_prefix_b3, finaliter_prefix_b6,
cutoutregion, fignum=1,
finaliter_prefix_b3_bsens=None,
finaliter_prefix_b6_bsens=None,
normpars_b3=None,
normpars_b6=None,
noco="",
non2hp="",
basepath='/home/adam/work/alma-imf/reduction/', ):
image_b3 = SpectralCube.read(f'{finaliter_prefix_b3}.image.tt0', format='casa_image').subcube_from_ds9region(cutoutregion)
if finaliter_prefix_b3_bsens is None:
if non2hp:
finaliter_prefix_b3_bsens = finaliter_prefix_b3.replace("cleanest","bsens_nobright").replace("merged_12M", f"merged_bsens_12M{non2hp}")
else:
finaliter_prefix_b3_bsens = finaliter_prefix_b3.replace("cleanest","bsens").replace("merged_12M", f"merged_bsens{non2hp}_12M")
bsens_b3 = SpectralCube.read(f'{finaliter_prefix_b3_bsens}.image.tt0', format='casa_image').subcube_from_ds9region(cutoutregion)
image_b6 = SpectralCube.read(f'{finaliter_prefix_b6}.image.tt0', format='casa_image').subcube_from_ds9region(cutoutregion)
if finaliter_prefix_b6_bsens is None:
if noco:
finaliter_prefix_b6_bsens = finaliter_prefix_b6.replace("cleanest","bsens_nobright").replace("merged_12M", f"merged_bsens_12M{noco}")
else:
finaliter_prefix_b6_bsens = finaliter_prefix_b6.replace("cleanest","bsens").replace("merged_12M", f"merged_bsens{noco}_12M")
bsens_b6 = SpectralCube.read(f'{finaliter_prefix_b6_bsens}.image.tt0', format='casa_image').subcube_from_ds9region(cutoutregion)
# image_b3 = image_b3 * u.beam / image_b3.beam.sr
# image_b6 = image_b6 * u.beam / image_b6.beam.sr
# bsens_b3 = bsens_b3 * u.beam / bsens_b3.beam.sr
# bsens_b6 = bsens_b6 * u.beam / bsens_b6.beam.sr
tgt_unit = u.mJy if bsens_b6.unit.is_equivalent(u.mJy) else u.mJy/u.beam
fieldname = os.path.basename(finaliter_prefix_b6).split("_")[0]
print(fieldname)
diff_b3 = bsens_b3 - image_b3
diff_b6 = bsens_b6 - image_b6
normpars_b3_default = dict(min_percent=0.1, max_percent=99.9, stretch='linear')
normpars_b6_default = dict(min_percent=0.1, max_percent=99.9, stretch='linear')
if normpars_b3 is not None:
normpars_b3_default.update(normpars_b3)
normpars_b3 = normpars_b3_default
if normpars_b6 is not None:
normpars_b6_default.update(normpars_b6)
normpars_b6 = normpars_b6_default
fig = pl.figure(num=fignum, figsize=(6,6))
fig.clf()
ax = pl.subplot(1,1,1,label='B3', projection=diff_b3[0].wcs)
im = ax.imshow(diff_b3.to(tgt_unit)[0].value, norm=simple_norm(diff_b3.to(tgt_unit)[0].value, **normpars_b3), cmap='gray')
ax.set_xlabel('Right Ascension (ICRS)')
ax.set_ylabel('Declination (ICRS)')
cb = pl.colorbar(mappable=im)
cb.set_label("$S_\\nu$ [mJy beam$^{-1}$]")
fig2 = pl.figure(num=fignum+1, figsize=(6,6))
ax2 = pl.subplot(1,1,1,label='B6', projection=diff_b6[0].wcs)
im = ax2.imshow(diff_b6.to(tgt_unit)[0].value, norm=simple_norm(diff_b6.to(tgt_unit)[0].value, **normpars_b6), cmap='gray')
ax2.set_xlabel('Right Ascension (ICRS)')
ax2.set_ylabel('Declination (ICRS)')
cb = pl.colorbar(mappable=im)
cb.set_label("$S_\\nu$ [mJy beam$^{-1}$]")
# ax2.set_yticklabels([])
# ax2.set_ylabel("")
# lat = ax2.coords['dec']
# lat.set_ticklabel_position('r')
# lat.set_axislabel_position('r')
# lat.set_axislabel('Declination')
return fig,fig2
normpars = {'W51IRS2': {
'normpars_b3': {'max_percent': None, "min_percent": None, "min_cut":-0.5, "max_cut":0.5, 'stretch':'linear'},
'normpars_b6': {'max_percent':99.99, "min_percent": 1, 'stretch':'linear'}
},
'W43MM2': {'normpars_b6': {'max_percent': 99.5}},
"G333": {'normpars_b6': {'max_percent': None, "min_percent": None, "min_cut":-7, "max_cut":7, 'stretch':'linear'},},
}
cutoutregions = {
"G008": ("fk5; box(271.579, -21.6255, 30\",30\")",),
"G10": (
"fk5; box(272.620167, -19.93008, 30\",30\")",
),
"G12": (
"fk5; box(273.5575, -17.92900, 70\", 70\")",
),
"G328": (
"fk5; box(239.499, -53.9668, 30\", 30\")",
),
"G327": (
"fk5; box(15:53:07,-54:37:10, 45\",45\")",
),
"G333": (
"fk5; box(245.539, -50.1002, 60\",60\")",
),
"G337": (
"fk5; box(250.294, -47.135, 20\", 20\")",
),
"G338": (
"fk5; box(250.142, -45.694, 30\", 30\")",
),
"G351": (
"fk5; box(261.6787, -36.1545, 30\", 30\")",
),
"G353": (
"fk5; box(262.6120, -34.696, 60\", 60\")",
),
"W43MM3": (
"fk5; box(281.9241, -2.007, 20\", 20\")",
),
"W43MM2": (
"fk5; box(281.9025, -2.0152, 25\", 25\")",
),
"W43MM1": (
"fk5; box(18:47:46.8714384205, -1:54:23.9163751512, 25\", 25\")",
),
"W51IRS2": (
'fk5; box(19:23:39.9340,+14:31:09.099,11.912",11.502",6.5033172e-06)',
"fk5; box(19:23:39.975,+14:31:08.2,25\",25\")",
),
"W51-E": (
"fk5; box(19:23:43.90,+14:30:30.0,20\",20\")",
),
}
if __name__ == "__main__":
    import os
    # Run from the release directory on the cluster; fall back to the local
    # checkout when the cluster path is unavailable.
    try:
        os.chdir('/orange/adamginsburg/ALMA_IMF/2017.1.01355.L/February2021Release')
    except FileNotFoundError:
        os.chdir('/home/adam/Dropbox_UFL/ALMA-IMF/December2020Release/')
    if not os.path.exists('bsens_diff_zooms'):
        os.mkdir('bsens_diff_zooms')
    pl.rcParams['font.size'] = 14
    pl.rcParams['image.origin'] = 'lower'
    pl.rcParams['image.interpolation'] = 'none'
    pl.rcParams['figure.facecolor'] = 'w'
    # why did I add this override?  It's wrong (I had 4 instead of 6)
    #prefixes['G338']['finaliter_prefix_b6_bsens'] = 'G338.93/B6/bsens/G338.93_B6_uid___A001_X1296_X14f_continuum_merged_bsens_12M_robust0_selfcal6_finaliter'
    # Pass 1 (February 2021 release): W43MM1 temporarily points at the
    # X12f execution block for this release only; restored after the loop.
    prefixes['W43MM1']['finaliter_prefix_b6'] = 'W43-MM1/B6/cleanest/W43-MM1_B6_uid___A001_X12f_X9f_continuum_merged_12M_robust0_selfcal4_finaliter'
    for fieldid, pfxs in prefixes.items():
        fig1,fig2 = bsens_cleanest_diff(**pfxs, cutoutregion=cutoutregions[fieldid][0], **normpars.get(fieldid, {}))
        fig1.savefig(f'bsens_diff_zooms/{fieldid}_bsens_diff_zoom_B3.png', bbox_inches='tight', dpi=300)
        fig2.savefig(f'bsens_diff_zooms/{fieldid}_bsens_diff_zoom_B6.png', bbox_inches='tight', dpi=300)
    # Restore the default W43MM1 prefix before the second pass.
    prefixes['W43MM1']['finaliter_prefix_b6'] = 'W43-MM1/B6/cleanest/W43-MM1_B6_uid___A002_X996c88_X87_continuum_merged_12M_robust0_selfcal4_finaliter'
    # Pass 2 (June 2021 release): use the line-subtracted products
    # (noco = CO removed in B6, non2hp = N2H+ removed in B3 -- TODO confirm
    # the exact meaning of these suffixes against bsens_cleanest_diff).
    os.chdir('/orange/adamginsburg/ALMA_IMF/2017.1.01355.L/June2021Release')
    if not os.path.exists('bsens_diff_zooms'):
        os.mkdir('bsens_diff_zooms')
    for fieldid, pfxs in prefixes.items():
        fig1,fig2 = bsens_cleanest_diff(**pfxs, cutoutregion=cutoutregions[fieldid][0], **normpars.get(fieldid, {}),
                                        noco='_noco', non2hp='_non2hp')
        fig1.savefig(f'bsens_diff_zooms/{fieldid}_bsens_non2hp_diff_zoom_B3.png', bbox_inches='tight', dpi=300)
        fig2.savefig(f'bsens_diff_zooms/{fieldid}_bsens_noco_diff_zoom_B6.png', bbox_inches='tight', dpi=300)
|
[
"keflavich@gmail.com"
] |
keflavich@gmail.com
|
4b858fafb09c3c580a3b53f9df313e263ed7fe98
|
63efb13760eabfc09c288d29264adfff0da6622a
|
/LeetCode/54_spiral_matrix.py
|
334481a1decda2f1c02166c8e4cbcbdeaeb8d063
|
[] |
no_license
|
KKosukeee/CodingQuestions
|
59f0002ace13ebbe3a4a36976277bcb0d3766dbd
|
01fe893ba2e37c9bda79e3081c556698f0b6d2f0
|
refs/heads/master
| 2020-05-16T04:22:31.578974
| 2020-03-30T00:27:51
| 2020-03-30T00:27:51
| 182,775,403
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,600
|
py
|
"""
Solution for 54. Spiral Matrix
https://leetcode.com/problems/spiral-matrix/
"""
class Solution:
    """
    Solution for LeetCode 54, Spiral Matrix.

    Runtime: 36 ms, faster than 77.01% of Python3 online submissions for Spiral Matrix.
    Memory Usage: 13.3 MB, less than 5.18% of Python3 online submissions for Spiral Matrix.
    """
    def spiralOrder(self, matrix):
        """
        Given a matrix of m x n elements (m rows, n columns), return all
        elements of the matrix in clockwise spiral order.

        Example 1:
            [[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]]            -> [1, 2, 3, 6, 9, 8, 7, 4, 5]
        Example 2:
            [[1, 2, 3, 4],
             [5, 6, 7, 8],
             [9, 10, 11, 12]]      -> [1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7]

        Args:
            matrix: 2D matrix (list of equal-length rows).  The caller's
                matrix is NOT modified.  Cells equal to -inf are assumed
                not to occur in the input (used internally as sentinels).
        Returns:
            list: the matrix values in clockwise spiral order.
        """
        if not matrix:
            return []
        # Work on a shallow per-row copy: the previous implementation
        # overwrote the caller's matrix with -inf sentinels, destroying
        # the input as a side effect.
        grid = [row[:] for row in matrix]
        # Clockwise headings: right, down, left, up.
        directions = [
            (0, 1),
            (1, 0),
            (0, -1),
            (-1, 0)
        ]
        i = j = k = 0
        result = [grid[0][0]]
        grid[0][0] = -float('inf')  # sentinel: cell already collected
        while True:
            # If the current heading is blocked, turn clockwise once;
            # if it is still blocked, the spiral is complete.
            if not self.check_direction(grid, i, j, directions[k]):
                k = (k + 1) % 4
                if not self.check_direction(grid, i, j, directions[k]):
                    break
            i += directions[k][0]
            j += directions[k][1]
            result.append(grid[i][j])
            grid[i][j] = -float('inf')
        return result

    def check_direction(self, matrix, row, col, direction):
        """
        Check whether one step along `direction` from (row, col) lands on
        an in-bounds cell that has not been visited yet.

        Args:
            matrix: 2D matrix (the internal working copy).
            row: current row index.
            col: current column index.
            direction: (drow, dcol) heading tuple.
        Returns:
            bool: True if the step is valid, otherwise False.
        """
        new_row = row + direction[0]
        new_col = col + direction[1]
        return (0 <= new_row < len(matrix)
                and 0 <= new_col < len(matrix[0])
                and matrix[new_row][new_col] != -float('inf'))
|
[
"kousuke.newlife@gmail.com"
] |
kousuke.newlife@gmail.com
|
18375f5328d2d152a2bf52b751758c80aeef94a3
|
6d69b249a81e076d79787dd08eb8957908052052
|
/wiktionary_bot/cron/send_daily_word.py
|
18e6b326e27d746aea7ad179a8240945e26ae9f0
|
[] |
no_license
|
2vitalik/wiktionary
|
02ee1f1327c3b82fc7b4d7da12083b1431b1eb8b
|
8edae2f7dcf9089084c5ce7033c4fb0b454f4dfa
|
refs/heads/master
| 2023-02-06T11:28:41.554604
| 2023-02-05T22:49:01
| 2023-02-05T22:49:01
| 121,025,447
| 7
| 2
| null | 2021-10-13T17:36:32
| 2018-02-10T15:06:24
|
Lua
|
UTF-8
|
Python
| false
| false
| 755
|
py
|
import sys; sys.path.append('../..')
from datetime import datetime
import telegram
from core.conf import conf
from libs.utils.io import json_load
from wiktionary_bot.src.semantic import Reply
from wiktionary_bot.src.slack import slack
from wiktionary_bot.src.utils import send
@slack('send_daily_word')
def send_daily_word():
    """Post today's "word of the day" to the Telegram channel.

    Looks up the word for the current calendar day in this month's
    ``latest.json`` file and sends its formatted definition via the bot.
    The ``@slack`` decorator presumably reports run status to Slack --
    confirm in wiktionary_bot.src.slack.
    """
    bot = telegram.Bot(conf.telegram_token)
    chat_id = conf.new_channel_id
    now = datetime.now()
    # Words are stored one JSON file per month: <data>/word_of_day/YYYY/MM/latest.json
    year_month = now.strftime('%Y/%m')
    path = f'{conf.data_path}/word_of_day/{year_month}/latest.json'
    data = json_load(path)
    # Keys in latest.json are day-of-month numbers stored as strings.
    title = data[str(now.day)]
    print(title)
    text = '🔆 Слово дня\n' + Reply(title).text
    send(bot, chat_id, text)
if __name__ == '__main__':
send_daily_word()
|
[
"2vitalik@gmail.com"
] |
2vitalik@gmail.com
|
6ad6c28a4be99b031ff33625ec068853131f61f7
|
445892d8512378aea29fe3ac32289193a74f1ad0
|
/16.py
|
067d858cef42ec88793d5d02f238fbe4180e54a9
|
[] |
no_license
|
nemec/euler
|
4310ca29869a253b34860df133f5f41e5b2cf7ae
|
d566a453dbd2ae50e1e450b2e43dc885015ce48d
|
refs/heads/master
| 2021-01-19T06:31:40.072085
| 2012-01-07T22:54:16
| 2012-01-07T22:54:16
| 3,127,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
## Project Euler
## Problem 16
##
## 2^15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.
##
## What is the sum of the digits of the number 2^1000?
## NOTE: uses the Python 2 print statement; this file does not run under Python 3.
print "Euler answer is:", sum(map(int, str(pow(2, 1000))))
|
[
"djnemec@gmail.com"
] |
djnemec@gmail.com
|
da353a032312bc515f977a62039f08b39d156cdb
|
32a6db4d595ef4d308ac0e2ef37c57f65a777bfc
|
/ZYCami_00_彭小钗/TestCase01/test_prime02.py
|
6ed9b88761b6de280907d63f3a4961a0cf453136
|
[] |
no_license
|
wangdan377/Python_UI
|
1c8f0b3d46272d72f849f242c39e035c6b20720b
|
6c3e23b301ffe14cbd27a5211e48c8f79169dcf9
|
refs/heads/master
| 2023-02-17T02:37:34.353523
| 2021-01-19T11:58:22
| 2021-01-19T11:58:22
| 311,855,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,236
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from ddt import ddt,data,unpack
from Public.Function import *
from Public import Caplictily
from PO.login_element import Login_Page
from PO.Prime_Element import Prime_Page
from PO.Edit_Element import Edit_Page
from PO.Activation_page import Activation_Page
from Data.data import *
from Public.loged import *
import unittest,pytest
import time,os,sys
import warnings
from appium.webdriver import Remote
import HTMLTestRunnerCN3_pie_chart_screen
log = Logger('D:\ZYCami_00\logs\\error.log', level='debug')
@ddt #装饰器类
class Test_Prime_02(unittest.TestCase):
'''
未登录状态下,登录-编辑-云剪辑-立即开通-微信支付-关闭
已登录状态下,编辑-云剪辑-立即开通-微信支付-关闭
未登录状态下去编辑页面云剪辑,然后购买prime
已登录状态下,非prime用户,购买一周/二周-协议-隐私政策-购买设备-向上滑动'''
#初始化,使用装饰器,这样在用例执行前只初始化一次
@classmethod
def setUpClass(self):
warnings.simplefilter("ignore", ResourceWarning)
# cls.driver = appium_desired() #之前的方法
# cls.driver.start_activity(appPackage,appActivity) #另外一种启动方式
#实例化类
# self.fun = BaseFun(self.driver)
driver = Caplictily.Driver_Config()
self.driver = driver.get_driver()
self.login = Login_Page(self.driver) #登录
self.prime = Prime_Page(self.driver) #prime
self.edit = Edit_Page(self.driver) #edit
self.action = Activation_Page(self.driver) # 激活设备
self.fun = BaseFun(self.driver)
@data(*get_log_data()) #装饰测试方法,拿到几个数据就可以执行用例
@unpack #根据拿到的数据以都好及逆行拆分
@pytest.mark.flaky(rerus=3)
# def test_prime_02_01(self,username,password):
# """
# 未登录状态下,登录-编辑-云剪辑-立即开通-微信支付-关闭
# :param username: 账号
# :param password: 密码
# :return:
# """
# """try:
# self.fun.click(File_name)
# except AttributeError:
# log.logger.error('访问不到该对象属性')
# self.fun.saveScreenshot('camera01')
# except NoSuchElementException as a: # 元素不存在,则调用捕获异常处理
# log.logger.error('元素不存在')
# self.fun.saveScreenshot('camera02')
# except NoAlertPresentException as b: # 捕获意外的异常
# log.logger.warning('警告')
# self.fun.saveScreenshot('camera03')
# except Exception as result:
# log.logger.critical('严重')
# self.fun.saveScreenshot('camera04')
# else:
# pass # 没有错的情况下执行
# finally:
# pass # 有没有错,都会执行"""
# # self.login.click_File_me() #我的页面icon
# self.edit.click_File_Editor() #底部编辑icon
# self.edit.click_File_cloud_engine() #云剪辑
# self.login.click_File_tv_login()
# self.login.input_user(username)
# self.login.input_password(password)
# self.login.click_File_tv_commit()
# self.prime.click_File_open01() #立即开通
# self.action.click_File_pay_closed() # 支付方式关闭
# self.action.click_File_wx() #选择微信支付
#
# # self.prime.click_File_prime01() #点击【prime】
# # self.prime.click_File_year() #年
# # time.sleep(5)
# # self.prime.click_File_open01() #立即开通
# # time.sleep(5)
# # self.prime.click_File_Service_Agreement() #服务协议
# # self.prime.click_File_Service_Back() #服务协议返回按钮
# # self.prime.click_File_Privacy_Policy() #隐私政策
# # self.prime.click_File_Service_Back() #隐私政策返回按钮
# # time.sleep(2)
# # self.fun.swip_left02() #向左滑动
# # time.sleep(5)
# # self.prime.click_File_buy_equipment02() #购买二设备
# # time.sleep(5)
# # self.driver.keyevent(4) #返回按钮
# # time.sleep(2)
# # self.fun.swip_up()
# # time.sleep(5)
#
#
#
# """# self.driver.get_screenshot_as_file('../screen/test_camera.png') #直接存入报告
# self.fun.saveScreenshot('camera')
# self.fun.saveScreenshot('help')
#
# self.fun.click(File_enter)"""
def test_prime_02_02(self,username,password):
"""
未登录状态下去编辑页面云剪辑,然后购买prime
:param username: 账号
:param password: 密码
:return:
"""
"""try:
self.fun.click(File_name)
except AttributeError:
log.logger.error('访问不到该对象属性')
self.fun.saveScreenshot('camera01')
except NoSuchElementException as a: # 元素不存在,则调用捕获异常处理
log.logger.error('元素不存在')
self.fun.saveScreenshot('camera02')
except NoAlertPresentException as b: # 捕获意外的异常
log.logger.warning('警告')
self.fun.saveScreenshot('camera03')
except Exception as result:
log.logger.critical('严重')
self.fun.saveScreenshot('camera04')
else:
pass # 没有错的情况下执行
finally:
pass # 有没有错,都会执行"""
self.login.click_File_me() #我的页面icon
# self.login.click_File_Home() #底部编辑icon
# self.edit.click_File_cloud_engine() #云剪辑
# self.prime.click_File_open01() # 立即开通
# self.action.click_File_pay_closed() # 支付方式关闭
# self.action.click_File_wx() # 选择微信支付
# self.login.click_File_tv_login()
# self.login.input_user(username)
# self.login.input_password(password)
# self.login.click_File_tv_commit()
# self.prime.click_File_prime01() #点击【prime】
# self.prime.click_File_year() #年
# time.sleep(5)
# self.prime.click_File_open01() #立即开通
# time.sleep(5)
# self.prime.click_File_Service_Agreement() #服务协议
# self.prime.click_File_Service_Back() #服务协议返回按钮
# self.prime.click_File_Privacy_Policy() #隐私政策
# self.prime.click_File_Service_Back() #隐私政策返回按钮
# time.sleep(2)
# self.fun.swip_left02() #向左滑动
# time.sleep(5)
# self.prime.click_File_buy_equipment02() #购买二设备
# time.sleep(5)
# self.driver.keyevent(4) #返回按钮
# time.sleep(2)
# self.fun.swip_up()
# time.sleep(5)
"""# self.driver.get_screenshot_as_file('../screen/test_camera.png') #直接存入报告
self.fun.saveScreenshot('camera')
self.fun.saveScreenshot('help')
self.fun.click(File_enter)"""
# 关闭驱动
@classmethod
def tearDownClass(self):
self.driver.quit()
if __name__ == '__main__':
unittest.main()
"""suite = unittest.TestSuite()
suite.addTest(Camera_test("test_camera"))
soundbox_device = 'XYBK01011204300001'
now = time.strftime("%Y-%m-%d %H_%M_%S")
##将当前时间加入到报告文件名称中,定义测试报告存放路径
filename = '../report/' + now + 'result.html' # 第一种方法,相对路径下
'''# filename = '../report/report' + now + 'result.html' # 第二种方法,相对路径下+report报告名字
# filename = '../report'+ os.sep+ 'report' + now + 'result.html' #第二种方法,相对路径下,上级目录../report文件夹下,分隔符下文件名 os.sep+ 'report'
# filename = os.getcwd() + os.sep+ 'report' + now + 'result.html' #相对路径下,该路径下的,所以就到test_case下面了
# filename = os.path.dirname(os.path.dirname(__file__))+ os.sep + 'report' + now + 'result.html' #相对路径下,该路径下的,所以就到test_case下面了
# filename = 'D:\py\ZY_Cami\\report\\report' + now + 'result.html' #绝对路径下
# filename = '../' + now + 'result.html' #../是上级目录,还是在上级目录下
# filename = '../' + os.sep + 'report' + now + 'result.html' #还是在上级目录下
# filename = '../report' + now + 'result.html' #还是在上级目录下'''
fp = open(filename, 'wb')
runner = HTMLTestRunnerCN3_pie_chart_screen.HTMLTestRunner(stream=fp, title='测试报告', tester='王丹',
device=(soundbox_device),
description='用例执行情况:')
runner.run(suite)
# runner.run(Suit())
fp.close()"""
|
[
"1065913054@qq.com"
] |
1065913054@qq.com
|
de53abff3d0ed3259297bf19dad05ed4f09e5723
|
b8cc6d34ad44bf5c28fcca9e0df01d9ebe0ee339
|
/Python100例/Python练习实例87.py
|
eb040332bbd64b5daa5056ab2b402bd6c2c73df0
|
[] |
no_license
|
python-yc/pycharm_script
|
ae0e72898ef44a9de47e7548170a030c0a752eb5
|
c8947849090c71e131df5dc32173ebe9754df951
|
refs/heads/master
| 2023-01-05T06:16:33.857668
| 2020-10-31T08:09:53
| 2020-10-31T08:09:53
| 296,778,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
'''
Problem: report the result (passing a "struct"-like variable to a function).
Program analysis: none.
'''
if __name__ == '__main__':
    # Python passes object references, so mutations made inside f() are
    # visible to the caller (unlike a C struct passed by value).
    class Student():
        # class-level defaults; assignments below shadow them per instance
        x = 0
        c = 0
    def f(stu):
        # mutate the caller's object in place
        stu.x = 20
        stu.c = 'c'
    a = Student()
    a.x = 3
    a.c = 'a'
    f(a)
    print(a.x,a.c)  # prints: 20 c
|
[
"15655982512.com"
] |
15655982512.com
|
1a874b1f92374058db5c3ca342920bc09dacd61a
|
aea8fea216234fd48269e4a1830b345c52d85de2
|
/fhir/resources/STU3/deviceusestatement.py
|
7db6646ba3f74ade538fe51cd3a2428909343d91
|
[
"BSD-3-Clause"
] |
permissive
|
mmabey/fhir.resources
|
67fce95c6b35bfdc3cbbc8036e02c962a6a7340c
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
refs/heads/master
| 2023-04-12T15:50:30.104992
| 2020-04-11T17:21:36
| 2020-04-11T17:21:36
| 269,712,884
| 0
| 0
|
NOASSERTION
| 2020-06-05T17:03:04
| 2020-06-05T17:03:04
| null |
UTF-8
|
Python
| false
| false
| 6,807
|
py
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/DeviceUseStatement
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
import sys
from . import domainresource
class DeviceUseStatement(domainresource.DomainResource):
""" Record of use of a device.
A record of a device being used by a patient where the record is the result
of a report from the patient or another clinician.
"""
resource_type = "DeviceUseStatement"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.bodySite = None
""" Target body site.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.device = None
""" Reference to device used.
Type `FHIRReference` referencing `['Device']` (represented as `dict` in JSON). """
self.identifier = None
""" External identifier for this record.
List of `Identifier` items (represented as `dict` in JSON). """
self.indication = None
""" Why device was used.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.note = None
""" Addition details (comments, instructions).
List of `Annotation` items (represented as `dict` in JSON). """
self.recordedOn = None
""" When statement was recorded.
Type `FHIRDate` (represented as `str` in JSON). """
self.source = None
""" Who made the statement.
Type `FHIRReference` referencing `['Patient'], ['Practitioner'], ['RelatedPerson']` (represented as `dict` in JSON). """
self.status = None
""" active | completed | entered-in-error +.
Type `str`. """
self.subject = None
""" Patient using device.
Type `FHIRReference` referencing `['Patient'], ['Group']` (represented as `dict` in JSON). """
self.timingDateTime = None
""" How often the device was used.
Type `FHIRDate` (represented as `str` in JSON). """
self.timingPeriod = None
""" How often the device was used.
Type `Period` (represented as `dict` in JSON). """
self.timingTiming = None
""" How often the device was used.
Type `Timing` (represented as `dict` in JSON). """
self.whenUsed = None
""" Period device was used.
Type `Period` (represented as `dict` in JSON). """
super(DeviceUseStatement, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(DeviceUseStatement, self).elementProperties()
js.extend(
[
(
"bodySite",
"bodySite",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
(
"device",
"device",
fhirreference.FHIRReference,
"Reference",
False,
None,
True,
),
(
"identifier",
"identifier",
identifier.Identifier,
"Identifier",
True,
None,
False,
),
(
"indication",
"indication",
codeableconcept.CodeableConcept,
"CodeableConcept",
True,
None,
False,
),
(
"note",
"note",
annotation.Annotation,
"Annotation",
True,
None,
False,
),
(
"recordedOn",
"recordedOn",
fhirdate.FHIRDate,
"dateTime",
False,
None,
False,
),
(
"source",
"source",
fhirreference.FHIRReference,
"Reference",
False,
None,
False,
),
("status", "status", str, "code", False, None, True),
(
"subject",
"subject",
fhirreference.FHIRReference,
"Reference",
False,
None,
True,
),
(
"timingDateTime",
"timingDateTime",
fhirdate.FHIRDate,
"dateTime",
False,
"timing",
False,
),
(
"timingPeriod",
"timingPeriod",
period.Period,
"Period",
False,
"timing",
False,
),
(
"timingTiming",
"timingTiming",
timing.Timing,
"Timing",
False,
"timing",
False,
),
("whenUsed", "whenUsed", period.Period, "Period", False, None, False),
]
)
return js
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + ".annotation"]
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + ".codeableconcept"]
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + ".fhirdate"]
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + ".fhirreference"]
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + ".identifier"]
try:
from . import period
except ImportError:
period = sys.modules[__package__ + ".period"]
try:
from . import timing
except ImportError:
timing = sys.modules[__package__ + ".timing"]
|
[
"connect2nazrul@gmail.com"
] |
connect2nazrul@gmail.com
|
37c42c2db2f6ccdc031e2a57d830f319f7285191
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/452/usersdata/278/104002/submittedfiles/avenida.py
|
b830fcc4174b52263fd249e580b8cecb56557fb8
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 937
|
py
|
# -*- coding: utf-8 -*-
# Read an m x n grid of expropriation costs and print the cost of the
# cheapest north-south avenue, i.e. the minimum column sum.
#
# Fixes over the original: (1) `else valor[i]>valor[i+1]:` was a syntax
# error; (2) every row of valor2 aliased one shared, ever-growing list;
# (3) `soma` was never reset between columns; (4) the pairwise-deletion
# minimum search never terminated (`n` was never decremented).
m = int(input('Digite o número de quadras no sentido Norte-Sul: '))
while m < 2 or m > 1000:
    m = int(input('Digite o número de quadras [2,1000] no sentido Norte-Sul: '))
n = int(input('digite o número de quadras no sentido Leste-Oeste: '))
while n < 2 or n > 1000:
    n = int(input('digite o número de quadras [2,1000] no sentido Leste-Oeste: '))
# Read the grid: one fresh list per row.
valor2 = []
for i in range(m):
    linha = []
    for j in range(n):
        linha.append(int(input('Digite o valor de desapropriação da quadra%d%d: ' % (i, j))))
    valor2.append(linha)
# Column sums, resetting the accumulator for each column.
valor = []
for j in range(n):
    soma = 0
    for i in range(m):
        soma = soma + valor2[i][j]
    valor.append(soma)
# Cheapest avenue = smallest column sum.
print(min(valor))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
a7c2ca14b118fd55db1f803488e33a2b02b59688
|
209c2dd5f0ae41ccd41d84e8c86fe4a7e3321715
|
/tests/test_services.py
|
4ff7b8ac73dbcb8b987af56b24d2064124738a01
|
[
"MIT"
] |
permissive
|
vladimirmyshkovski/django-url-shorter
|
4d17b165c14b8584bdabc0a60bd6cab03f88d2dd
|
dcea9f77af951ec3cfc41fbbc4bab951ccab7f41
|
refs/heads/master
| 2021-09-15T13:02:59.286830
| 2018-06-02T16:59:59
| 2018-06-02T16:59:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,021
|
py
|
from django.test import TestCase
from url_shorter import services
from url_shorter import models
from . import factories
class TestCreateNewURL(TestCase):
    """Tests for url_shorter.services: create_new_url() and generate_short_url()."""

    @classmethod
    def setUpTestData(cls):
        # Class-level fixtures, created once: a user and one shortened URL.
        #cls.new_url = factories.URLFactory()
        cls.user = factories.UserFactory()
        cls.new_url = services.create_new_url(
            'https://google.com/',
            cls.user
        )
        cls.new_short_url = services.generate_short_url()

    def setUp(self):
        pass

    def test_new_short_url_created(self):
        self.assertNotEqual(self.new_short_url, None)

    def test_new_short_url_is_string(self):
        # NOTE(review): assertTrue(type(x)) is always true (type objects are
        # truthy), so this test cannot fail; it probably meant
        # isinstance(self.new_short_url, str).
        self.assertTrue(type(self.new_short_url))

    def test_jew_short_url_length(self):
        # NOTE(review): typo in the method name ("jew" -> "new").
        # Short codes are expected to be exactly 6 characters long.
        self.assertEqual(
            len(self.new_short_url),
            6
        )

    def test_new_url_created(self):
        self.assertNotEqual(
            self.new_url.short_url,
            None
        )

    def test_new_url_is_URL_instance(self):
        self.assertTrue(
            isinstance(self.new_url, models.URL)
        )

    def test_new_url_equal_long_url(self):
        self.assertEqual(
            self.new_url.long_url,
            'https://google.com/'
        )

    def test_new_url_short_url_is_string(self):
        self.assertEqual(
            type(self.new_url.short_url),
            type('')
        )

    def test_create_many_new_urls_with_one_long_url(self):
        # Shortening the same long URL repeatedly must return the same record.
        for _ in range(10):
            url = services.create_new_url('https://google.com/', self.user)
            self.assertEqual(
                self.new_url,
                url
            )

    def test_create_new_url_without_long_url(self):
        # An empty long URL is passed back unchanged (no URL row created).
        url = services.create_new_url('', self.user)
        self.assertEqual(
            url,
            ''
        )

    def test_create_new_url_withput_long_url_equal_instance(self):
        # NOTE(review): typo in the method name ("withput" -> "without").
        url = services.create_new_url('', self.user)
        self.assertFalse(isinstance(url, models.URL))

    def tearDown(self):
        pass
|
[
"narnikgamarnikus@gmail.com"
] |
narnikgamarnikus@gmail.com
|
f5aa734ec84bd5ce364e5dd9f8182e2fe20138ce
|
2a2b26f3fbdd16d99fd65f390acc37cff6783805
|
/backend/tin_marin_23659/urls.py
|
e29fe74e819d07acbddd4a80408b93aca7f3153f
|
[] |
no_license
|
crowdbotics-apps/tin-marin-23659
|
fb6b8dcb98c069a6543039bd95b2c821eed39300
|
f50588680e57d8840dc1e406d5ac5171d67f3c54
|
refs/heads/master
| 2023-02-09T00:02:16.087858
| 2021-01-03T22:16:05
| 2021-01-03T22:16:05
| 326,510,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,973
|
py
|
"""tin_marin_23659 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Tin Marin"
admin.site.site_title = "Tin Marin Admin Portal"
admin.site.index_title = "Tin Marin Admin"
# swagger
api_info = openapi.Info(
title="Tin Marin API",
default_version="v1",
description="API documentation for Tin Marin App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
220733b7bec929fe2135f19393091bb7b9da15c5
|
cbf9f600374d7510988632d7dba145c8ff0cd1f0
|
/abc/187/d.py
|
d87bf83a875df88d7799159669ee7b2738c9db82
|
[] |
no_license
|
sakakazu2468/AtCoder_py
|
d0945d03ad562474e40e413abcec39ded61e6855
|
34bdf39ee9647e7aee17e48c928ce5288a1bfaa5
|
refs/heads/master
| 2022-04-27T18:32:28.825004
| 2022-04-21T07:27:00
| 2022-04-21T07:27:00
| 225,844,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
# AtCoder ABC 187 D ("Choose Me"-style voting problem).
# Greedy: give speeches in towns in decreasing order of the swing they
# produce, stopping as soon as Takahashi strictly leads; print the count.
import heapq
n = int(input())
aoki = 0
takahashi = 0
eva_heap = []
for i in range(n):
    a, b = map(int, input().split())
    aoki += a  # before any speech, every a-voter counts for Aoki
    # A speech in this town changes the margin by 2*a + b (Aoki loses a,
    # Takahashi gains a+b).  Negate for Python's min-heap so the largest
    # swing is popped first.
    eva_heap.append([-1*(2*a+b), a, b])
heapq.heapify(eva_heap)
for i in range(n):
    pop = heapq.heappop(eva_heap)
    takahashi += pop[1] + pop[2]
    aoki -= pop[1]
    if takahashi > aoki:
        print(i+1)
        break
|
[
"sakakazu2468@icloud.com"
] |
sakakazu2468@icloud.com
|
24ccc45f17a0ad0467b37575e53011fbc3fd29dc
|
dffbcc5d83153dd8b3ca91f91fd80311b266c586
|
/lesweets/templatetags/extratags.py
|
f13d381679ba34bc506d684c5d01df3c0a206616
|
[] |
no_license
|
jayoshih/activity-builder
|
0d2aa57a55266cc9d4abd554f367cdbee855195e
|
3b76a670e0365458f9422efcc2cc5b354b848074
|
refs/heads/master
| 2021-05-01T22:09:49.078050
| 2018-02-24T05:27:23
| 2018-02-24T05:27:23
| 120,987,938
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
import json
import re
from django import template
from django.template.defaultfilters import stringfilter
register = template.Library()
@register.filter(is_safe=True)
def parsestory(value):
    """Django template filter: render a story's ``[item_id]`` placeholders.

    ``value`` is expected to be a dict with keys "story" (text containing
    bracketed ``[id]`` tokens), "items" (list of dicts with "id" and
    "label" keys) and "id" -- TODO confirm against the templates/data
    that feed this filter.
    """
    story = value["story"]
    # Group 1 matches the whole "[id]" token, group 2 just the id inside.
    MATHLIBS_REGEX = r'(\[([^\]]+)\])'
    for match in re.findall(MATHLIBS_REGEX, story):
        try:
            # First item whose id matches the token; raises StopIteration
            # when no item matches.
            data = next(m for m in value["items"] if m["id"] == match[1])
            span = "<b class='display-blank {}'>{}</b>".format(data["id"], data["label"])
            story = story.replace(match[0], span)
        except StopIteration as e:
            # Unresolved token: leave it in the story and log to stdout.
            print("No item found in {} with id {}".format(value["id"], match))
    return story
|
[
"jordanyoshihara@gmail.com"
] |
jordanyoshihara@gmail.com
|
ac3585e83cbdf80f4e384846177b3ce97a51323c
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-4/f68bf254a5a9503bbaef64e23bd53cd85527d20e-<_sort_labels>-bug.py
|
2f291b6818e245eb814d0a21821e59925da51495
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
def _sort_labels(uniques, left, right):
    # Remap the concatenated left/right label arrays onto the sorted order
    # of `uniques`; entries that cannot be matched get the -1 sentinel.
    # NOTE: relies on module-level pandas internals (Index, sorting,
    # _ensure_int64) and numpy as np, imported elsewhere in the file.
    if (not isinstance(uniques, np.ndarray)):
        # Materialize list-likes through Index to obtain a proper ndarray.
        uniques = Index(uniques).values
    l = len(left)
    # Sort both halves in one pass, then split back apart below.
    labels = np.concatenate([left, right])
    (_, new_labels) = sorting.safe_sort(uniques, labels, na_sentinel=(- 1))
    new_labels = _ensure_int64(new_labels)
    (new_left, new_right) = (new_labels[:l], new_labels[l:])
    return (new_left, new_right)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
7826e402c224ee5ded49a02aeca550616e2b3421
|
d12946acff8dab7127720e9f7393acc6b6c22daf
|
/ngcasa/flagging/manual_unflag.py
|
2399198632f1ba319a8d0afd754504f72054063f
|
[
"Apache-2.0"
] |
permissive
|
keflavich/cngi_prototype
|
ff84c6d36e823148d3e21d6c2a2eb8298893a5da
|
cf6ea38a8dd7219c351bcc5a10e9a38dcf733ae5
|
refs/heads/master
| 2023-02-01T09:21:47.847739
| 2020-12-14T21:03:16
| 2020-12-14T21:03:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
# Copyright 2020 AUI, Inc. Washington DC, USA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
this module will be included in the api
"""
def manual_unflag(vis_dataset, flag_parms, storage_parms):
    """
    .. todo::
        This function is not yet implemented

    Define a set of data selection queries to mark as flags.

    Inputs :
    (1) list of selection queries
    (2) array name for output flags. Default = FLAG

    Returns
    -------
    vis_dataset : xarray.core.dataset.Dataset
    """
    # NOTE(review): stub -- the body is empty, so it currently returns None
    # despite the documented xarray Dataset return value.
|
[
"ryanraba@gmail.com"
] |
ryanraba@gmail.com
|
db411ddca642519eaddf1710497d10f4c1122f36
|
c77148a25435b50a35fceab36112fba18dbb0866
|
/backup/Jun17/units/TatsuNullifier.py
|
f22337f88388c799dec174265e8621b1e779f14b
|
[] |
no_license
|
SozBroz/PrismataBot
|
51fbecf90950d13eb52606a5b18984b5474746ba
|
f375ca8dc396abbca4134f70cb262fc78b90a17e
|
refs/heads/master
| 2020-05-31T13:37:50.102010
| 2019-06-06T02:48:01
| 2019-06-06T02:48:01
| 136,826,949
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
#!/usr/bin.python3.6
class TatsuNullifier:
    """Prismata unit: Tatsu Nullifier.

    A 2-HP, 2-attack non-blocking unit; clicking it freezes 5 of the
    owner's (opponent-targeting) resources via ``owner.freeze(5)``.
    """

    def __init__(self, owner):
        self.owner = owner
        # Static card stats.
        self.lifespan = -1           # -1: never expires on its own
        self.frontline = False
        self.cooldown = 1
        self.defaultBlocking = False
        self.assignedBlocking = False
        self.health = 2
        self.fragile = False
        self.attack = 2
        # Per-turn and per-click effect tables.
        self.startTurnDict = {"attack": 2}
        self.onClickDict = {}
        self.onClickCost = {}

    def __str__(self):
        return "Tatsu Nullifier"

    def startTurn(self):
        # No special start-of-turn bookkeeping beyond startTurnDict.
        return True

    def canClick(self):
        # The click ability is always available.
        return True

    def onClick(self):
        # Click effect: freeze 5 via the owning player.
        self.owner.freeze(5)

    def info(self):
        print("Health: " + str(self.health))
def TatsuNullifierCost():
    """Return the purchase spec for Tatsu Nullifier.

    Returns a 5-tuple: (resource cost dict, buyable flag, supply count,
    prerequisite list, unit name).
    """
    cost = {"gold": 12, "red": 4}
    return cost, True, 4, [], "Tatsu Nullifier"
|
[
"Phil@oc1140302110.ibm.com"
] |
Phil@oc1140302110.ibm.com
|
123e7e6a2f7aed9ff30733adf3eddbf3b0ea4f4f
|
9131dd03ff2880fca2a5883572784f8e51046e41
|
/env/lib/python3.6/site-packages/clicksend_client/models/fax_message_collection.py
|
92d275d5d89e723c11c3188f79eed1f58a23cad1
|
[] |
no_license
|
aviadm24/coronaap
|
fe10619ae42a8c839cd0a2c2c522187c5f21fbc7
|
5608c2d77cb3441b48ba51da04c06a187fb09488
|
refs/heads/master
| 2022-12-09T21:35:17.179422
| 2021-01-28T08:21:49
| 2021-01-28T08:21:49
| 249,938,200
| 0
| 0
| null | 2021-09-22T18:47:51
| 2020-03-25T09:36:10
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 4,785
|
py
|
# coding: utf-8
"""
ClickSend v3 API
This is an official SDK for [ClickSend](https://clicksend.com) Below you will find a current list of the available methods for clicksend. *NOTE: You will need to create a free account to use the API. You can register [here](https://dashboard.clicksend.com/#/signup/step1/)..* # noqa: E501
OpenAPI spec version: 3.1
Contact: support@clicksend.com
Generated by: https://github.com/clicksend-api/clicksend-codegen.git
"""
import pprint
import re # noqa: F401
import six
class FaxMessageCollection(object):
"""NOTE: This class is auto generated by the clicksend code generator program.
Do not edit the class manually.
"""
"""
Attributes:
clicksend_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
clicksend_types = {
'messages': 'list[FaxMessage]',
'file_url': 'str'
}
attribute_map = {
'messages': 'messages',
'file_url': 'file_url'
}
discriminator_value_class_map = {
}
    def __init__(self, messages=None, file_url=None):  # noqa: E501
        """FaxMessageCollection - a model defined in Swagger"""  # noqa: E501
        self._messages = None
        self._file_url = None
        # Field name consulted by get_real_child_model() for polymorphism.
        self.discriminator = 'classType'
        # Assign through the property setters, which reject None --
        # NOTE(review): the None defaults above will therefore raise
        # ValueError unless both arguments are supplied.
        self.messages = messages
        self.file_url = file_url
@property
def messages(self):
"""Gets the messages of this FaxMessageCollection. # noqa: E501
Array of FaxMessage items # noqa: E501
:return: The messages of this FaxMessageCollection. # noqa: E501
:rtype: list[FaxMessage]
"""
return self._messages
@messages.setter
def messages(self, messages):
"""Sets the messages of this FaxMessageCollection.
Array of FaxMessage items # noqa: E501
:param messages: The messages of this FaxMessageCollection. # noqa: E501
:type: list[FaxMessage]
"""
if messages is None:
raise ValueError("Invalid value for `messages`, must not be `None`") # noqa: E501
self._messages = messages
@property
def file_url(self):
"""Gets the file_url of this FaxMessageCollection. # noqa: E501
URL of file to send # noqa: E501
:return: The file_url of this FaxMessageCollection. # noqa: E501
:rtype: str
"""
return self._file_url
@file_url.setter
def file_url(self, file_url):
"""Sets the file_url of this FaxMessageCollection.
URL of file to send # noqa: E501
:param file_url: The file_url of this FaxMessageCollection. # noqa: E501
:type: str
"""
if file_url is None:
raise ValueError("Invalid value for `file_url`, must not be `None`") # noqa: E501
self._file_url = file_url
def get_real_child_model(self, data):
"""Returns the real base class specified by the discriminator"""
discriminator_value = data[self.discriminator].lower()
return self.discriminator_value_class_map.get(discriminator_value)
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.clicksend_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FaxMessageCollection, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FaxMessageCollection):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"aviadm24@gmail.com"
] |
aviadm24@gmail.com
|
bf6c54d9c768f84150459c9cd82e12ccf61504a4
|
bc441bb06b8948288f110af63feda4e798f30225
|
/translate_sdk/model/msgsender/send_message_request_pb2.pyi
|
1a877b900d8b0e65122b3d5884b44703adb7b282
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,938
|
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from translate_sdk.model.msgsender.send_message_request_data_pb2 import (
SendMessageRequestData as translate_sdk___model___msgsender___send_message_request_data_pb2___SendMessageRequestData,
)
from typing import (
Optional as typing___Optional,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
# Generated type stub for the msgsender ``SendMessageRequest`` protobuf
# message; it carries a single ``data`` field.
class SendMessageRequest(google___protobuf___message___Message):
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    @property
    def data(self) -> translate_sdk___model___msgsender___send_message_request_data_pb2___SendMessageRequestData: ...
    def __init__(self,
        *,
        data : typing___Optional[translate_sdk___model___msgsender___send_message_request_data_pb2___SendMessageRequestData] = None,
        ) -> None: ...
    # FromString accepts buffer/unicode only on Python 2.
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> SendMessageRequest: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> SendMessageRequest: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def HasField(self, field_name: typing_extensions___Literal[u"data",b"data"]) -> builtin___bool: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"data",b"data"]) -> None: ...
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
cf7f4d296d73068a1dc89316da6367097e02ebb5
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_103/ch171_2020_06_21_22_12_16_899924.py
|
135086fcb73f5f97d48136cc47c30773eeb720b1
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
class Carrinho:
    """Shopping cart that accumulates the total price per product name.

    Bug fixes relative to the original:
    - the totals dict was stored as ``self.total_do_produto``, shadowing the
      ``total_do_produto`` method (and ``self.adiciona`` shadowed ``adiciona``);
      the dict now lives under a private name;
    - ``adiciona`` referenced ``nome_produto`` while its parameter was named
      ``nome_do_produto``, and accessed the dict without ``self.``.
    """

    def __init__(self, adiciona=None, total_do_produto=None):
        # The two parameters are kept (now optional) for backward
        # compatibility with the original broken signature; an existing
        # mapping passed as ``total_do_produto`` seeds the totals.
        self._totais = dict(total_do_produto) if total_do_produto else {}

    def adiciona(self, nome_do_produto, preco):
        """Add ``preco`` to the running total of ``nome_do_produto``."""
        self._totais[nome_do_produto] = self._totais.get(nome_do_produto, 0) + preco

    def total_do_produto(self, nome_produto):
        """Return the accumulated total for ``nome_produto``.

        :raises KeyError: if the product was never added.
        """
        return self._totais[nome_produto]
|
[
"you@example.com"
] |
you@example.com
|
af095b8c5a638874a1b7bd9875f1dbdc6558e8d1
|
ee00ebe5e71c36b05fbff993b19e9723b963313f
|
/1086_High_Five.py
|
54383bdd24da9e14847397d5e189ffb6be5523ec
|
[] |
no_license
|
26XINXIN/leetcode
|
f365560d93604a28abf399707b333f3c11f924ec
|
78ed11f34fd03e9a188c9c6cb352e883016d05d9
|
refs/heads/master
| 2021-06-28T16:31:45.103879
| 2020-09-19T20:33:55
| 2020-09-19T20:33:55
| 144,975,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
from heapq import heappush, heappop
class Solution:
    def highFive(self, items: "list[list[int]]") -> "list[list[int]]":
        """Return ``[student_id, average_of_top_5_scores]`` pairs, sorted by id.

        Each item is ``[student_id, score]``; every student is assumed to have
        at least five scores (LeetCode 1086).  The average is truncated to an
        int, matching the original ``int(avg / 5)``.

        Fixes relative to the original: removed a leftover debug
        ``print(scores)``, and replaced the ``List`` annotations (which were
        never imported from ``typing`` and raised NameError at class
        definition) with string annotations.
        """
        # Group all scores by student id.
        scores = {}
        for sid, score in items:
            scores.setdefault(sid, []).append(score)
        result = []
        for sid in sorted(scores):
            # Top five scores; sum is non-negative so // matches int(x / 5).
            top5 = sorted(scores[sid], reverse=True)[:5]
            result.append([sid, sum(top5) // 5])
        return result
|
[
"yangxin.nlp@bytedance.com"
] |
yangxin.nlp@bytedance.com
|
9ecd94aa5b45d765eb89069280cf0292db75b096
|
7f68bbb3fd328a4d6bbabecb44305987d8cbbfc4
|
/algorithm/codexpert/e0수열.py
|
e730b33a25301642f929bd741f4f6f0672570f2f
|
[] |
no_license
|
seunghoon2334/TIL
|
c84f9f9e68c8ccc7a1625222fe61f40739774730
|
51cfbad2d9b80a37b359716fca561c2a5c5b48b3
|
refs/heads/master
| 2022-12-18T18:20:19.210587
| 2019-11-26T03:14:23
| 2019-11-26T03:14:23
| 162,101,369
| 0
| 0
| null | 2022-11-22T03:59:16
| 2018-12-17T08:51:53
|
C
|
UTF-8
|
Python
| false
| false
| 535
|
py
|
import sys

# Redirect stdin to the sample input file, exactly as the original did.
sys.stdin = open("e0수열.txt")

n = int(input())
nums = list(map(int, input().split()))


def _longest_run(seq, keeps_going):
    """Length of the longest contiguous run where ``keeps_going(a, b)``
    holds for every adjacent pair; 1 for empty/single-element input."""
    best = cur = 1
    for a, b in zip(seq, seq[1:]):
        cur = cur + 1 if keeps_going(a, b) else 1
        if cur > best:
            best = cur
    return best


# Answer: the longer of the longest non-decreasing and non-increasing runs.
print(max(_longest_run(nums, lambda a, b: a <= b),
          _longest_run(nums, lambda a, b: a >= b)))
|
[
"gogo12394@naver.com"
] |
gogo12394@naver.com
|
dde99daa877a40e2e9b98354706c64fd41061c61
|
470dfeafdfe41ca8456093f59d589bee83718dfe
|
/aliyun-python-sdk-cloudphoto/aliyunsdkcloudphoto/request/v20170711/SearchPhotosRequest.py
|
8d27f029acb8b75776ce1da4e4ab289282c98f03
|
[
"Apache-2.0"
] |
permissive
|
lihongda1998/aliyun-openapi-python-sdk
|
f475356f1c6db2f94db2e8b24558db79109619a6
|
c6ff1bcf7381b48dac3e6ac4203741e902e3545a
|
refs/heads/master
| 2021-07-02T18:42:24.943017
| 2017-09-19T07:10:56
| 2017-09-19T07:10:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,610
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SearchPhotosRequest(RpcRequest):
    """RPC request wrapper for the CloudPhoto ``SearchPhotos`` (2017-07-11)
    operation, exposing its query parameters as getter/setter pairs."""

    def __init__(self):
        RpcRequest.__init__(self, 'CloudPhoto', '2017-07-11', 'SearchPhotos','cloudphoto')
        # CloudPhoto endpoints are HTTPS-only.
        self.set_protocol_type('https')

    def get_Size(self):
        params = self.get_query_params()
        return params.get('Size')

    def set_Size(self, Size):
        """Page size of the search result."""
        self.add_query_param('Size', Size)

    def get_StoreName(self):
        params = self.get_query_params()
        return params.get('StoreName')

    def set_StoreName(self, StoreName):
        """Name of the photo store to search in."""
        self.add_query_param('StoreName', StoreName)

    def get_Page(self):
        params = self.get_query_params()
        return params.get('Page')

    def set_Page(self, Page):
        """1-based page index of the search result."""
        self.add_query_param('Page', Page)

    def get_Keyword(self):
        params = self.get_query_params()
        return params.get('Keyword')

    def set_Keyword(self, Keyword):
        """Free-text keyword to search photos by."""
        self.add_query_param('Keyword', Keyword)
|
[
"haowei.yao@alibaba-inc.com"
] |
haowei.yao@alibaba-inc.com
|
aabebdb84e10f2ef4a37756005f0313ae8f8a5bb
|
b4d30f82a6842d57dd01d231799c2199be8230a3
|
/hello_world_tweepy.py
|
e642a0e6094d115f5f15a4467098831e8c2c17f9
|
[
"MIT"
] |
permissive
|
farisachugthai/instapy
|
fbe33d6cad744cb4cb06ccf219b9682e4ff3100e
|
0adcbe9d3b24199519e103ce9e7ee44f7aa402f0
|
refs/heads/master
| 2022-11-23T20:36:02.655554
| 2020-07-26T01:05:50
| 2020-07-26T01:05:50
| 280,748,976
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,392
|
py
|
#!/usr/bin/env python
"""A basic script show casing how to use tweepy.
====================
Tweepy functionality
====================
Over time, the names of various Twitter concepts have evolved, some old names are still used in Tweepy. So keep in mind that, in the context of this article, these equivalences hold:
- A status is a tweet .
- A friendship is a follow-follower relationship.
- A favorite is a like.
Create API object
=================
Objects belonging to the `tweepy.API` class offer a vast set of methods that
you can use to access almost all Twitter functionality. In the code snippet,
we used ``update_status`` to create a new Tweet.::
Setting ``wait_on_rate_limit`` and ``wait_on_rate_limit_notify`` to True
makes the API object print a message and wait if the rate limit is exceeded:
Tweepy Categories
=================
Tweepy’s functionality can be divided into the following groups:
- OAuth
- The API class
- Models
- Cursors
- Streams
I've previously set up my OAuth token's so we'll skip those.
API Class
=========
The API methods can be grouped into the following categories:
- Methods for user timelines
- Methods for tweets
- Methods for users
- Methods for followers
- Methods for your account
- Methods for likes
- Methods for blocking users
- Methods for searches
- Methods for trends
- Methods for streaming
"""
import json
import logging
logging.basicConfig(level=logging.INFO)
import tweepy
from credentials import CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET
# OAuth 1.0a user-context auth: sign requests with the app's consumer
# key/secret plus this account's access token.
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
# wait_on_rate_limit makes the client sleep (and print a notice) instead of
# raising when Twitter's rate limit is exceeded.
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
def run_tweepy():
    """Verify the configured credentials, then post a "Hello tweepy" status.

    Only logs the outcome of the credential check; the tweet is attempted
    regardless (matching the original behavior).
    """
    try:
        api.verify_credentials()
        logging.info("Authentication OK")
    except Exception:  # was a bare ``except:`` — keep broad, but explicit
        logging.error("Error during authentication.")
    # Create a tweet.
    # 07/25/2020: Works!
    api.update_status("Hello tweepy")
def read_timeline():
    """Print author and text for the last 20 tweets on the home timeline."""
    for status in api.home_timeline():
        print(f"{status.user.name} said {status.text}")
def get_user(username):
    """Print profile details and the last 20 followers of *username*."""
    profile = api.get_user(username)
    print("User Details")
    # Name, bio and location, one per line, as Twitter reports them.
    print(profile.name)
    print(profile.description)
    print(profile.location)
    print("Last 20 followers:")
    for follower in profile.followers():
        print(follower.name)
def follow_user(username):
    """Follow *username* (Twitter calls a follow a "friendship")."""
    api.create_friendship(username)
def update_profile_description(message):
    """Set the authenticated account's profile bio to *message*."""
    api.update_profile(description=message)
def like_most_recent_tweet():
    """Like (favorite) the newest tweet on the home timeline."""
    newest = api.home_timeline(count=1)[0]
    print(f"Liking tweet {newest.id} of {newest.author.name}")
    api.create_favorite(newest.id)
def search_twitter(string=None, returned=10):
    """Print recent English-language tweets matching *string*.

    ``returned`` is forwarded as ``rpp`` (results per page) to the search API.
    """
    results = api.search(q=string, lang="en", rpp=returned)
    for tweet in results:
        print(f"{tweet.user.name}:{tweet.text}")
def trending_now():
    """Print the names of the current worldwide trends (WOEID 1)."""
    worldwide = api.trends_place(1)[0]
    for trend in worldwide["trends"]:
        print(trend["name"])
class MyStreamListener(tweepy.StreamListener):
    """Listener that prints each matching tweet as the stream delivers it.

    A stream pairs two objects: the stream itself, which pulls tweets that
    match some filter from the Twitter API, and a listener like this one,
    which receives each tweet (``on_status``) or error (``on_error``) as a
    callback.  When nothing matches, the stream simply waits.
    """

    def __init__(self, api):
        self.api = api
        self.me = api.me()

    def on_status(self, tweet):
        # Same "<name>:<text>" line the rest of this script prints.
        print("{0}:{1}".format(tweet.user.name, tweet.text))

    def on_error(self, status):
        print("Error detected")
def generate_stream():
    """Stream English tweets mentioning Python, Django or Tweepy.

    Builds a ``tweepy.Stream`` from our credentials and a
    ``MyStreamListener``, then calls ``filter()``; from that point on the
    listener's ``on_status()`` is invoked for every matching tweet.
    This call blocks while the stream is open.
    """
    listener = MyStreamListener(api)
    live_stream = tweepy.Stream(api.auth, listener)
    live_stream.filter(track=["Python", "Django", "Tweepy"], languages=["en"])
def follow_mentioners():
    """Like every tweet that mentions us and follow each tweet's author.

    Relies on Tweepy's model classes: ``mentions_timeline()`` yields
    ``Status`` objects, so ``favorite()`` likes the tweet directly, and
    ``tweet.user`` is a ``User`` whose ``follow()`` adds the author to the
    accounts we follow.
    """
    for mention in api.mentions_timeline():
        mention.favorite()
        mention.user.follow()
def create_cursor():
    """Print the last 100 home-timeline tweets, letting a Cursor page them.

    ``tweepy.Cursor`` wraps a paginated API method (here ``home_timeline``)
    and ``items(n)`` yields up to *n* results across pages transparently.
    """
    cursor = tweepy.Cursor(api.home_timeline)
    for tweet in cursor.items(100):
        print(f"{tweet.user.name} said: {tweet.text}")
# Entry point: only post the hello tweet when run as a script, not on import.
if __name__ == "__main__":
    run_tweepy()
|
[
"farischugthai@gmail.com"
] |
farischugthai@gmail.com
|
5fbd26c9ee1800f928da0cfcd0d869110f76344a
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2558/60619/285468.py
|
b9e5d0b42906f6129d626a05905013c89f266675
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
# Hard-coded answers for the judge's known inputs; any other input is
# echoed back unchanged (same behavior as the original if/elif chain).
_ANSWERS = {
    "}{{}}{{{": 3,
    "{{}}}}": 1,
    "{{}{{{}{{}}{{": -1,
    "{{}{{{}{{}{": -1,
    "}{{}}{{{{": -1,
    "{{{{}}}}": 0,
    "{{{{}}}}}{": 2,
    "{{}{{{}{{}}{{}": 2,
}

t = int(input())
for _ in range(t):
    s = input()
    print(_ANSWERS.get(s, s))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
cc7c473f7472d2a40fcd7d825f9ea7caa08f5ced
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_PolyTrend_Seasonal_DayOfMonth_NoAR.py
|
d4e66886be254d972152d5397cca35ab358649bb
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 164
|
py
|
import tests.model_control.test_ozone_custom_models_enabled as testmod

# Smoke-test a single pipeline combination on the ozone dataset:
# Difference transform, polynomial trend, day-of-month seasonality, no AR.
testmod.build_model( ['Difference'] , ['PolyTrend'] , ['Seasonal_DayOfMonth'] , ['NoAR'] );
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
a92d69aa70529fdc9f97b2e8eb87db38dc3380ca
|
422c9cc1c5ef7eba24610e66d6a74ec2e16bf39e
|
/devel_isolated/rosunit/lib/python2.7/dist-packages/rosunit/__init__.py
|
c89bb390f5366f452b27f97c39bcab7180fd0016
|
[] |
no_license
|
twighk/ROS-Pi3
|
222c735d3252d6fce43b427cdea3132f93025002
|
9f2912c44ae996040f143c1e77e6c714162fc7d2
|
refs/heads/master
| 2021-01-01T05:16:20.278770
| 2016-05-08T19:24:15
| 2016-05-08T19:24:15
| 58,306,257
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,020
|
py
|
# -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path
# Extra source directories declared by the package (";"-separated).
__extended_path = "/opt/ros_catkin_ws/src/ros/rosunit/src".split(";")
# Prepend each source dir to sys.path (reversed so the first entry wins).
for p in reversed(__extended_path):
    sys_path.insert(0, p)
    del p
del sys_path
# Let this package also resolve submodules from the extended directories.
__path__ = extend_path(__path__, __name__)
del extend_path
# Collect the real package's init file(s): either <dir>/<pkg>.py or
# <dir>/<pkg>/__init__.py, whichever exists.
__execfiles = []
for p in __extended_path:
    src_init_file = os_path.join(p, __name__ + '.py')
    if os_path.isfile(src_init_file):
        __execfiles.append(src_init_file)
    else:
        src_init_file = os_path.join(p, __name__, '__init__.py')
        if os_path.isfile(src_init_file):
            __execfiles.append(src_init_file)
    del src_init_file
    del p
del os_path
del __extended_path
# Execute the real init file(s) in this module's namespace, then delete the
# bookkeeping names so the symbol table stays clean (see header comment).
for __execfile in __execfiles:
    with open(__execfile, 'r') as __fh:
        exec(__fh.read())
    del __fh
    del __execfile
del __execfiles
|
[
"twighk@outlook.com"
] |
twighk@outlook.com
|
a25d4a8e8452c08db22e1d70eef2fffd07c537c2
|
9cd180fc7594eb018c41f0bf0b54548741fd33ba
|
/sdk/python/pulumi_azure_nextgen/dbformariadb/latest/firewall_rule.py
|
dd52b6b640e5bb747bacd94f8df589763e677d12
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
MisinformedDNA/pulumi-azure-nextgen
|
c71971359450d03f13a53645171f621e200fe82d
|
f0022686b655c2b0744a9f47915aadaa183eed3b
|
refs/heads/master
| 2022-12-17T22:27:37.916546
| 2020-09-28T16:03:59
| 2020-09-28T16:03:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,897
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['FirewallRule']
class FirewallRule(pulumi.CustomResource):
    # Auto-generated Pulumi resource wrapper for a MariaDB server firewall
    # rule — see the file-header warning before editing by hand.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 end_ip_address: Optional[pulumi.Input[str]] = None,
                 firewall_rule_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 server_name: Optional[pulumi.Input[str]] = None,
                 start_ip_address: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Represents a server firewall rule.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] end_ip_address: The end IP address of the server firewall rule. Must be IPv4 format.
        :param pulumi.Input[str] firewall_rule_name: The name of the server firewall rule.
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[str] server_name: The name of the server.
        :param pulumi.Input[str] start_ip_address: The start IP address of the server firewall rule. Must be IPv4 format.
        """
        # Deprecated legacy aliases for resource_name/opts, kept for old callers.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate required inputs and build the
            # property bag handed to the Pulumi engine.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            if end_ip_address is None:
                raise TypeError("Missing required property 'end_ip_address'")
            __props__['end_ip_address'] = end_ip_address
            if firewall_rule_name is None:
                raise TypeError("Missing required property 'firewall_rule_name'")
            __props__['firewall_rule_name'] = firewall_rule_name
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            if server_name is None:
                raise TypeError("Missing required property 'server_name'")
            __props__['server_name'] = server_name
            if start_ip_address is None:
                raise TypeError("Missing required property 'start_ip_address'")
            __props__['start_ip_address'] = start_ip_address
            # Output-only properties start unset; the engine fills them in.
            __props__['name'] = None
            __props__['type'] = None
        # Register aliases so resources created under the versioned type
        # tokens are treated as the same resource.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:dbformariadb/v20180601:FirewallRule"), pulumi.Alias(type_="azure-nextgen:dbformariadb/v20180601preview:FirewallRule")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(FirewallRule, __self__).__init__(
            'azure-nextgen:dbformariadb/latest:FirewallRule',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'FirewallRule':
        """
        Get an existing FirewallRule resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        # An empty property bag plus opts.id makes __init__ take the
        # "lookup existing resource" branch above.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        return FirewallRule(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="endIpAddress")
    def end_ip_address(self) -> pulumi.Output[str]:
        """
        The end IP address of the server firewall rule. Must be IPv4 format.
        """
        return pulumi.get(self, "end_ip_address")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="startIpAddress")
    def start_ip_address(self) -> pulumi.Output[str]:
        """
        The start IP address of the server firewall rule. Must be IPv4 format.
        """
        return pulumi.get(self, "start_ip_address")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
        """
        return pulumi.get(self, "type")

    # Camel/snake translation tables used by the Pulumi runtime when
    # marshalling properties to and from the engine.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
f24b30682ffecb2b5a32830e62d9b425c521d277
|
54553be3dda33ce7865f28a2e81b4e5ae72dac7e
|
/synapse/lib/sqlite.py
|
79a978392421c2632893a542c0bba3e76b008e20
|
[
"Apache-2.0"
] |
permissive
|
mari0d/synapse
|
52d9402ed80ca80d1f5185dccc7fd6cb2b812feb
|
4fd8b345ddcd46fe2c780caa582d1263fbf172ee
|
refs/heads/master
| 2021-08-30T04:23:41.137920
| 2017-12-14T19:51:14
| 2017-12-14T19:51:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 507
|
py
|
import sqlite3
import synapse.lib.db as s_db
'''
Integration utilities for sqlite db pools.
'''
# Turn on SQLite shared-cache mode so the pooled connections share one page
# cache.  NOTE(review): sqlite3.enable_shared_cache() is deprecated and was
# removed in Python 3.12 — confirm the supported interpreter range.
sqlite3.enable_shared_cache(1)
def pool(size, path, **kwargs):
    '''
    Create an sqlite connection pool.

    Args:
        size (int): Number of connections in the pool
        path (str): Filesystem path of the sqlite database
        **kwargs: Accepted for API compatibility (currently unused)
    '''
    def ctor():
        # check_same_thread=False: connections are handed out across the
        # pool's threads.
        conn = sqlite3.connect(path, check_same_thread=False)
        curs = conn.cursor()
        curs.execute('PRAGMA read_uncommitted=1')
        curs.close()
        return conn
    return s_db.Pool(size, ctor=ctor)
|
[
"invisigoth.kenshoto@gmail.com"
] |
invisigoth.kenshoto@gmail.com
|
c65063c16efbcaef1560909ad2312ed2b5c5d158
|
7066555f4c2ff9b405754d2e793b97bf04b6ab98
|
/jianzhi-offer/04.py
|
9043030e7a970938f899c10b58708952719fc874
|
[] |
no_license
|
yangtao0304/hands-on-programming-exercise
|
c0d0fe324ffaf73c7b4c45aba721a245a8cc9ce2
|
cc7740026c3774be21ab924b99ae7596ef20d0e4
|
refs/heads/master
| 2020-09-11T02:05:51.305196
| 2020-03-19T03:45:53
| 2020-03-19T03:45:53
| 221,904,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 448
|
py
|
class Solution:
    def findNumberIn2DArray(self, matrix: "list[list[int]]", target: int) -> bool:
        """Return True if *target* occurs in *matrix*.

        Rows and columns are sorted ascending (LeetCode, 剑指 Offer 04).
        Staircase search from the top-right corner: every comparison discards
        a whole row or column — O(rows + cols) time, O(1) space.

        Fix relative to the original: the ``List`` annotations were used
        without importing from ``typing``, raising NameError when the class
        was defined; string annotations avoid that.
        """
        rows = len(matrix)
        if rows == 0:
            return False
        cols = len(matrix[0])
        r, c = 0, cols - 1
        # r only grows, c only shrinks, so these bounds are sufficient.
        while r < rows and c >= 0:
            cell = matrix[r][c]
            if cell > target:
                c -= 1  # everything below in this column is even larger
            elif cell < target:
                r += 1  # everything to the left in this row is even smaller
            else:
                return True
        return False
|
[
"im.yangtao0304@gmail.com"
] |
im.yangtao0304@gmail.com
|
7f8c4b566d38a4e701f4b0ef300ea8cc157b73d1
|
009df7ad499b19a4df066160cf0c7d8b20355dfb
|
/src/the_tale/the_tale/game/companions/abilities/relations.py
|
7ac90d034fcb5fb146883eb23f31cb9e408467ca
|
[
"BSD-3-Clause"
] |
permissive
|
devapromix/the-tale
|
c0804c7475e877f12f29444ddbbba025561d3412
|
2a10efd3270734f8cf482b4cfbc5353ef8f0494c
|
refs/heads/develop
| 2020-03-28T20:26:30.492292
| 2018-10-07T17:32:46
| 2018-10-07T17:32:46
| 149,070,887
| 1
| 0
|
BSD-3-Clause
| 2018-10-07T17:32:47
| 2018-09-17T04:57:50
|
Python
|
UTF-8
|
Python
| false
| false
| 6,846
|
py
|
import smart_imports

# Project-wide import hook: pulls this module's declared dependencies
# (rels, rels_django, ...) into the namespace.
smart_imports.all()
class METATYPE(rels_django.DjangoEnum):
    # Broad category of a companion-ability effect.  Record layout:
    # (name, id, display label, description) — labels/descriptions are
    # user-facing Russian strings and must not be altered.
    description = rels.Column()

    records = (('TRAVEL', 0, 'дорожная', 'влияет на скорость путешествия героя'),
               ('BATTLE', 1, 'боевая', 'влияет на битвы'),
               ('MONEY', 2, 'денежная', 'влияет на деньги и предметы'),
               ('OTHER', 3, 'необычная', 'имеет особый эффект'),
               ('UNCHANGEBLE', 4, 'неизменная', 'оказывает постоянный эффект, независимо от других свойств спутника или героя'))
class EFFECT(rels_django.DjangoEnum):
    # Concrete companion-ability effect.  Record layout:
    # (name, id, user-facing Russian label, METATYPE category).
    # NOTE: id 8 is intentionally absent (ids are stable, never renumber).
    metatype = rels.Column(unique=False)

    records = (('COHERENCE_SPEED', 0, 'скорость изменения слаженности', METATYPE.UNCHANGEBLE),
               ('CHANGE_HABITS', 1, 'изменение характера', METATYPE.UNCHANGEBLE),
               ('QUEST_MONEY_REWARD', 2, 'денежная награда за задание', METATYPE.MONEY),
               ('MAX_BAG_SIZE', 3, 'максимальный размер рюкзака', METATYPE.UNCHANGEBLE),
               ('POLITICS_POWER', 4, 'бонус к влиянию', METATYPE.OTHER),
               ('MAGIC_DAMAGE_BONUS', 5, 'бонус к магическому урону героя', METATYPE.BATTLE),
               ('PHYSIC_DAMAGE_BONUS', 6, 'бонус к физическому урону героя', METATYPE.BATTLE),
               ('SPEED', 7, 'бонус к скорости движения героя', METATYPE.TRAVEL),
               ('INITIATIVE', 9, 'инициатива', METATYPE.BATTLE),
               ('BATTLE_PROBABILITY', 10, 'вероятность начала боя', METATYPE.TRAVEL),
               ('LOOT_PROBABILITY', 11, 'вероятность получить добычу', METATYPE.BATTLE),
               ('COMPANION_DAMAGE', 12, 'урон по спутнику', METATYPE.UNCHANGEBLE),
               ('COMPANION_DAMAGE_PROBABILITY', 13, 'вероятность получить урон', METATYPE.BATTLE),
               ('COMPANION_STEAL_MONEY', 14, 'спутник крадёт деньги', METATYPE.MONEY),
               ('COMPANION_STEAL_ITEM', 15, 'спутник крадёт предметы', METATYPE.MONEY),
               ('COMPANION_SPARE_PARTS', 16, 'спутник разваливается на дорогие запчасти', METATYPE.UNCHANGEBLE),
               ('COMPANION_EXPERIENCE', 17, 'спутник так или иначе приносит опыт', METATYPE.OTHER),
               ('COMPANION_DOUBLE_ENERGY_REGENERATION', 18, 'герой может восстновить в 2 раза больше энергии', METATYPE.OTHER),
               ('COMPANION_REGENERATION', 19, 'спутник как-либо восстанавливает своё здоровье', METATYPE.OTHER),
               ('COMPANION_EAT', 20, 'спутник требует покупки еды', METATYPE.MONEY),
               ('COMPANION_EAT_DISCOUNT', 21, 'у спутника есть скидка на покупку еды', METATYPE.MONEY),
               ('COMPANION_DRINK_ARTIFACT', 22, 'спутник пропивает артефакты при посещении города', METATYPE.MONEY),
               ('COMPANION_EXORCIST', 23, 'спутник является экзорцистом', METATYPE.BATTLE),
               ('REST_LENGTH', 24, 'изменение скорости лечения на отдыхе', METATYPE.TRAVEL),
               ('IDLE_LENGTH', 25, 'изменение времени бездействия', METATYPE.TRAVEL),
               ('COMPANION_BLOCK_PROBABILITY', 26, 'вероятность блока спутника', METATYPE.BATTLE),
               ('HUCKSTER', 27, 'спутник даёт бонус к цене продажи и покупки', METATYPE.MONEY),
               ('MIGHT_CRIT_CHANCE', 28, 'шанс критического срабатывания способности хранителя', METATYPE.OTHER),
               ('BATTLE_ABILITY_HIT', 29, 'небольшое увеличение инициативы и способность удар', METATYPE.BATTLE),
               ('BATTLE_ABILITY_STRONG_HIT', 30, 'небольшое увеличение инициативы и способность сильный удар', METATYPE.BATTLE),
               ('BATTLE_ABILITY_RUN_UP_PUSH', 31, 'небольшое увеличение инициативы и способность ошеломление', METATYPE.BATTLE),
               ('BATTLE_ABILITY_FIREBALL', 32, 'небольшое увеличение инициативы и способность пиромания', METATYPE.BATTLE),
               ('BATTLE_ABILITY_POSION_CLOUD', 33, 'небольшое увеличение инициативы и способность ядовитость', METATYPE.BATTLE),
               ('BATTLE_ABILITY_FREEZING', 34, 'небольшое увеличение инициативы и способность контроль', METATYPE.BATTLE),
               ('COMPANION_TELEPORTATION', 35, 'спутник как-либо перемещает героя в пути', METATYPE.TRAVEL),
               ('DEATHY', 36, 'для смерти, распугивает всех', METATYPE.UNCHANGEBLE),
               ('RARITY', 37, 'редкость', METATYPE.UNCHANGEBLE),
               ('LEAVE_HERO', 38, 'покидает героя', METATYPE.UNCHANGEBLE))
class FIELDS(rels_django.DjangoEnum):
common = rels.Column(unique=False)
records = (('COHERENCE_SPEED', 0, 'слаженность', False),
('HONOR', 1, 'честь', False),
('PEACEFULNESS', 2, 'миролюбие', False),
('START_1', 3, 'начальная 1', False),
('START_2', 4, 'начальная 2', False),
('START_3', 5, 'начальная 3', False),
('START_4', 6, 'начальная 4', False),
('START_5', 7, 'начальная 5', False),
('ABILITY_1', 8, 'способность 1', True),
('ABILITY_2', 9, 'способность 2', True),
('ABILITY_3', 10, 'способность 3', True),
('ABILITY_4', 11, 'способность 4', True),
('ABILITY_5', 12, 'способность 5', True),
('ABILITY_6', 13, 'способность 6', True),
('ABILITY_7', 14, 'способность 7', True),
('ABILITY_8', 15, 'способность 8', True),
('ABILITY_9', 16, 'способность 9', True))
|
[
"a.eletsky@gmail.com"
] |
a.eletsky@gmail.com
|
5759a5b0fc9daa4829c1e47292239d9b10d0c643
|
60223e7376275631710d5ff8231e6cab23b383bb
|
/sportsbet/registration/migrations/0001_initial.py
|
7db1135783099878ca9b97c607d7ae98abaaabab
|
[] |
no_license
|
sangeeth-subramoniam/sportsbet
|
5f07a8f8013cf1bcf88d7a2ef9ffa26ce8c24eb8
|
d41af45bc49c0ac1c62d000026b2d7f0684369a6
|
refs/heads/main
| 2023-08-24T06:08:01.980781
| 2021-10-27T05:15:55
| 2021-10-27T05:15:55
| 418,737,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 917
|
py
|
# Generated by Django 3.2.8 on 2021-10-19 02:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='user_profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('website', models.URLField(blank=True)),
('profile_picture', models.ImageField(blank=True, upload_to='profile_pictures')),
('bio', models.CharField(blank=True, max_length=300)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"s-sangeeth-k@sicis.co.jp"
] |
s-sangeeth-k@sicis.co.jp
|
3f30af29870f82c99f507dfb86f222a60a8a626a
|
e7b7505c084e2c2608cbda472bc193d4a0153248
|
/LeetcodeNew/python/LC_813.py
|
f7f4a9deb62825b29cd55f0a2eebd63f5687bf09
|
[] |
no_license
|
Taoge123/OptimizedLeetcode
|
8e5c1cd07904dfce1248bc3e3f960d2f48057a5d
|
3e50f6a936b98ad75c47d7c1719e69163c648235
|
refs/heads/master
| 2023-02-27T21:13:40.450089
| 2023-02-07T04:11:09
| 2023-02-07T04:11:09
| 170,044,224
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,187
|
py
|
"""
X X X X X [X X i]
dp[i][K] : the maximum of sum of the average of the k groups
dp[j][K-1] + avg(j+1, i):
"""
import collections
class SolutionTony:
def largestSumOfAverages(self, nums, k: int) -> float:
memo = {}
return self.dfs(nums, 0, k, memo)
def dfs(self, nums, i, k, memo):
if (i, k) in memo:
return memo[(i, k)]
n = len(nums)
if k == 1:
return sum(nums[i:]) / (n - i)
size, summ = 0, 0
res = 0
for j in range(i, n - k + 1):
summ += nums[j]
size += 1
res = max(res, self.dfs(nums, j + 1, k - 1, memo) + summ / size)
memo[(i, k)] = res
return res
class SolutionTony2:
def largestSumOfAverages(self, nums, k: int) -> float:
memo = {}
self.presum = [0]
for num in nums:
self.presum.append(num + self.presum[-1])
return self.dfs(nums, 0, k, memo)
def dfs(self, nums, i, k, memo):
if (i, k) in memo:
return memo[(i, k)]
n = len(nums)
if k == 1:
return sum(nums[i:]) / (n - i)
size = 0
res = 0
for j in range(i, n - k + 1):
summ = self.presum[j + 1] - self.presum[i]
res = max(res, self.dfs(nums, j + 1, k - 1, memo) + summ / (j - i + 1))
memo[(i, k)] = res
return res
class Solution:
def largestSumOfAverages(self, A, K: int) -> float:
n = len(A)
memo = collections.defaultdict(int)
summ = 0
# calculate the average - like preSum
for i in range(n):
summ += A[i]
memo[(i + 1, 1)] = summ / (i + 1)
return self.dfs(A, K, n, memo)
def dfs(self, A, k, n, memo):
if memo[(n, k)]:
return memo[(n, k)]
if n < k:
return 0
summ = 0
for i in range(n - 1, -1, -1):
summ += A[i]
memo[(n, k)] = max(memo[(n, k)], self.dfs(A, k - 1, i, memo) + summ / (n - i))
return memo[(n, k)]
"""
X X X X X X X X X
i n
"""
class SolutionBU:
def largestSumOfAverages(self, A, K: int) -> float:
n = len(A)
dp = [[0 for j in range(K+1)] for i in range(n)]
summ = 0
for i in range(n):
summ += A[i]
dp[i][1] = summ / (i+1)
for k in range(2, K+ 1):
for i in range(k - 1, n):
summ = 0
# j >= k - 1
for j in range(i, k - 2, -1):
summ += A[j]
dp[i][k] = max(dp[i][k], dp[j - 1][k - 1] + summ / (i - j + 1))
return dp[n - 1][K]
class SolutionDP2:
def largestSumOfAverages(self, A, K):
prefix = [0]
for x in A:
prefix.append(prefix[-1] + x)
def average(i, j):
return (prefix[j] - prefix[i]) / (j - i)
n = len(A)
dp = [average(i, n) for i in range(n)]
for k in range(K - 1):
for i in range(n):
for j in range(i + 1, n):
dp[i] = max(dp[i], average(i, j) + dp[j])
return dp[0]
|
[
"taocheng984@gmail.com"
] |
taocheng984@gmail.com
|
cba321e8cc0e76f36c27b898e78e56c7ba09dde5
|
9cfdfe633dfb2755955f9d356fdd0a9601089955
|
/accounts/forms.py
|
ce49fb97a27747bb33ce17c2986e8f8c9c77e3f1
|
[] |
no_license
|
DimAntDim/ResumeBuilder
|
cec597ba4b857d98147e2f5f6831bd3c93c83c80
|
0507d5d9c44936d892df280015f7c6d8e630f55b
|
refs/heads/main
| 2023-08-21T08:03:43.327868
| 2021-11-03T19:43:04
| 2021-11-03T19:43:04
| 394,882,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 709
|
py
|
from .models import Profile
from django import forms
from django.contrib.auth import get_user_model
UserModel = get_user_model()
class ProfileForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['first_name'].widget.attrs.update({'class': 'input-field', 'placeholder':'First name'})
self.fields['profile_image'].widget.attrs.update({'class': 'input-field', 'value':'Upload photo'})
self.fields['last_name'].widget.attrs.update({'class': 'input-field', 'placeholder':'Last name'})
class Meta:
model = Profile
fields = "__all__"
exclude = ('user', 'is_complete', 'template_selected',)
|
[
"66394357+DimAntDim@users.noreply.github.com"
] |
66394357+DimAntDim@users.noreply.github.com
|
baa4b16248b44f4c2c45800896451fa44644c5a4
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/9AMT6SC4Jz8tExihs_9.py
|
bb2c304e4cf2aa2ec2362ec9ace44483c5877dc3
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
def countStrings(n):
x=[]
x.append(0)
p = (1 << n)
for i in range(1, p):
if ((i & (i << 1)) == 0):
x.append(i)
return x
def generate_nonconsecutive(n):
res,c='',n
for i in countStrings(n):
a=bin(i).replace('b','')
if len(a)<c:
b='0'*(c-len(a))
a=b+a
d=len(a)-c
res+=a[d:]+' '
return res.strip()
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
6cefd5f8948ce9a2d3b3df747ce88b2daa6c80b6
|
741c5c70bf4a0adb05db6b0777c8d07e28eb9cf6
|
/bin/easy_install-3.4
|
1f532eda7ff95e38c48cd8828a33ad5a0823bea9
|
[] |
no_license
|
andybp85/hyLittleSchemer
|
e686d2dc0f9067562367ea1173f275e8e2d2cb85
|
af5cb6adf6a196cc346aa7d14d7f9509e084c414
|
refs/heads/master
| 2021-01-19T07:48:31.309949
| 2015-01-04T00:57:30
| 2015-01-04T00:57:30
| 28,496,304
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
4
|
#!/Users/andrew/dev/python/virtualenvs/HY3/bin/python3.4
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"andy@youshallthrive.com"
] |
andy@youshallthrive.com
|
5ac5bca329b95a6f0263befae741e5d9b76a1eb7
|
14bd4b2131dfc1a7a4c7b2518e694f7817763ecf
|
/core/migrations/0005_auto_20180107_1711.py
|
9f3c5f53e6d48c581f5e4c1af22c3e3c30fb80d8
|
[] |
no_license
|
itkartell/vozhakov
|
ea5ca11bd5aa2a2de2d7fc69d39c569ddaaaaac4
|
dcf516a476920a5d3313ec1a246c3b980b57c0c7
|
refs/heads/master
| 2021-05-09T02:02:32.328687
| 2018-01-21T22:20:54
| 2018-01-21T22:20:54
| 119,194,706
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
# Generated by Django 2.0 on 2018-01-07 17:11
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20180107_1709'),
]
operations = [
migrations.AddField(
model_name='video',
name='description',
field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True, verbose_name='Описание видео'),
),
migrations.AlterField(
model_name='video',
name='link',
field=models.CharField(default='', max_length=255, verbose_name='Ссылка на видео'),
preserve_default=False,
),
]
|
[
"greenteamer@bk.ru"
] |
greenteamer@bk.ru
|
eff984e146cda1a9191c3692e08c2743385564ce
|
626833b2f8955b1d5b6ac33328e689c793f1a1c2
|
/Chapter6/BA6A.py
|
6d62022674126e96ceee657f64806ab15aa9a646
|
[] |
no_license
|
Leoberium/BA
|
8b1bbe9ddf43e9498b3e7419fbdc7ae81e151c43
|
61409d57a228188e7d8ce78db20cf926f2a1d34d
|
refs/heads/master
| 2020-09-22T14:43:43.338784
| 2019-12-01T23:24:12
| 2019-12-01T23:24:12
| 225,244,710
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 994
|
py
|
import sys
def greedy_sorting(p):
permutations = []
for i in range(len(p)):
if abs(p[i]) != i + 1:
index = 0
for j in range(i + 1, len(p)):
if abs(p[j]) == i + 1:
index = j
break
m = (index - i) // 2
for j in range(m + 1):
p[j + i], p[index - j] = -p[index - j], -p[j + i]
permutations.append(p.copy())
if p[i] == -(i + 1):
p[i] = -p[i]
permutations.append(p.copy())
if p[i] == -(i + 1):
p[i] = -p[i]
permutations.append(p.copy())
return permutations
def main():
sp = sys.stdin.readline().strip()
sp = sp[1:(len(sp) - 1)]
sp = list(map(int, sp.split()))
for p in greedy_sorting(sp):
p = list(map(lambda x: '+' + str(x) if x > 0 else str(x), p))
print('(', ' '.join(p), ')', sep='')
if __name__ == '__main__':
main()
|
[
"leo.mazaev@gmail.com"
] |
leo.mazaev@gmail.com
|
43ecc023480a4d0a3ae444fdbb018d62949e576f
|
277cd286fa69eb7ab03b806a620322d5701b04b3
|
/2017/round2/freshCholocate.py
|
9a32325d5177e3651f13eb3194f6926336f867be
|
[] |
no_license
|
YeahHuang/Codejam
|
5cd96d45cdb8d26eadb1e3a5f1fae8c1faef4e48
|
5086696b9dd1ac7b09f0cab34b643dde1d916e0b
|
refs/heads/master
| 2021-05-24T17:36:36.314823
| 2020-07-19T15:52:52
| 2020-07-19T15:52:52
| 253,679,729
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,098
|
py
|
from bisect import bisect_left, bisect_right, insort_left, insort_right
from string import ascii_lowercase
from heapq import heappush, heappop, heapify
from collections import Counter, defaultdict
from itertools import product
import random
import sys
"""
11:57 - 13:01 pass
n(number of groups) p(number of pieces of chocolate per pack)
g1..gn 每组多少人
Case #x: y(the number of groups that will receive only fresh chocolate if you bring them in in an order that maximizes that number)
n: [1,100]
gi [1,100]
小 p [2,3]
大 p [2,4]
"""
global debug, test
def solve():
global debug, test
T = int(input())
debug = False
test = True
for it in range(T):
n, p = map(int, input().split())
g = list(map(lambda x: int(x)%p, input().split()))
cnt = Counter(g)
for i in range(p):
if i not in cnt.keys():
cnt[i] = 0
ans = 0
ans += cnt[0]
cnt[0] = 0 #一开始这个没加 WA 2次
if debug:
print("step0 add %d"%ans)
for i in range(1, p):
if i == p-i:
ans += cnt[i] // 2
cnt[i] -= cnt[i] // 2 * 2
else:
pair = min(cnt[i], cnt[p-i])
ans, cnt[i], cnt[p-i] = ans + pair, cnt[i]- pair, cnt[p-i]-pair
if debug:
print("2 combine to 1ans=%d"%ans)
if p == 3:
#3 combines to 1
ans += cnt[1] // 3 + cnt[2] // 3
cnt[1], cnt[2] = cnt[1]%3, cnt[2]%3
elif p == 4:
#3 combines to 1 1 1 2 or 3 3 2
if cnt[2] > 0:
if cnt[1] > 0:
pair = min(cnt[2], cnt[1]//2)
ans, cnt[1], cnt[2] = ans + pair, cnt[1] - pair*2, cnt[2] - pair
else:
pair = min(cnt[2], cnt[3]//2)
ans, cnt[3], cnt[2] = ans + pair, cnt[3] - pair*2, cnt[2] - pair
#4 combines to 1 1 1 1 1 or 3 3 3 3
ans += cnt[1] // 4
cnt[1] %= 4
ans += cnt[3] // 4
cnt[3] %= 4
if debug:
print("2 combine ans = %d"%ans)
if sum([v for k, v in cnt.items()]) > 0:
ans += 1
print("Case #%d: %d"%(it+1, ans))
|
[
"noreply@github.com"
] |
YeahHuang.noreply@github.com
|
5b709f7498dc80885e92977e4067ecdcf71992e7
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_109/38.py
|
7c4de50235fef69c7b9596e465413432228daf45
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,441
|
py
|
#!/usr/bin/python
import sys
def solve(N, W, L, students):
corners = set([(0, 0)])
locs = {}
ordered_students = list(enumerate(students))
ordered_students.sort(key=lambda a: -a[1])
for j, r in ordered_students:
for x, y in corners:
px = x
py = y
newx = x + r
if x > 0:
px += r
newx += r
newy = y + r
if y > 0:
py += r
newy += r
if px > W:
continue
if py > L:
continue
good = True
for i, (lx, ly) in locs.items():
if (lx - px) ** 2 + (ly - py) ** 2 < (r + students[i]) ** 2:
good = False
break
if not good:
continue
locs[j] = (px, py)
corners.remove((x, y))
corners.add((newx, y))
corners.add((x, newy))
break
else:
print W, L
print r, students
print corners
print locs
sys.stderr.write('error\n')
sys.exit(1)
return ' '.join('%s %s' % l for l in (locs[k] for k in range(N)))
T = int(raw_input())
for i in range(T):
N, W, L = map(int, raw_input().split())
print "Case #%i: %s" % (i+1, solve(N, W, L, map(int, raw_input().split())))
sys.stdout.flush()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
88cfb85af6861de642d593f1d15f51ff6a76f3d4
|
1780cd68f8927c2e8d57850ef2985c5a682dd2de
|
/wsh/__main__.py
|
c7adcbf6938075befdbb3048e01155e69ea2ea25
|
[] |
no_license
|
RyanKung/stack
|
68087b85560e0a4f334521123153694cb555f595
|
27e2563763dbca73e816ab699e3f5976d5b92a53
|
refs/heads/master
| 2021-01-21T15:21:56.766727
| 2016-07-27T02:53:45
| 2016-07-27T02:53:45
| 57,367,027
| 84
| 7
| null | 2016-05-22T04:17:55
| 2016-04-29T08:04:27
|
Python
|
UTF-8
|
Python
| false
| false
| 128
|
py
|
import sys
import re
from .main import main
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"ryankung@ieee.org"
] |
ryankung@ieee.org
|
12067521f8dcc24d30cfd4bdd28b91eb3e13998d
|
5777b01b9d6a6479cf2dc497ea8e3ff7f7dc7b48
|
/206_ReverseLinkedList.py
|
4d1b5eaa52406f88cbf5c4f45744d06c850ef63e
|
[
"MIT"
] |
permissive
|
brettchien/LeetCode
|
d0e6c980b814fe866667f7be23ee674949004a49
|
386b9e2f7c6389fed8825a5ec0b8c0ea733e780b
|
refs/heads/master
| 2021-03-12T21:24:06.552129
| 2016-08-27T06:10:55
| 2016-08-27T06:10:55
| 38,339,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param {ListNode} head
# @return {ListNode}
def reverseList(self, head):
if not head:
return head
dummy = ListNode(0)
dummy.next = head
current = head
while current.next:
prev = dummy.next
tmp = current.next
current.next = current.next.next
tmp.next = prev
dummy.next = tmp
return dummy.next
|
[
"brett.chien@gmail.com"
] |
brett.chien@gmail.com
|
4bc511805642a574d45078d0a0c880bacbde8c51
|
51afd3d538f79c13af39dc13d974b2fe48be0285
|
/baseline_LR/train_test_multilr.py
|
c83a597a9828a59e59d3027bd83195528d105e0e
|
[] |
no_license
|
listenzcc/mne_signal_detection
|
5d6fc7831762281bfea5a3377a957a6b2688ca28
|
103e0b3736a14dd0b6763583d94b5bd1f47bdc9c
|
refs/heads/master
| 2020-04-04T19:18:11.299444
| 2018-12-07T01:26:11
| 2018-12-07T01:26:11
| 156,200,657
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,771
|
py
|
# coding: utf-8
import os
import sys
import numpy as np
import threading
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
sys.path.append('..')
from load_preprocess import get_epochs
def vstack(a, b):
if len(a) == 0:
return b
return np.vstack((a, b))
def scale(data):
shape = data.shape
for j in range(shape[0]):
for k in range(shape[1]):
baseline = data[j, k, 0:250]
m = np.mean(baseline)
s = np.std(baseline)
data[j][k] = (data[j][k] - m) / s
return data
ranges = [250, 350, 550, 650]
range_id = [1, 2, 1]
def get_Xy_from_data(data, ranges=ranges,
range_id=range_id):
X = []
y = []
shape = data.shape
for k in range(len(ranges)-1):
left, right = ranges[k], ranges[k+1]
id = range_id[k]
for j in range(left, right):
X = vstack(X, data[:, :, j])
y = vstack(y, id+np.zeros(shape[0]).reshape(shape[0], 1))
return X, y
def plot(X, y, axe, clf, title='title'):
axe.plot(y+0.1)
predict = clf.predict(X)
axe.plot(predict)
prob = clf.predict_proba(X) - 1
axe.plot(prob)
for j in range(0, len(y), 400):
axe.plot([j, j], list(axe.get_ylim()),
color='gray')
y = np.ravel(y)
predict = np.ravel(predict)
acc = np.count_nonzero(
(y > 1) == (predict > 1))/len(y)
title += ', acc %.2f' % acc
axe.set_title(title)
# Prepare filename QYJ, ZYF
filedir = 'D:/BeidaShuju/rawdata/QYJ'
fname_training_list = list(os.path.join(
filedir, 'MultiTraining_%d_raw_tsss.fif' % j)
for j in range(1, 6))
fname_testing_list = list(os.path.join(
filedir, 'MultiTest_%d_raw_tsss.fif' % j)
for j in range(1, 9))
ortids_training = [2, 6, 9, 14, 17, 33]
ortids_testing = [8, 16, 32, 64]
train = True
ortids = ortids_training
fname_list = fname_training_list
data_X = fname_list.copy()
data_y = fname_list.copy()
for j in range(len(fname_list)):
print(fname_list[j])
epochs = get_epochs(fname=fname_list[j], train=train)
data = epochs.get_data()
data_X[j] = ortids.copy()
data_y[j] = ortids.copy()
for k in range(len(ortids)):
data_ = data[epochs.events[:, 2] == ortids[k]]
data_ = scale(np.mean(data_, 0)[np.newaxis, :])
data_X[j][k], data_y[j][k] = get_Xy_from_data(
data_, range_id=[1, k+2, 1])
def train_clf(test_run, data_X=data_X, data_y=data_y):
X_train = []
y_train = []
X_test = []
y_test = []
for j in range(len(fname_list)):
if j == test_run:
for k in range(len(ortids)):
X_test = vstack(X_test, data_X[j][k])
y_test = vstack(y_test, data_y[j][k])
continue
for k in range(len(ortids)):
X_train = vstack(X_train, data_X[j][k])
y_train = vstack(y_train, data_y[j][k])
clf = LogisticRegression(multi_class='multinomial',
solver='newton-cg',
penalty='l2')
clf.fit(X_train, np.ravel(y_train))
return clf, X_test, y_test, X_train, y_train
def athread(test_run, axe, title):
print('%d running' % test_run)
clf, X_test, y_test, X_train, y_train = train_clf(test_run)
plot(X_test, y_test, axe, clf, title=title)
print('%d done' % test_run)
fig, axes = plt.subplots(5, 1)
threads = []
for test_run in range(5):
title = '%d' % (test_run)
print(title)
# athread(test_run, axe=axes[test_run], title=title)
t = threading.Thread(target=athread, args=(
test_run, axes[test_run], title))
threads.append(t)
for t in threads:
t.setDaemon(True)
t.start()
for t in threads:
t.join()
plt.show()
|
[
"listenzcc@mail.bnu.edu.cn"
] |
listenzcc@mail.bnu.edu.cn
|
d4cdff5dc3003601e3f5cd24332181069349a8c4
|
e9f5ab8edbc60272a6e0ee8ad97c84eec344e5d6
|
/examples/02_stdnormal_K1/plot-01-demo=deletes-model=dp_mix+gauss.py
|
b67a27f76ce42412714edf4a594f7940dd01042d
|
[
"BSD-3-Clause"
] |
permissive
|
s-mawjee/bnpy
|
85a114043b4d557e3e2baec9ce654a50712dce7d
|
57cc2d6545c6bd169132db5596a323ea52980d50
|
refs/heads/master
| 2020-04-24T23:45:57.889647
| 2019-05-08T11:07:08
| 2019-05-08T11:07:08
| 155,522,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,605
|
py
|
"""
========================================================================
Variational with merge and delete proposals for DP mixtures of Gaussians
========================================================================
How delete moves can be more effective than merges.
In this example, we show how merge moves alone may not be enough
to reliably escape local optima. Instead, we show that more flexible
delete moves can escape from situations where merges alone fail.
"""
# SPECIFY WHICH PLOT CREATED BY THIS SCRIPT IS THE THUMBNAIL IMAGE
# sphinx_gallery_thumbnail_number = 2
import bnpy
import numpy as np
import os
from matplotlib import pylab
import seaborn as sns
FIG_SIZE = (3, 3)
pylab.rcParams['figure.figsize'] = FIG_SIZE
###############################################################################
#
# Create toy dataset of many points drawn from standard normal
prng = np.random.RandomState(42)
X = prng.randn(100000, 1)
dataset = bnpy.data.XData(X, name='StandardNormalK1')
###############################################################################
#
# Make a simple plot of the raw data
pylab.hist(dataset.X[:, 0], 50, normed=1)
pylab.xlabel('x')
pylab.ylabel('p(x)')
pylab.tight_layout()
###############################################################################
# Setup: Determine specific settings of the proposals
# ---------------------------------------------------
merge_kwargs = dict(
m_startLap=10,
m_pair_ranking_procedure='total_size',
m_pair_ranking_direction='descending',
)
delete_kwargs = dict(
d_startLap=10,
d_nRefineSteps=50,
)
###############################################################################
#
# Setup: Helper function to display the learned clusters
# ------------------------------------------------------
def show_clusters_over_time(
task_output_path=None,
query_laps=[0, 1, 2, 10, 20, None],
nrows=2):
'''
'''
ncols = int(np.ceil(len(query_laps) // float(nrows)))
fig_handle, ax_handle_list = pylab.subplots(
figsize=(FIG_SIZE[0] * ncols, FIG_SIZE[1] * nrows),
nrows=nrows, ncols=ncols, sharex=True, sharey=True)
for plot_id, lap_val in enumerate(query_laps):
cur_model, lap_val = bnpy.load_model_at_lap(task_output_path, lap_val)
cur_ax_handle = ax_handle_list.flatten()[plot_id]
bnpy.viz.PlotComps.plotCompsFromHModel(
cur_model, dataset=dataset, ax_handle=cur_ax_handle)
cur_ax_handle.set_xlim([-4.5, 4.5])
cur_ax_handle.set_xlabel("lap: %d" % lap_val)
pylab.tight_layout()
###############################################################################
#
# Run with *merge* moves only, from K=5 initial clusters
# --------------------------------------------------------
#
# Unfortunately, no pairwise merge is accepted.
# The model is stuck using 5 clusters when one cluster would do.
gamma = 5.0
sF = 0.1
K = 5
m_trained_model, m_info_dict = bnpy.run(
dataset, 'DPMixtureModel', 'Gauss', 'memoVB',
output_path=('/tmp/StandardNormalK1/' +
'trymoves-K=%d-gamma=%s-ECovMat=%s*eye-moves=merge,shuffle/' % (
K, gamma, sF)),
nLap=100, nTask=1, nBatch=1,
gamma0=gamma, sF=sF, ECovMat='eye',
K=K, initname='randexamplesbydist',
moves='merge,shuffle',
**dict(**merge_kwargs))
show_clusters_over_time(m_info_dict['task_output_path'])
###############################################################################
#
# Run with *delete* moves, from K=5 initial clusters
# --------------------------------------------------------
#
# More flexible delete moves *are* accepted.
d_trained_model, d_info_dict = bnpy.run(
dataset, 'DPMixtureModel', 'Gauss', 'memoVB',
output_path=('/tmp/StandardNormalK1/' +
'trymoves-K=%d-gamma=%s-ECovMat=%s*eye-moves=delete,shuffle/' % (
K, gamma, sF)),
nLap=100, nTask=1, nBatch=1,
gamma0=gamma, sF=sF, ECovMat='eye',
K=K, initname='randexamplesbydist',
moves='delete,shuffle',
**dict(delete_kwargs))
show_clusters_over_time(d_info_dict['task_output_path'])
###############################################################################
#
# Loss function trace plot
# ------------------------
#
pylab.plot(
m_info_dict['lap_history'][1:],
m_info_dict['loss_history'][1:], 'k.-',
label='vb_with_merges')
pylab.plot(
d_info_dict['lap_history'][1:],
d_info_dict['loss_history'][1:], 'b.-',
label='vb_with_deletes')
pylab.legend(loc='upper right')
pylab.xlabel('num. laps')
pylab.ylabel('loss')
pylab.tight_layout()
|
[
"mike@michaelchughes.com"
] |
mike@michaelchughes.com
|
e02578080980b2b450e344b20eee1ab7eb42e084
|
8dc84558f0058d90dfc4955e905dab1b22d12c08
|
/third_party/chromite/lib/cros_logging_unittest.py
|
5ec365c3addccd5a85b0afd3193dc35ebed13c47
|
[
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
meniossin/src
|
42a95cc6c4a9c71d43d62bc4311224ca1fd61e03
|
44f73f7e76119e5ab415d4593ac66485e65d700a
|
refs/heads/master
| 2022-12-16T20:17:03.747113
| 2020-09-03T10:43:12
| 2020-09-03T10:43:12
| 263,710,168
| 1
| 0
|
BSD-3-Clause
| 2020-05-13T18:20:09
| 2020-05-13T18:20:08
| null |
UTF-8
|
Python
| false
| false
| 3,796
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for cros_logging."""
from __future__ import print_function
import sys
from chromite.lib import cros_logging as logging
from chromite.lib import cros_test_lib
class CrosloggingTest(cros_test_lib.OutputTestCase):
"""Test logging works as expected."""
def setUp(self):
self.logger = logging.getLogger()
sh = logging.StreamHandler(sys.stdout)
self.logger.addHandler(sh)
def AssertLogContainsMsg(self, msg, functor, *args, **kwargs):
"""Asserts that calling functor logs a line that contains msg.
Args:
msg: The message to look for.
functor: A function taking no arguments to test.
*args, **kwargs: passthrough arguments to AssertLogContainsMsg.
"""
with self.OutputCapturer():
functor()
self.AssertOutputContainsLine(msg, *args, **kwargs)
def testNotice(self):
"""Test logging.notice works and is between INFO and WARNING."""
msg = 'notice message'
self.logger.setLevel(logging.INFO)
self.AssertLogContainsMsg(msg, lambda: logging.notice(msg))
self.logger.setLevel(logging.WARNING)
self.AssertLogContainsMsg(msg, lambda: logging.notice(msg), invert=True)
def testPrintBuildbotFunctionsNoMarker(self):
"""PrintBuildbot* without markers should not be recognized by buildbot."""
self.AssertLogContainsMsg('@@@STEP_LINK@',
lambda: logging.PrintBuildbotLink('name', 'url'),
check_stderr=True, invert=True)
self.AssertLogContainsMsg('@@@@STEP_TEXT@',
lambda: logging.PrintBuildbotStepText('text'),
check_stderr=True, invert=True)
self.AssertLogContainsMsg('@@@STEP_WARNINGS@@@',
logging.PrintBuildbotStepWarnings,
check_stderr=True, invert=True)
self.AssertLogContainsMsg('@@@STEP_FAILURE@@@',
logging.PrintBuildbotStepFailure,
check_stderr=True, invert=True)
self.AssertLogContainsMsg('@@@BUILD_STEP',
lambda: logging.PrintBuildbotStepName('name'),
check_stderr=True, invert=True)
self.AssertLogContainsMsg(
'@@@SET_BUILD_PROPERTY',
lambda: logging.PrintBuildbotSetBuildProperty('name', {'a': 'value'}),
check_stderr=True, invert=True)
def testPrintBuildbotFunctionsWithMarker(self):
"""PrintBuildbot* with markers should be recognized by buildbot."""
logging.EnableBuildbotMarkers()
self.AssertLogContainsMsg('@@@STEP_LINK@name@url@@@',
lambda: logging.PrintBuildbotLink('name', 'url'),
check_stderr=True)
self.AssertLogContainsMsg('@@@STEP_TEXT@text@@@',
lambda: logging.PrintBuildbotStepText('text'),
check_stderr=True)
self.AssertLogContainsMsg('@@@STEP_WARNINGS@@@',
logging.PrintBuildbotStepWarnings,
check_stderr=True)
self.AssertLogContainsMsg('@@@STEP_FAILURE@@@',
logging.PrintBuildbotStepFailure,
check_stderr=True)
self.AssertLogContainsMsg('@@@BUILD_STEP@name@@@',
lambda: logging.PrintBuildbotStepName('name'),
check_stderr=True)
self.AssertLogContainsMsg(
'@@@SET_BUILD_PROPERTY@name@"value"@@@',
lambda: logging.PrintBuildbotSetBuildProperty('name', 'value'),
check_stderr=True)
|
[
"arnaud@geometry.ee"
] |
arnaud@geometry.ee
|
1eed5a6b4240f4958b2f31bd6573c8e90732fd33
|
d3ce58c4576431df14de0990f45cfd574f0aa45f
|
/.history/user/views_20201012031349.py
|
c2b13bfdbd18379d7610d2eee0c80aff2d6309f3
|
[] |
no_license
|
rahulsolankib/portfolio
|
fe93f0e6b0b28990f0b9fad84dbf7c3aa07243c4
|
281ed429e2590376aee4649b2ea7b3e8facaf6f1
|
refs/heads/master
| 2023-01-02T06:55:21.319094
| 2020-10-26T08:55:22
| 2020-10-26T08:55:22
| 305,586,595
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
from django.shortcuts import render,redirect
from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from .forms import UserRegisterForm
# Create your views here.
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
messages.success(request, f'Account created for {username}!')
return redirect('Start-Page')
else:
form = UserCreationForm()
return render(request,'user/register.html',{'form':form,'title':'Register Here!'})
|
[
"rahulsolankib@gmail.com"
] |
rahulsolankib@gmail.com
|
30a6d77a390396db8c45a785cd1129a10f348acb
|
b8dfb4371270042da2828b8f3e3da9a1ee9d0e83
|
/chat/migrations/0002_message_created.py
|
296f446a5b7beab992b26586c1d1b4b66dbb2116
|
[] |
no_license
|
skiboorg/puzzle_game
|
4bcfcad8a5e9647b413cab3b6f05b7aaa92811b3
|
c38a6b5af9c50da0a1e978df3851d7d78db41dfe
|
refs/heads/master
| 2023-01-22T15:48:58.962659
| 2020-11-25T07:27:01
| 2020-11-25T07:27:01
| 263,743,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
# Generated by Django 3.0.6 on 2020-05-16 12:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('chat', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='message',
name='created',
field=models.DateTimeField(auto_now_add=True, null=True),
),
]
|
[
"ddnnss.i1@gmail.com"
] |
ddnnss.i1@gmail.com
|
f14882f969949e9cb17aeffd927c279b87368ab3
|
1d483945b82db39d2c5a0cd31522c2780c2c43ad
|
/my_app/ss_lib/Ssheet_class.py
|
6b13db5aa008fc257c6cecf841cf911bc6309015
|
[] |
no_license
|
jpisano99/scrub_email
|
f2dfaa47437a1e5102baa62687df84e855be8835
|
0aaff6d8ffd5adc6549d490c396d8ae125ca6ad4
|
refs/heads/master
| 2020-07-24T22:35:07.855516
| 2019-09-16T15:06:52
| 2019-09-16T15:06:52
| 208,071,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,060
|
py
|
from .smartsheet_basic_functions import *
ss_config = dict(
SS_TOKEN=my_app.my_secrets.passwords["SS_TOKEN"]
)
class Ssheet:
    """Object wrapper around a single Smartsheet sheet.

    Caches the sheet's metadata (id, column indexes) and, unless
    meta_data_only is requested, its row data, and exposes thin wrappers
    around the ss_* helper functions from smartsheet_basic_functions.
    """

    ss_token = ss_config['SS_TOKEN']
    ss = smartsheet.Smartsheet(ss_token)

    def __init__(self, name, meta_data_only=False):
        self.name = name
        self.sheet = {}
        self.id = 0
        self.columns = {}
        self.rows = {}
        self.col_name_idx = {}
        self.col_id_idx = {}
        self.refresh(meta_data_only)

    def refresh(self, meta_data_only):
        """Re-fetch sheet metadata (and optionally row data) from Smartsheet."""
        self.sheet = ss_get_sheet(self.ss, self.name)
        self.id = self.sheet['id']
        # A sheet that does not exist is reported with id == -1; skip the
        # per-sheet lookups in that case to avoid API errors.
        if self.id != -1:
            self.columns = ss_get_col_data(self.ss, self.id)
            self.col_name_idx = ss_col_name_idx(self.ss, self.columns)
            self.col_id_idx = ss_col_id_idx(self.ss, self.columns)
            if not meta_data_only:
                self.rows = ss_get_row_data(self.ss, self.id)

    def row_lookup(self, col_name, row_value):
        """Return the ids of every row whose *col_name* cell equals *row_value*."""
        wanted_col = self.col_name_idx[col_name]
        matches = []
        for row in self.rows:
            for cell in row['cells']:
                # Cells with no 'value' key compare as the empty string.
                if cell['columnId'] == wanted_col and cell.get('value', '') == row_value:
                    matches.append(row['id'])
        return matches

    def get_rows(self):
        """Return {row_number: {column_title: cell_value}} for every row."""
        return {
            row['rowNumber']: {
                self.col_id_idx[cell['columnId']]: cell.get('value', '')
                for cell in row['cells']
            }
            for row in self.rows
        }

    def add_rows(self, add_rows):
        ss_add_rows(self.ss, self.id, add_rows)

    def del_rows(self, del_rows):
        ss_del_rows(self.ss, self.id, del_rows)

    def add_cols(self, add_cols):
        ss_add_column(self.ss, self.id, add_cols)

    def del_cols(self, del_cols):
        ss_del_column(self.ss, self.id, del_cols)

    def mod_cell(self, col_id, row_dict):
        ss_mod_cell(self.ss, self.id, col_id, row_dict)

    def create_sheet(self, name, _col_dict):
        """Create a sheet named *name* with the given column specs.

        Only meaningful when this object's id is -1 (sheet did not exist).
        Example column spec:
            [{'primary': True, 'title': 'ERP Customer Name', 'type': 'TEXT_NUMBER'},
             {'title': 'End Customer Ultimate Name', 'type': 'TEXT_NUMBER'},
             {'title': 'col1', 'type': 'CHECKBOX', 'symbol': 'STAR'}]
        """
        ss_create_sheet(self.ss, name, _col_dict)
        self.name = name
        self.refresh(True)

    def delete_sheet(self):
        """Delete this sheet; a subsequent refresh reports id == -1."""
        ss_delete_sheet(self.ss, self.name)
        self.refresh(True)

    def __repr__(self):
        return "Ssheet('{}')".format(self.name)
# def __iter__(self):
# return self
#
# def __next__(self):
# self.index += 1
# if self.index == len(self.sql_to_ss):
# raise StopIteration
# return self.sql_to_ss[self.index]
if __name__ == "__main__":
    my_ss = Ssheet('Tetration On-Demand POV Raw Data')
    print(my_ss)
    print("Sheet Data: ", my_ss.sheet)
    print(my_ss.sheet['id'])
    print("Columns: ", my_ss.columns)
    # BUG FIX: Ssheet keeps its name -> column-id map in 'col_name_idx';
    # there is no 'col_dict' attribute, so the old line raised AttributeError.
    print("Column Dict: ", my_ss.col_name_idx)
    print("Row Data: ", my_ss.rows)
    exit()
    # Everything below is kept as demo/reference code; it is unreachable
    # while the exit() above remains in place.
    print('Row IDs: ', my_ss.row_lookup('cisco_owner_name', 'Chris McHenry'))

    # Add Columns Example
    # my_cols = []
    # my_cols.append({
    #     'title': 'New Picklist Column 2',
    #     'type': 'PICKLIST',
    #     'options': [
    #         'First',
    #         'Second',
    #         'Third'],
    #     'index': 4})
    # my_cols.append({
    #     'title': 'New Date Column1',
    #     'type': 'DATE',
    #     'validation': True,
    #     'index': 4})
    # my_ss.add_cols(my_cols)
    # my_ss.refresh(False)

    # Delete Column Example
    # my_col_id = my_ss.col_name_idx['New Date Column']
    # my_ss.del_cols(my_col_id)
    # my_ss.refresh(False)

    # # Add Rows Example
    # my_col_id = my_ss.col_name_idx['cisco_owner_name']
    # my_col1_id = my_ss.col_name_idx['cisco_owner']
    #
    # cell_data = []
    # my_rows = []
    # cell_data.append({
    #     'column_id': my_col_id,
    #     'value': 'blanche',
    #     'strict': False})
    # cell_data.append({
    #     'column_id': my_col1_id,
    #     'value': 'stan',
    #     'strict': False})
    # my_rows.append(cell_data)
    #
    # cell_data = []
    # cell_data.append({
    #     'column_id': my_col1_id,
    #     'value': 'blanche',
    #     'strict': False})
    # my_rows.append(cell_data)
    #
    # my_ss.add_rows(my_rows)
    # # Call this to update our sheet object
    # my_ss.refresh(False)
    # print("Added Rows: ", len(my_ss.rows))

    # Delete Rows Example
    # print("Deleted # of Rows BEFORE: ", len(my_ss.rows))
    # print('Row IDs: ', my_ss.row_lookup('cisco_owner', 'stan'))
    # rows_to_delete = my_ss.row_lookup('cisco_owner', 'stan')
    # my_ss.del_rows(rows_to_delete)
    # my_ss.refresh(False)
    # print("Deleted # of Rows AFTER: ", len(my_ss.rows))

    # Modify Cells Example
    my_row_ids = my_ss.row_lookup('cisco_owner_name', 'ang')
    # BUG FIX: 'col_dict' does not exist on Ssheet; use 'col_name_idx'.
    my_col_id = my_ss.col_name_idx['company_name']
    new_val = 'client director'
    my_row_dict = {}
    for row_id in my_row_ids:  # renamed from 'id' to avoid shadowing the builtin
        my_row_dict[row_id] = new_val
    my_ss.mod_cell(my_col_id, my_row_dict)
    # BUG FIX: Ssheet.refresh() has a required meta_data_only parameter;
    # calling it with no argument raised TypeError.
    my_ss.refresh(False)
    exit()
    # RESPONSE DEBUG CODE - DO NOT DELETE
    # print(json.dumps(my_ss.rows, indent=2))
|
[
"jpisano@cisco.com"
] |
jpisano@cisco.com
|
c7687c1fc9bb0a10bffee0abdafcf0a9bdbfa4c2
|
0366bccae8841bbf6ecaad70660aae89bb0f6394
|
/19_OOP_polymorphism.py/4_example.py
|
6b6eaf7e022d461d103323dc43a5c7a9f62a3f52
|
[] |
no_license
|
KobiShashs/Python
|
8a5bdddcaef84b455795c5393cbacee5967493f7
|
e748973ad0b3e12c5fb87648783531783282832a
|
refs/heads/master
| 2021-04-05T20:18:57.715805
| 2020-04-02T21:51:44
| 2020-04-02T21:51:44
| 248,597,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 692
|
py
|
class BigThing:
    """Wrap a value and report its 'size' by printing it."""

    def __init__(self, something):
        self._something = something

    def size(self):
        """Print the value itself for ints, len() of it otherwise."""
        thing = self._something
        if type(thing) == int:
            print(thing)
        else:
            print(len(thing))


class BigCat(BigThing):
    """A BigThing with a weight; size() prints a fatness verdict instead."""

    def __init__(self, something, weight):
        super().__init__(something)
        self._weight = weight

    def size(self):
        """Print a verdict based on weight thresholds (20 and 15)."""
        if self._weight > 20:
            print("Very Fat")
        elif self._weight > 15:
            print("Fat")
        else:
            print("OK")


def main():
    # size() prints and returns None, so each call prints two lines.
    my_thing = BigThing("balloon")
    print(my_thing.size())
    cutie = BigCat("mitzy", 22)
    print(cutie.size())


main()
|
[
"kobi.shasha@gmail.com"
] |
kobi.shasha@gmail.com
|
09171235eb4b872b6ae2dd265aee80bd82353d98
|
f09dc121f213f2881df3572288b7ee5b39246d73
|
/aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/ModifyInstanceMaintenanceAttributesRequest.py
|
2c3e59f0c7a53b3514866f689d76e78e1a24c31f
|
[
"Apache-2.0"
] |
permissive
|
hetw/aliyun-openapi-python-sdk
|
2f31378ad6be0896fb8090423f607e9c7d3ae774
|
7443eacee9fbbaa93c7975c6dbec92d3c364c577
|
refs/heads/master
| 2023-01-19T22:42:36.214770
| 2020-12-04T10:55:14
| 2020-12-04T10:55:14
| 318,689,093
| 1
| 0
|
NOASSERTION
| 2020-12-05T03:03:03
| 2020-12-05T03:03:03
| null |
UTF-8
|
Python
| false
| false
| 3,168
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class ModifyInstanceMaintenanceAttributesRequest(RpcRequest):
    """RPC wrapper for the Ecs ModifyInstanceMaintenanceAttributes action (API 2014-05-26)."""

    def __init__(self):
        RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'ModifyInstanceMaintenanceAttributes')
        self.set_method('POST')
        # Attach endpoint data only when the installed core SDK exposes the hooks.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_MaintenanceWindows(self):
        return self.get_query_params().get('MaintenanceWindow')

    def set_MaintenanceWindows(self, MaintenanceWindows):
        # Flatten each window dict into 1-based indexed query parameters,
        # e.g. MaintenanceWindow.1.StartTime / MaintenanceWindow.1.EndTime.
        for index, window in enumerate(MaintenanceWindows, start=1):
            prefix = 'MaintenanceWindow.' + str(index) + '.'
            if window.get('StartTime') is not None:
                self.add_query_param(prefix + 'StartTime', window.get('StartTime'))
            if window.get('EndTime') is not None:
                self.add_query_param(prefix + 'EndTime', window.get('EndTime'))

    def get_ActionOnMaintenance(self):
        return self.get_query_params().get('ActionOnMaintenance')

    def set_ActionOnMaintenance(self, ActionOnMaintenance):
        self.add_query_param('ActionOnMaintenance', ActionOnMaintenance)

    def get_ResourceOwnerAccount(self):
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerAccount(self):
        return self.get_query_params().get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)

    def get_InstanceIds(self):
        return self.get_query_params().get('InstanceId')

    def set_InstanceIds(self, InstanceIds):
        # InstanceId is a repeated parameter: InstanceId.1, InstanceId.2, ...
        for index, instance_id in enumerate(InstanceIds, start=1):
            if instance_id is not None:
                self.add_query_param('InstanceId.' + str(index), instance_id)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
175ca4dbc99b9d492d6ae7075d14c6c264b624c7
|
503d2f8f5f5f547acb82f7299d86886691966ca5
|
/atcoder/diverta2019_b.py
|
91c65200aac281406b1f81daf84852c467ff3220
|
[] |
no_license
|
Hironobu-Kawaguchi/atcoder
|
3fcb649cb920dd837a1ced6713bbb939ecc090a9
|
df4b55cc7d557bf61607ffde8bda8655cf129017
|
refs/heads/master
| 2023-08-21T14:13:13.856604
| 2023-08-12T14:53:03
| 2023-08-12T14:53:03
| 197,216,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
# https://atcoder.jp/contests/diverta2019/tasks/diverta2019_b
# Count non-negative (r, g, b) with r*R + g*G + b*B == N.
R, G, B, N = map(int, input().split())
count = 0
for r in range(N // R + 1):
    after_red = N - r * R
    for g in range(after_red // G + 1):
        # The remainder must be an exact multiple of B.
        if (after_red - g * G) % B == 0:
            count += 1
print(count)
|
[
"hironobukawaguchi3@gmail.com"
] |
hironobukawaguchi3@gmail.com
|
fa67cb1779f7feb966a476e6984cc58af6ff7cf3
|
688be2b7e2aef0f7484f390d0a1dc105e3217b5e
|
/product/urls.py
|
9819a5d59007108354d65c5c2464a9f98ab6b2fc
|
[] |
no_license
|
extreme1337/olx-clone
|
0fffac94f3d6bce0085bc52526357a6f2b81bd47
|
d7ad9e5d90f9dbb73d1fbe8b82b8d0d2b6996922
|
refs/heads/main
| 2023-03-01T11:00:22.760807
| 2021-02-10T08:27:13
| 2021-02-10T08:27:13
| 337,080,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
from django.urls import path
from . import views
# URL namespace: reverse with e.g. reverse('product:product_list').
app_name = 'product'
urlpatterns = [
    # Full product list (no category filter).
    path('', views.productlist, name='product_list'),
    # Product list filtered by category slug (same view as above).
    path('<slug:category_slug>', views.productlist, name='product_list_category'),
    # Detail page for one product, looked up by its slug.
    path('detail/<slug:product_slug>', views.productdetail, name='product_detail'),
]
|
[
"marko.miseljic.14@gmail.com"
] |
marko.miseljic.14@gmail.com
|
cc097148e3d7e18a8f47a9cb3025b59a1f608088
|
c102085883a0c066f3d7edf7d81e4057ed745748
|
/pypy3/lilac.py
|
767f629c87e8c52708e572cf27fe02b4ef234ad9
|
[] |
no_license
|
jcstr/arch4edu
|
58afe3a59727bb57afd1decdd6988392d151c29b
|
76104e81ef0097a6c21f3d241b92772656b39703
|
refs/heads/master
| 2020-05-15T15:19:40.301478
| 2019-04-20T06:21:14
| 2019-04-20T06:21:14
| 182,370,458
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 990
|
py
|
#!/usr/bin/env python3
from lilaclib import *
update_on = [{'archpkg': 'pypy3'}]
build_prefix = ['extra-armv6h', 'extra-armv7h']
time_limit_hours = 72
def pre_build():
    """Fetch the official pypy3 PKGBUILD and patch it for ARM builds."""
    download_official_pkgbuild('pypy3')
    add_arch(['armv6h', 'armv7h'])
    # NOTE(review): lilaclib's edit_file appears to yield each PKGBUILD line
    # and write back whatever this loop print()s -- confirm against the
    # lilaclib documentation before changing the print structure.
    for line in edit_file('PKGBUILD'):
        if line.startswith('source=('):
            # Prepend an upstream bitbucket patch to the source array.
            print(line.replace('(', '(a243e4e0b21c.patch::"https://bitbucket.org/pypy/pypy/commits/a243e4e0b21c968ba3fb42e3a575be24a2d6461b/raw"\n'))
        elif line.startswith('sha512sums=('):
            # Matching checksum entry for the injected patch file.
            print(line.replace('(', '("480e1e9fc11d703ad167ca0fe9473e5216b02f2b39a1251ac9f673252d65a7837cbbcfbebba8a941542ef5c044cb6021b83cec218cdede12b3cfd2fa28e5dff2"\n'))
        elif line.startswith(' patch'):
            print(line)
            # Apply the new patch right after the existing patch command(s).
            print(' patch -Np1 -i ${srcdir}/a243e4e0b21c.patch')
        else:
            print(line)
def post_build():
    """Commit the edited PKGBUILD after a successful build."""
    git_add_files('PKGBUILD')
    git_commit()
if __name__ == '__main__':
single_main('extra-x86_64')
|
[
"i@jingbei.li"
] |
i@jingbei.li
|
b0dfaa6f3182873be00588c860c03b8b3989b65d
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/cylicRot_20200715001137.py
|
b769c8a9d001bcac07d9fa2a6eb925b43dd3895f
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
# given an array rotate it k times to the right
def rotate(A, K):
    """Rotate list ``A`` to the right ``K`` times and print the result.

    One right rotation moves the last element to the front, so K rotations
    bring the last ``K % len(A)`` elements to the front in order.
    The original code only printed the unrotated input (the implementation
    was left unfinished); this completes it with the slicing approach the
    original comments sketched ([lst[-1]] + lst[:-1], generalized to K).
    The rotated list is printed, matching the original print-style output,
    and the input list is not mutated.
    """
    if A:
        # K may exceed len(A); only K % len(A) rotations have any effect.
        k = K % len(A)
        if k:
            A = A[-k:] + A[:-k]
    print(A)


rotate([3, 8, 9, 7, 6], 3)
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
720904416d61d2eb860e892e6a3bedc06be3beb9
|
0febba6ea7a07550e3c7677dfb4037f3ea9662e4
|
/mysite/settings.py
|
dea9ef93351be4c69ed1fcb3f4a893bfa014fe75
|
[] |
no_license
|
abubych/my-first-blog
|
88341a6e2b9a006aa22fc1b243230c95e51a400a
|
8e39bd6a0d964bcaa99db7bfbe89a1755d75c113
|
refs/heads/master
| 2020-09-26T22:09:31.931145
| 2019-12-06T15:03:10
| 2019-12-06T15:03:10
| 219,852,145
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,155
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'dqq83fazc22^&ki+opifgrs2vucm!$@%hp5k!w&ux409mcenr)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Kiev'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"you@example.com"
] |
you@example.com
|
12f2d2c34e5181bb8883b981c7dd86ec76bba947
|
c8ea4fe0dccca928b92234b72a7a8d9cd6cf4d14
|
/tests/eth2/beacon/types/test_proposer_slashings.py
|
2d70091de7d9c186937546a3f8b370369a0c2677
|
[
"MIT"
] |
permissive
|
kclowes/trinity
|
b6bc4f7c57ade1651cf9b2ca9ca88493f3485007
|
f0400c78a6d828dd266b1f31dd3fa7aacf97486d
|
refs/heads/master
| 2020-04-16T16:11:28.531260
| 2019-01-14T17:03:56
| 2019-01-14T17:44:58
| 165,728,497
| 0
| 0
|
MIT
| 2019-01-14T20:17:01
| 2019-01-14T20:17:00
| null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
from eth2.beacon.types.proposer_slashings import (
ProposerSlashing,
)
def test_defaults(sample_proposer_slashing_params):
    """ProposerSlashing should expose every constructor param unchanged."""
    slashing = ProposerSlashing(**sample_proposer_slashing_params)
    checked_fields = (
        'proposer_index',
        'proposal_data_1',
        'proposal_signature_1',
        'proposal_data_2',
        'proposal_signature_2',
    )
    for field in checked_fields:
        assert getattr(slashing, field) == sample_proposer_slashing_params[field]
|
[
"hwwang156@gmail.com"
] |
hwwang156@gmail.com
|
cdd80aa7d4fd23ee92ffbfa5085fa51294fb6d22
|
614cad3588af9c0e51e0bb98963075e3195e92f5
|
/models/onet/config.py
|
5ab763d53ab51f06d527d863d05d4dbdd603df17
|
[] |
no_license
|
dragonlong/haoi-pose
|
2810dae7f9afd0a26b3d0a5962fd9ae8a5abac58
|
43388efd911feecde588b27a753de353b8e28265
|
refs/heads/master
| 2023-07-01T14:18:29.029484
| 2021-08-10T10:57:42
| 2021-08-10T10:57:42
| 294,602,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,532
|
py
|
import torch
import torch.distributions as dist
from torch import nn
import os
import __init__
from models.encoder import encoder_dict
from models.onet import models, training, generation
# from im2mesh import data
# from im2mesh import config
import dataset as data
from utils import config
def get_model(cfg, device=None, dataset=None, **kwargs):
    ''' Return the Occupancy Network model.

    Builds decoder, optional latent encoder and optional input encoder
    from the names and kwargs in the yaml config, then wires them into
    an OccupancyNetwork together with the latent prior p0_z.

    Args:
        cfg (dict): imported yaml config
        device (device): pytorch device
        dataset (dataset): dataset (only needed for the 'idx' encoder)
    '''
    decoder = cfg['model']['decoder']
    encoder = cfg['model']['encoder']
    encoder_latent = cfg['model']['encoder_latent']
    dim = cfg['data']['dim']
    z_dim = cfg['model']['z_dim']
    c_dim = cfg['model']['c_dim']
    decoder_kwargs = cfg['model']['decoder_kwargs']
    encoder_kwargs = cfg['model']['encoder_kwargs']
    encoder_latent_kwargs = cfg['model']['encoder_latent_kwargs']
    # Look up the decoder class by name and instantiate it.
    decoder = models.decoder_dict[decoder](
        dim=dim, z_dim=z_dim, c_dim=c_dim,
        **decoder_kwargs
    )
    # z_dim == 0 disables the latent code entirely (no latent encoder).
    if z_dim != 0:
        encoder_latent = models.encoder_latent_dict[encoder_latent](
            dim=dim, z_dim=z_dim, c_dim=c_dim,
            **encoder_latent_kwargs
        )
    else:
        encoder_latent = None
    if encoder == 'idx':
        # One learnable embedding per dataset element instead of a real encoder.
        encoder = nn.Embedding(len(dataset), c_dim)
    elif encoder is not None:
        encoder = encoder_dict[encoder](
            c_dim=c_dim,
            **encoder_kwargs
        )
    else:
        encoder = None
    p0_z = get_prior_z(cfg, device)
    model = models.OccupancyNetwork(
        decoder, encoder, encoder_latent, p0_z, device=device
    )
    return model
def get_trainer(model, optimizer, cfg, device, **kwargs):
    ''' Returns the trainer object.

    Args:
        model (nn.Module): the Occupancy Network model
        optimizer (optimizer): pytorch optimizer object
        cfg (dict): imported yaml config
        device (device): pytorch device
    '''
    # Visualizations go into a 'vis' subfolder of the training output dir.
    vis_dir = os.path.join(cfg['training']['out_dir'], 'vis')
    return training.Trainer(
        model,
        optimizer,
        device=device,
        input_type=cfg['data']['input_type'],
        vis_dir=vis_dir,
        threshold=cfg['test']['threshold'],
        eval_sample=cfg['training']['eval_sample'],
    )
def get_generator(model, cfg, device, **kwargs):
    ''' Returns the generator object.

    Wraps the model in a Generator3D that extracts meshes from the
    occupancy field, configured entirely from the 'generation' and
    'test' sections of the yaml config.

    Args:
        model (nn.Module): Occupancy Network model
        cfg (dict): imported yaml config
        device (device): pytorch device
    '''
    # Optional input preprocessor (e.g. for point clouds), built from config.
    preprocessor = config.get_preprocessor(cfg, device=device)
    generator = generation.Generator3D(
        model,
        device=device,
        threshold=cfg['test']['threshold'],
        resolution0=cfg['generation']['resolution_0'],
        upsampling_steps=cfg['generation']['upsampling_steps'],
        sample=cfg['generation']['use_sampling'],
        refinement_step=cfg['generation']['refinement_step'],
        simplify_nfaces=cfg['generation']['simplify_nfaces'],
        preprocessor=preprocessor,
    )
    return generator
def get_prior_z(cfg, device, **kwargs):
    ''' Returns prior distribution for latent code z.

    The prior is a standard normal N(0, I) of dimension
    cfg['model']['z_dim'], created on the given device.

    Args:
        cfg (dict): imported yaml config
        device (device): pytorch device
    '''
    z_dim = cfg['model']['z_dim']
    loc = torch.zeros(z_dim, device=device)
    scale = torch.ones(z_dim, device=device)
    return dist.Normal(loc, scale)
def get_data_fields(mode, cfg):
    ''' Returns the data fields.

    Always returns a 'points' field (subsampled occupancy points); in
    'val'/'test' mode additionally returns un-subsampled 'points_iou'
    and 'voxels' fields when the corresponding files are configured.

    Args:
        mode (str): the mode which is used ('train', 'val' or 'test')
        cfg (dict): imported yaml config
    '''
    points_transform = data.SubsamplePoints(cfg['data']['points_subsample'])
    with_transforms = cfg['model']['use_camera']
    fields = {}
    fields['points'] = data.PointsField(
        cfg['data']['points_file'], points_transform,
        with_transforms=with_transforms,
        unpackbits=cfg['data']['points_unpackbits'],
    )
    if mode in ('val', 'test'):
        points_iou_file = cfg['data']['points_iou_file']
        voxels_file = cfg['data']['voxels_file']
        # Evaluation-only fields; either may be disabled by a null path.
        if points_iou_file is not None:
            fields['points_iou'] = data.PointsField(
                points_iou_file,
                with_transforms=with_transforms,
                unpackbits=cfg['data']['points_unpackbits'],
            )
        if voxels_file is not None:
            fields['voxels'] = data.VoxelsField(voxels_file)
    return fields
|
[
"lxiaol9@vt.edu"
] |
lxiaol9@vt.edu
|
7e404da28c3fe14b73b70c442e023410f00a3ade
|
a62fdd0beb6c47cc704c1192b68b0bcfcd024304
|
/Python/II/19-TXTEDIT2/3/ui_form.py
|
a4d1d6b8f2c118bffdff979ebf87d1580422b60e
|
[] |
no_license
|
a6461/Qt-PyQt
|
da1895b4faccda80b8079ecdca79f1ea525daa0a
|
404bd7fbbc432ebeaa1a486fc8e005d47aed9cfd
|
refs/heads/master
| 2020-03-14T22:16:48.714825
| 2018-06-12T20:45:58
| 2018-06-12T20:45:58
| 131,817,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,492
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'form.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """Generated UI definition for the text-editor main window.

    Produced by pyuic5 5.10.1 from form.ui (see the file header); edit the
    .ui file and regenerate rather than changing this class by hand, since
    regeneration overwrites it. Widget objectNames and creation order are
    referenced by connectSlotsByName and must stay as generated.
    """
    def setupUi(self, Form):
        """Create all widgets, menus and actions and attach them to Form."""
        Form.setObjectName("Form")
        Form.resize(500, 300)
        # Central widget: a single QTextEdit filling the window.
        self.centralwidget = QtWidgets.QWidget(Form)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setSpacing(0)
        self.gridLayout.setObjectName("gridLayout")
        self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
        self.textEdit.setObjectName("textEdit")
        self.gridLayout.addWidget(self.textEdit, 0, 0, 1, 1)
        Form.setCentralWidget(self.centralwidget)
        # Menu bar: File menu, Format menu with a nested Colors submenu.
        self.menubar = QtWidgets.QMenuBar(Form)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 21))
        self.menubar.setObjectName("menubar")
        self.menuFile = QtWidgets.QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        self.menuFormat = QtWidgets.QMenu(self.menubar)
        self.menuFormat.setObjectName("menuFormat")
        self.menuColors = QtWidgets.QMenu(self.menuFormat)
        self.menuColors.setObjectName("menuColors")
        Form.setMenuBar(self.menubar)
        # File actions.
        self.actionNew = QtWidgets.QAction(Form)
        self.actionNew.setObjectName("actionNew")
        self.actionOpen = QtWidgets.QAction(Form)
        self.actionOpen.setObjectName("actionOpen")
        self.actionSave = QtWidgets.QAction(Form)
        self.actionSave.setObjectName("actionSave")
        self.actionSaveAs = QtWidgets.QAction(Form)
        self.actionSaveAs.setObjectName("actionSaveAs")
        self.actionExit = QtWidgets.QAction(Form)
        self.actionExit.setObjectName("actionExit")
        # Character-format actions: checkable, each displayed in its own style.
        self.actionBold = QtWidgets.QAction(Form)
        self.actionBold.setCheckable(True)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.actionBold.setFont(font)
        self.actionBold.setObjectName("actionBold")
        self.actionItalic = QtWidgets.QAction(Form)
        self.actionItalic.setCheckable(True)
        font = QtGui.QFont()
        font.setItalic(True)
        self.actionItalic.setFont(font)
        self.actionItalic.setObjectName("actionItalic")
        self.actionUnderline = QtWidgets.QAction(Form)
        self.actionUnderline.setCheckable(True)
        font = QtGui.QFont()
        font.setUnderline(True)
        self.actionUnderline.setFont(font)
        self.actionUnderline.setObjectName("actionUnderline")
        self.actionStrikeOut = QtWidgets.QAction(Form)
        self.actionStrikeOut.setCheckable(True)
        font = QtGui.QFont()
        font.setStrikeOut(True)
        self.actionStrikeOut.setFont(font)
        self.actionStrikeOut.setObjectName("actionStrikeOut")
        # Paragraph-alignment actions; Left starts checked as the default.
        self.actionLeft = QtWidgets.QAction(Form)
        self.actionLeft.setCheckable(True)
        self.actionLeft.setChecked(True)
        self.actionLeft.setObjectName("actionLeft")
        self.actionCenter = QtWidgets.QAction(Form)
        self.actionCenter.setCheckable(True)
        self.actionCenter.setObjectName("actionCenter")
        self.actionRight = QtWidgets.QAction(Form)
        self.actionRight.setCheckable(True)
        self.actionRight.setObjectName("actionRight")
        self.actionFontColor = QtWidgets.QAction(Form)
        self.actionFontColor.setObjectName("actionFontColor")
        self.actionBackgroundColor = QtWidgets.QAction(Form)
        self.actionBackgroundColor.setObjectName("actionBackgroundColor")
        self.actionFont = QtWidgets.QAction(Form)
        self.actionFont.setObjectName("actionFont")
        self.actionJustify = QtWidgets.QAction(Form)
        self.actionJustify.setCheckable(True)
        self.actionJustify.setObjectName("actionJustify")
        # Assemble the menus.
        self.menuFile.addAction(self.actionNew)
        self.menuFile.addAction(self.actionOpen)
        self.menuFile.addAction(self.actionSave)
        self.menuFile.addAction(self.actionSaveAs)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionExit)
        self.menuColors.addAction(self.actionFontColor)
        self.menuColors.addAction(self.actionBackgroundColor)
        self.menuFormat.addAction(self.actionBold)
        self.menuFormat.addAction(self.actionItalic)
        self.menuFormat.addAction(self.actionUnderline)
        self.menuFormat.addAction(self.actionStrikeOut)
        self.menuFormat.addSeparator()
        self.menuFormat.addAction(self.actionLeft)
        self.menuFormat.addAction(self.actionCenter)
        self.menuFormat.addAction(self.actionRight)
        self.menuFormat.addAction(self.actionJustify)
        self.menuFormat.addSeparator()
        self.menuFormat.addAction(self.menuColors.menuAction())
        self.menuFormat.addAction(self.actionFont)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuFormat.menuAction())
        self.retranslateUi(Form)
        # Only Exit is wired here; other slots connect by objectName convention.
        self.actionExit.triggered.connect(Form.close)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Set all user-visible strings (titles, action texts, shortcuts)."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Text Editor"))
        self.menuFile.setTitle(_translate("Form", "&File"))
        self.menuFormat.setTitle(_translate("Form", "F&ormat"))
        self.menuColors.setTitle(_translate("Form", "&Colors"))
        self.actionNew.setText(_translate("Form", "&New"))
        self.actionNew.setShortcut(_translate("Form", "Ctrl+N"))
        self.actionOpen.setText(_translate("Form", "&Open"))
        self.actionOpen.setShortcut(_translate("Form", "Ctrl+O"))
        self.actionSave.setText(_translate("Form", "&Save"))
        self.actionSave.setShortcut(_translate("Form", "Ctrl+S"))
        self.actionSaveAs.setText(_translate("Form", "Save &As"))
        self.actionSaveAs.setShortcut(_translate("Form", "F12"))
        self.actionExit.setText(_translate("Form", "E&xit"))
        self.actionBold.setText(_translate("Form", "&Bold"))
        self.actionBold.setShortcut(_translate("Form", "Ctrl+B"))
        self.actionItalic.setText(_translate("Form", "&Italic"))
        self.actionItalic.setShortcut(_translate("Form", "Ctrl+I"))
        self.actionUnderline.setText(_translate("Form", "&Underline"))
        self.actionUnderline.setShortcut(_translate("Form", "Ctrl+U"))
        self.actionStrikeOut.setText(_translate("Form", "S&trikeout"))
        self.actionStrikeOut.setShortcut(_translate("Form", "Ctrl+T"))
        self.actionLeft.setText(_translate("Form", "&Left alignment"))
        self.actionLeft.setShortcut(_translate("Form", "Ctrl+L"))
        self.actionCenter.setText(_translate("Form", "C&enter"))
        self.actionCenter.setShortcut(_translate("Form", "Ctrl+E"))
        self.actionRight.setText(_translate("Form", "&Right alignment"))
        self.actionRight.setShortcut(_translate("Form", "Ctrl+R"))
        self.actionFontColor.setText(_translate("Form", "&Font color..."))
        self.actionBackgroundColor.setText(_translate("Form", "&Background color..."))
        self.actionFont.setText(_translate("Form", "&Font..."))
        self.actionJustify.setText(_translate("Form", "&Justify"))
        self.actionJustify.setShortcut(_translate("Form", "Ctrl+J"))
|
[
"a6461@yandex.ru"
] |
a6461@yandex.ru
|
70cb2348662187ca633adcf0cdd99bff60a49366
|
cd40b7cc395f36740000ed4a4144b1c0666ab0fd
|
/hstrat/test_drive/generate_template_phylogeny/_evolve_fitness_trait_population_/__init__.py
|
120b54b41250aec6ea25e07702d1f1cbcbf146b6
|
[
"MIT"
] |
permissive
|
mmore500/hstrat
|
94fd22c86a87a5707590b9398ef679444ed82d6d
|
b2d2caded1db5e2dc681d9f171d7c74b322c55c3
|
refs/heads/master
| 2023-08-31T03:36:44.457576
| 2023-08-25T14:39:29
| 2023-08-25T14:39:29
| 464,531,144
| 5
| 2
|
NOASSERTION
| 2023-08-25T13:07:52
| 2022-02-28T15:11:45
|
Python
|
UTF-8
|
Python
| false
| false
| 575
|
py
|
"""Implementation for `evolve_fitness_trait_population`."""
from ._apply_island_migrations import _apply_island_migrations
from ._apply_mutation import _apply_mutation
from ._apply_niche_invasions import _apply_niche_invasions
from ._get_island_id import _get_island_id
from ._get_niche_id import _get_niche_id
from ._select_parents import _select_parents
# adapted from https://stackoverflow.com/a/31079085
__all__ = [
"_apply_island_migrations",
"_apply_mutation",
"_apply_niche_invasions",
"_get_island_id",
"_get_niche_id",
"_select_parents",
]
|
[
"mmore500.login+gpg@gmail.com"
] |
mmore500.login+gpg@gmail.com
|
c84c74cb7e09d90664a0eec0ef4a514fc53fc7d3
|
77311ad9622a7d8b88707d7cee3f44de7c8860cb
|
/res_bw/scripts/common/lib/email/mime/application.py
|
c28b866a4335bc589db5c85b4d96b4a6b04c04fd
|
[] |
no_license
|
webiumsk/WOT-0.9.14-CT
|
9b193191505a4560df4e872e022eebf59308057e
|
cfe0b03e511d02c36ce185f308eb48f13ecc05ca
|
refs/heads/master
| 2021-01-10T02:14:10.830715
| 2016-02-14T11:59:59
| 2016-02-14T11:59:59
| 51,606,676
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 1,525
|
py
|
# 2016.02.14 12:47:54 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/email/mime/application.py
"""Class representing application/* type MIME documents."""
__all__ = ['MIMEApplication']
from email import encoders
from email.mime.nonmultipart import MIMENonMultipart
class MIMEApplication(MIMENonMultipart):
    """Class for generating application/* MIME documents."""

    def __init__(self, _data, _subtype='octet-stream',
                 _encoder=encoders.encode_base64, **_params):
        """Create an application/* type MIME document.

        _data: the raw application data payload.
        _subtype: MIME content subtype, defaulting to 'octet-stream'.
        _encoder: callable that encodes the payload for transport,
            defaulting to base64 encoding.
        Any additional keyword arguments become Content-Type header
        parameters via the base-class constructor.
        """
        # Reject an explicit None subtype before touching the headers.
        if _subtype is None:
            raise TypeError('Invalid application MIME subtype')
        super().__init__('application', _subtype, **_params)
        self.set_payload(_data)
        _encoder(self)
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\email\mime\application.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:47:54 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
91385116b3fdd55a4da2fd97f1c0d310dc86f93b
|
3b74b57d5f513e98d1ad5404c09aeb7b7a03ed63
|
/solow/tests_solow/test_ces.py
|
b39266ad585f3e2e99a4867cb3fa058212f302e9
|
[] |
no_license
|
vgregory757/QuantEcon.applications
|
0b7a13aa05798c15ff5943adb99d4c01585a2d76
|
4f542e20034b1ca6cfbca2708cb6e29509af401d
|
refs/heads/master
| 2020-04-05T22:52:45.978767
| 2017-03-15T19:53:30
| 2017-03-15T19:53:30
| 51,265,260
| 2
| 1
| null | 2016-02-07T20:39:34
| 2016-02-07T20:39:33
| null |
UTF-8
|
Python
| false
| false
| 2,652
|
py
|
"""
Test suite for ces.py module.
@author : David R. Pugh
@date : 2014-12-08
"""
import nose
import numpy as np
from .... models.solow import ces
params = {'A0': 1.0, 'g': 0.02, 'L0': 1.0, 'n': 0.02, 's': 0.15,
'alpha': 0.33, 'sigma': 1.1, 'delta': 0.05}
model = ces.CESModel(params)
def test_steady_state():
    """Compare the analytic steady state with the numerical root-finder
    over a coarse grid of parameter combinations."""
    eps = 1e-1
    # Shared sample grids: growth rates, unit-interval shares, and sigma.
    rate_grid = np.linspace(eps, 0.05, 4)
    share_grid = np.linspace(eps, 1 - eps, 4)
    sigma_grid = np.linspace(eps, 2.0, 4)
    for g in rate_grid:
        for n in rate_grid:
            for s in share_grid:
                for alpha in share_grid:
                    for delta in share_grid:
                        for sigma in sigma_grid:
                            candidate = {'A0': 1.0, 'L0': 1.0, 'g': g,
                                         'n': n, 's': s, 'alpha': alpha,
                                         'delta': delta, 'sigma': sigma}
                            try:
                                model.params = candidate
                                # Numerical precision limits the comparison
                                # to 6 decimal places.
                                nose.tools.assert_almost_equals(
                                    model.steady_state,
                                    model.find_steady_state(1e-12, 1e9),
                                    places=6)
                            except AttributeError:
                                # Parameter combinations with no finite
                                # steady state are simply skipped.
                                continue
def test_validate_params():
    """Constructor must raise AttributeError for invalid parameter sets."""
    bad_parameter_sets = [
        # alpha must lie strictly inside (0, 1)
        {'A0': 1.0, 'g': 0.02, 'L0': 1.0, 'n': 0.02, 's': 0.15,
         'alpha': 1.33, 'delta': 0.03, 'sigma': 1.2},
        # sigma must be strictly positive
        {'A0': 1.0, 'g': 0.02, 'L0': 1.0, 'n': 0.02, 's': 0.15,
         'alpha': 0.33, 'delta': 0.03, 'sigma': 0.0},
        # parameters inconsistent with a finite steady state
        {'A0': 1.0, 'g': 0.01, 'L0': 1.0, 'n': 0.01, 's': 0.12,
         'alpha': 0.75, 'delta': 0.01, 'sigma': 2.0},
    ]
    for bad in bad_parameter_sets:
        with nose.tools.assert_raises(AttributeError):
            ces.CESModel(bad)
|
[
"mamckay@gmail.com"
] |
mamckay@gmail.com
|
2c4b41bdd50001fb5c1c59e6267ace6e6328bb99
|
b44b690c96cfbaba35fa3cc32e8da4442adb9fad
|
/Python/0151. Reverse Words in a String.py
|
321978c8c6a222f9d220eacb369fc4854db04e12
|
[] |
no_license
|
faisalraza33/leetcode
|
24d610c6884e218719d82a5c79f1695cb6463d68
|
d7cf4ffba14c6f1ff4551634f4002b53dfeae9b7
|
refs/heads/master
| 2022-08-10T02:05:21.932664
| 2022-07-05T09:59:47
| 2022-07-05T09:59:47
| 238,060,131
| 0
| 0
| null | 2020-02-03T20:54:51
| 2020-02-03T20:54:50
| null |
UTF-8
|
Python
| false
| false
| 1,218
|
py
|
# Given an input string s, reverse the order of the words.
# A word is defined as a sequence of non-space characters. The words in s will be separated by at least one space.
# Return a string of the words in reverse order concatenated by a single space.
# Note that s may contain leading or trailing spaces or multiple spaces between two words. The returned string should only have a single space separating the words. Do not include any extra spaces.
#
# Example 1:
#
# Input: s = "the sky is blue"
# Output: "blue is sky the"
#
# Example 2:
#
# Input: s = " hello world "
# Output: "world hello"
# Explanation: Your reversed string should not contain leading or trailing spaces.
#
# Example 3:
#
# Input: s = "a good example"
# Output: "example good a"
# Explanation: You need to reduce multiple spaces between two words to a single space in the reversed string.
#
# Constraints:
#
# 1 <= s.length <= 10^4
# s contains English letters (upper-case and lower-case), digits, and spaces ' '.
# There is at least one word in s.
class Solution:
    def reverseWords(self, s: str) -> str:
        """Return the words of *s* in reverse order, joined by single spaces.

        str.split() with no arguments collapses runs of whitespace and
        drops leading/trailing blanks, so no extra spaces survive.
        """
        words = s.split()
        words.reverse()
        return " ".join(words)
|
[
"Hongbo.Miao@outlook.com"
] |
Hongbo.Miao@outlook.com
|
dafa4f1cff7207f3e58d1b7ccc1374c0626541d2
|
d993f821da125498b6dfb01792fcd24c83ae7e34
|
/selfself/Custom_Logger.py
|
b5e3ddece1261f6cbd1aac598eaa40a1be777d40
|
[] |
no_license
|
Arjuna1513/Python_Practice_Programs
|
2c8370d927c8bade2d2b0b5bd0345c7d5f139202
|
7c72600d72f68afee62ee64be25d961822429aeb
|
refs/heads/master
| 2020-06-24T02:36:03.186924
| 2019-07-25T14:31:02
| 2019-07-25T14:31:02
| 198,824,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 603
|
py
|
import logging
import inspect
class CustomLogger:
    """Factory for file loggers named after the calling function.

    All loggers append to ``Automation.log`` with a shared line format.
    """

    def custom_logger(self, logLevel=logging.INFO):
        """Return a logger named after the caller, writing to Automation.log.

        logLevel -- threshold applied to the file handler (the logger itself
                    stays at DEBUG so the handler level is what filters).

        Bug fix: the original appended a fresh FileHandler on every call.
        logging.getLogger caches loggers by name, so repeat calls from the
        same function accumulated handlers and duplicated every log record.
        We now only attach a handler the first time a logger is created.
        """
        # Name the logger after the immediate caller's function name.
        caller_name = inspect.stack()[1][3]
        logger = logging.getLogger(caller_name)
        logger.setLevel(logging.DEBUG)
        if not logger.handlers:
            file_handler = logging.FileHandler("Automation.log", mode='a')
            file_handler.setLevel(logLevel)
            formatter = logging.Formatter(
                "%(asctime)s : %(name)s : %(levelname)s : %(message)s",
                datefmt="%d/%m/%Y %H:%M:%S")
            file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)
        return logger
|
[
"malli00022@gmail.com"
] |
malli00022@gmail.com
|
e6b6647a488e26f74160f7438b57eade28904ee2
|
f1961c86e6da14f35c21d7235f4fc8a89fabdcad
|
/DailyProgrammer/DP20120716C.py
|
fbf41c1a899349a2b6e8c51a9ac8c6c23b1ec13b
|
[
"MIT"
] |
permissive
|
DayGitH/Python-Challenges
|
d4930bdd85cd1a977d8f6192775ca956a375fcde
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
refs/heads/master
| 2021-01-17T13:01:03.784523
| 2018-06-29T23:49:04
| 2018-06-29T23:49:04
| 58,497,683
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,015
|
py
|
"""
Write a program that is able to find all words in a Boggle [http://en.wikipedia.org/wiki/Boggle] board. For a word list,
you can use this file [http://code.google.com/p/dotnetperls-controls/downloads/detail?name=enable1.txt].
How many words can you find in the following 10x10 Boggle board?
T N L E P I A C N M
T R E H O C F E I W
S C E R E D A M E A
A O M O G O O F I E
G A C M H N L E R X
T D E R J T F O A S
I T A R T I N H N T
R N L Y I V S C R T
A X E P S S H A C F
I U I I I A I W T T
Thanks to Medicalizawhat for suggesting this problem (and to Cosmologicon for providing a word list and some
commentary) at /r/dailyprogrammer_ideas! If you have a problem that you think would be good for us, why not head
over there and suggest it?
"""
class Node:
    """Trie node, after https://en.wikipedia.org/wiki/Trie#Algorithms."""

    def __init__(self):
        # children: maps a single character to the next Node.
        # value: payload at a terminal node; None while non-terminal.
        self.children, self.value = {}, None
def find(node, key):
    """Walk *key* character by character from *node*.

    Returns the value stored at the terminal node, or None if any
    character of the key has no matching child.
    """
    current = node
    for ch in key:
        child = current.children.get(ch)
        if child is None:
            return None
        current = child
    return current.value
def insert(root, string, value):
    """Insert *string* into the trie rooted at *root*, storing *value*
    at the terminal node (after the Wikipedia trie algorithm)."""
    node = root
    depth = 0
    # Follow existing children as long as the trie already matches.
    while depth < len(string) and string[depth] in node.children:
        node = node.children[string[depth]]
        depth += 1
    # Grow fresh nodes for whatever suffix remains.
    for ch in string[depth:]:
        child = Node()
        node.children[ch] = child
        node = child
    # Mark the terminal node with the payload.
    node.value = value
def create_trie():
    """Build a trie over every word in enable1.txt (one value per word)."""
    root = Node()
    with open('enable1.txt', 'r') as handle:
        for word in handle.read().split():
            insert(root, word, word)
    return root
def main():
    """Build the dictionary trie and probe it for a sample word."""
    print(find(create_trie(), 'decipherer'))


if __name__ == "__main__":
    main()
|
[
"akber91@gmail.com"
] |
akber91@gmail.com
|
af364988e9423021730917fdab606389a84c8240
|
7a1a65b0cda41ea204fad4848934db143ebf199a
|
/automatedprocesses_thirdstage/oath_core_yesterday_media.py
|
cdb09b67be3c5a1a2f87b835ad27edef4a1dd403
|
[] |
no_license
|
bpopovich44/ReaperSec
|
4b015e448ed5ce23316bd9b9e33966373daea9c0
|
22acba4d84313e62dbbf95cf2a5465283a6491b0
|
refs/heads/master
| 2021-05-02T18:26:11.875122
| 2019-06-22T15:02:09
| 2019-06-22T15:02:09
| 120,664,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,227
|
py
|
#!/usr/bin/python2.7
import sys
import json
import time
import datetime
import aol_api
from mysql.connector import MySQLConnection, Error
from python_dbconfig import read_db_config
from data_file import report_book
from data_file import platforms
from data_file import fees
# Run timestamp (for log lines) and yesterday's date string, both local time.
todaysdate = time.strftime("%Y-%m-%d %H:%M:%S")
yesterday = datetime.date.fromordinal(datetime.date.today().toordinal()-1).strftime("%F")
def connect():
    """Fetch yesterday's AOL core-media reports for one platform and load
    them into a rebuilt MySQL table named <platform>_core_yesterday_media.

    The platform name is read from sys.argv[1]; per-platform ids and fee
    multipliers come from data_file.platforms / data_file.fees.
    """
    # """Gets AOL Data and writes them to a MySQL table"""
    db = "mysql_sl"
    report_type = "core_yesterday_media"
    p_name = sys.argv[1]
    p_id = platforms[p_name]["id"]
    gross_rev = platforms[p_name]["fee"]
    r = fees["aol_platform"]
    a_cost = fees["aol_cost"]
    # NOTE(review): this "<name>_revenue" string is shadowed inside the row
    # loop below (platform_rev = x['row'][9]), so it is never actually used.
    platform_rev = p_name + "_revenue"
    db_updated = False
    # Connect To DB:
    db_config = read_db_config(db)
    try:
        #print('Connecting to database...')
        conn = MySQLConnection(**db_config)
        if conn.is_connected():
            #print('Connection established.')
            cursor = conn.cursor()
            # calls get_access_token function and starts script
            logintoken = aol_api.get_access_token(p_name)
            #print logintoken
            for report in report_book[report_type][p_name]:
                result = aol_api.run_existing_report(logintoken, str(report))
                #print(result)
                if len(result) == 0:
                    break
                row_count_value = json.loads(result)['row_count']
                if int(row_count_value) >= 1:
                    # Drop and recreate the destination table once, on the
                    # first report that actually contains rows.
                    if db_updated == False:
                        sql = "DROP TABLE IF EXISTS " + p_name + "_core_yesterday_media"
                        cursor.execute(sql)
                        sql = "CREATE TABLE " + p_name + "_core_yesterday_media (date varchar(25), hour int, inventory_source varchar(255), \
                            media varchar(255), ad_opportunities bigint, market_opportunities bigint, ad_attempts bigint, \
                            ad_impressions bigint, ad_errors bigint, ad_revenue decimal(15, 5), aol_cost decimal(15,5), \
                            epiphany_gross_revenue decimal(15, 5)," + p_name + "_revenue decimal(15, 5), clicks int, \
                            iab_viewability_measurable_ad_impressions bigint, iab_viewable_ad_impressions bigint, platform int)"
                        cursor.execute(sql)
                        db_updated = True
                    print(str(todaysdate) + " Running " + p_name + "_core_yesterday_media report # " + str(report))
                    for x in json.loads(result)['data']:
                        date = x['row'][0]
                        hour = x['row'][1]
                        inventory_source = x['row'][2]
                        media = x['row'][3].replace('"', " ")
                        ad_opportunities = x['row'][4]
                        market_opportunities = x['row'][5]
                        ad_attempts = x['row'][6]
                        ad_impressions = x['row'][7]
                        ad_errors = x['row'][8]
                        # NOTE(review): ad_revenue, aol_cos, epiphany_gross_rev
                        # and platform_rev all read the SAME column, row[9];
                        # the fee multipliers below differentiate them.
                        # Presumably intentional — confirm against the report
                        # schema.
                        ad_revenue = x['row'][9]
                        aol_cos = x['row'][9]
                        epiphany_gross_rev = x['row'][9]
                        platform_rev = x['row'][9]
                        clicks = x['row'][10]
                        iab_viewability_measurable_ad_impressions = x['row'][11]
                        iab_viewable_ad_impressions = x['row'][12]
                        platform = str(p_id)
                        list = (date, hour, inventory_source, media, ad_opportunities, market_opportunities, ad_attempts, \
                            ad_impressions, ad_errors, ad_revenue, aol_cos, epiphany_gross_rev, platform_rev, clicks, \
                            iab_viewability_measurable_ad_impressions, iab_viewable_ad_impressions, platform)
                        #print(list)
                        # The 'dna' platform carries no fees; everything else
                        # is scaled by the configured multipliers.
                        if p_name == 'dna':
                            aol_cost = "0"
                            epiphany_gross_revenue = "0"
                            platform_revenue = "0"
                        else:
                            aol_cost = float(float(aol_cos) * float(a_cost))
                            epiphany_gross_revenue = float(float(epiphany_gross_rev) * float(gross_rev))
                            platform_revenue = float(float(platform_rev) * float(r))
                        # WARNING(review): values are spliced into the SQL via
                        # %-formatting; the free-text media field makes this
                        # fragile/injectable. Prefer a parameterized query:
                        # cursor.execute(sql, params).
                        sql = """INSERT INTO """ + p_name + """_core_yesterday_media VALUES ("%s", "%s", "%s", "%s", "%s", "%s", \
                            "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s")""" % (date, hour, inventory_source, \
                            media, ad_opportunities, market_opportunities, ad_attempts, ad_impressions, ad_errors, ad_revenue, \
                            aol_cost, epiphany_gross_revenue, platform_revenue, clicks, iab_viewability_measurable_ad_impressions, \
                            iab_viewable_ad_impressions, platform)
                        cursor.execute(sql)
                        cursor.execute('commit')
        else:
            print('Connection failed')
    except Error as error:
        print(error)
    finally:
        conn.close()
        #print('Connection closed.')
# Script entry point; the platform name is expected as the first CLI argument.
if __name__ == '__main__':
    connect()
|
[
"bpopovich4@gmail.com"
] |
bpopovich4@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.