Column schema (name · type · observed range):

| column | type | values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |

Each record below is rendered as one metadata row (cells in the column order above, through `extension`), followed by its `content` (the source file) and a closing `| authors | author_id |` row.
| fb02c790e807e4e0ff7b77e6c05b3c41d216d7d9 | 2f3ef2315da7277c0f236d87108de95f0b917adf | /Tetris/FastMonteCarlo.py | 81935c6be495626931992c91e7cabe44159847ef | [] | no_license | Andrew-Fryer/Games | da1567d41e5a9bc1e897022bffdd675143c352a4 | 2579dcc7b0aff3ad171f962e29624cbe4abeca7e | refs/heads/master | 2021-10-25T14:43:53.770963 | 2019-04-04T19:09:25 | 2019-04-04T19:09:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,348 | py |
import sys
import Game
import Option
import pygame
import random
numTestGames = 200
numMoves = 3
class MCTSPlayer(object):
def playGame(self):
game = Game.Game(isRendered=True)
while game.isAlive():
bestOption = Option.Option(None, 0, 0, None, None, None, None)
bestNumLinesCleared = 0
for oriCurrent in range(game.currentPiece.numOrientations()):
for posCurrent in range(Game.gridWidth - game.currentPiece.width(oriCurrent) + 1):
myGame = Game.Game(clone=game)
myGame.currentPiece.ori = oriCurrent
myGame.currentPiece.col = posCurrent
myGame.dropPiece()
# Monte Carlo this game state
averageNumLinesCleared = 0
for i in range(numTestGames):
testGame = Game.Game(clone=myGame)
for moves in range(numMoves):
testGame.currentPiece.ori = random.randint(0, testGame.currentPiece.numOrientations() - 1)
testGame.currentPiece.col = random.randint(0, Game.gridWidth - testGame.currentPiece.width(testGame.currentPiece.ori))
testGame.dropPiece()
averageNumLinesCleared += testGame.numLinesCleared - game.numLinesCleared
averageNumLinesCleared /= numTestGames
if averageNumLinesCleared > bestNumLinesCleared:
bestOption = Option.Option(None, oriCurrent, posCurrent, None, None, None, None)
bestNumLinesCleared = averageNumLinesCleared
game.currentPiece.ori = bestOption.oriCurrent
game.currentPiece.col = bestOption.posCurrent
game.dropPiece()
game.render()
print("here")
game.render()
print("Score: " + str(game.numLinesCleared))
#game.screen.blit(Game.font.render("Score: " + str(game.numLinesCleared), True, (0, 0, 255)), (10, 100))
pygame.display.update()
keyPressed = False
while not keyPressed:
pygame.time.wait(100)
for event in pygame.event.get():
keyPressed = keyPressed or event.type == pygame.KEYDOWN  # unblock only on an actual key press, not e.g. mouse motion
pygame.quit()
#sys.exit()
MCTSPlayer().playGame()
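Despite the MCTS name, the player above is flat Monte Carlo: every legal (orientation, column) placement is scored by the mean lines cleared over numTestGames random rollouts of numMoves pieces, and the best-scoring placement is committed. The scoring loop in isolation, as a minimal sketch with hypothetical legal_moves/simulate helpers (not part of the Game/Option API above):

def flat_monte_carlo_move(state, legal_moves, simulate, n_rollouts=200, depth=3):
    # score every move by the average payoff of short random rollouts
    best_move, best_score = None, float("-inf")
    for move in legal_moves(state):
        score = sum(simulate(state, move, depth) for _ in range(n_rollouts)) / n_rollouts
        if score > best_score:
            best_move, best_score = move, score
    return best_move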
| ["andrewtfryer@gmail.com"] | andrewtfryer@gmail.com |

| e2f0aa31dd82c3ab0ecd58a48bcae21024c0aee2 | 457f33c622d1f97f1c525cdf0b14e9019981d2fc | /maven/downloader.py | 33193417fe177eb051d1ea8d79a5636a3a7177a8 | ["Apache-2.0"] | permissive | IShamraI/corda-rpc-client | d480394569a07d9b7415d54142a91971244e8947 | b5ceff312dd3d1bb48e98066688687693d52d110 | refs/heads/master | 2021-05-26T23:55:14.370700 | 2020-05-07T14:33:25 | 2020-05-07T14:33:25 | 254,195,637 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,276 | py |
import hashlib
import os
from .requestor import Requestor, RequestException
from .resolver import Resolver
from .artifact import Artifact
import sys
import getopt
class Downloader(object):
def __init__(self, base="https://repo1.maven.org/maven2", username=None, password=None):
self.requestor = Requestor(username, password)
self.resolver = Resolver(base, self.requestor)
def download(self, artifact, filename=None, suppress_log=False):
filename = artifact.get_filename(filename)
url = self.resolver.uri_for_artifact(artifact)
if not self.verify_md5(filename, url + ".md5"):
if not suppress_log:
print("Downloading artifact " + str(artifact))
hook=self._chunk_report
else:
hook=self._chunk_report_suppress
onError = lambda uri, err: self._throwDownloadFailed("Failed to download artifact " + str(artifact) + " from " + uri)
response = self.requestor.request(url, onError, lambda r: r)
if response:
with open(filename, 'w') as f:
self._write_chunks(response, f, report_hook=hook)
if not suppress_log:
print("Downloaded artifact %s to %s" % (artifact, filename))
return (artifact, True)
else:
return (artifact, False)
else:
if not suppress_log:
print("%s is already up to date" % artifact)
return (artifact, True)
def _throwDownloadFailed(self, msg):
raise RequestException(msg)
def _chunk_report_suppress(self, bytes_so_far, chunk_size, total_size):
pass
def _chunk_report(self, bytes_so_far, chunk_size, total_size):
percent = float(bytes_so_far) / total_size
percent = round(percent*100, 2)
sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" %
(bytes_so_far, total_size, percent))
if bytes_so_far >= total_size:
sys.stdout.write('\n')
def _write_chunks(self, response, file, chunk_size=8192, report_hook=None):
total_size = response.info().getheader('Content-Length').strip()
total_size = int(total_size)
bytes_so_far = 0
while 1:
chunk = response.read(chunk_size)
bytes_so_far += len(chunk)
if not chunk:
break
file.write(chunk)
if report_hook:
report_hook(bytes_so_far, chunk_size, total_size)
return bytes_so_far
def verify_md5(self, file, remote_md5):
if not os.path.exists(file):
return False
else:
local_md5 = self._local_md5(file)
onError = lambda uri, err: self._throwDownloadFailed("Failed to download MD5 from " + uri)
remote = self.requestor.request(remote_md5, onError, lambda r: r.read())
return local_md5 == remote
def _local_md5(self, file):
md5 = hashlib.md5()
with open(file, 'rb') as f:
for chunk in iter(lambda: f.read(8192), ''):
md5.update(chunk)
return md5.hexdigest()
__doc__ = """
Usage:
%(program_name)s <options> Maven-Coordinate filename
Options:
-m <url> --maven-repo=<url>
-u <username> --username=<username>
-p <password> --password=<password>
Maven-Coordinate are defined by: http://maven.apache.org/pom.html#Maven_Coordinates
The possible options are:
- groupId:artifactId:version
- groupId:artifactId:packaging:version
- groupId:artifactId:packaging:classifier:version
filename is optional. If not supplied the filename will be <artifactId>.<extension>
The filename directory must exist prior to download.
Example:
%(program_name)s "org.apache.solr:solr:war:3.5.0"
"""
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "m:u:p:", ["maven-repo=", "username=", "password="])
except getopt.GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
if not len(args):
print "No maven coordiantes supplied"
usage()
sys.exit(2)
else:
options = dict(opts)
base = options.get("-m")
if not base:
base = options.get("--maven-repo")
if not base:
base = "https://repo1.maven.org/maven2"
username = options.get("-u")
if not username:
username = options.get("--username")
password = options.get("-p")
if not password:
options.get("--password")
dl = Downloader(base, username, password)
artifact = Artifact.parse(args[0])
filename = None
if len(args) == 2:
filename = args[1]
try:
if dl.download(artifact, filename)[1]:  # download() returns (artifact, ok); a bare tuple is always truthy
sys.exit(0)
else:
usage()
sys.exit(1)
except RequestException as e:
print e.msg
sys.exit(1)
def usage():
print(__doc__ % {'program_name': os.path.basename(sys.argv[0])})
if __name__ == '__main__':
main()
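One portability note on _local_md5 above: the iter(lambda: f.read(8192), '') sentinel only terminates under Python 2, where reading a binary file yields str; the rest of the file (print statements, response.info().getheader) is likewise Python 2. A Python 3 equivalent of that helper, for comparison:

import hashlib

def local_md5_py3(path, chunk_size=8192):
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        # b'' sentinel: in Python 3, read() on a binary file returns bytes
        for chunk in iter(lambda: f.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()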
| ["isshamray@gmail.com"] | isshamray@gmail.com |

| a1f4138c41e900107dbd27e714eb9b1f65240994 | 521611492b89ab9692c240542d7a01274af8fd4e | /lab7/task4.py | bbabb08ef47201e88771caf5795d35678d275b54 | [] | no_license | quinterojosuej/cst205Lab4 | 4f6a845f9c7212162982c8f359427c690a1ece38 | ae732e108c14a0077340819ae5f211a703f61f68 | refs/heads/master | 2020-04-20T23:38:31.843853 | 2019-04-13T21:24:04 | 2019-04-13T21:24:04 | 169,173,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,682 | py |
from colormath.color_objects import sRGBColor, LabColor
from colormath.color_conversions import convert_color
from colormath.color_diff import delta_e_cie2000
#import math
from PIL import Image
def distance(color_1, color_2):
rgb_one = (color_1[0],color_1[1],color_1[2])
#print(rgb_one)
rgb_two = (color_2[0],color_2[1],color_2[2])
# start with the two rgb tuples
#rgb_one = (255, 255, 255)
#rgb_two = (0, 0, 0)
# next you have to initialize sRGBColor objects from your tuples
s_rgb_one = sRGBColor(rgb_one[0], rgb_one[1], rgb_one[2])
s_rgb_two = sRGBColor(rgb_two[0], rgb_two[1], rgb_two[2])
# next you convert the sRGBColor object to a LabColor
lab_rgb_one = convert_color(s_rgb_one, LabColor)
lab_rgb_two = convert_color(s_rgb_two, LabColor)
# now you can calculate the distance of the two LabColors using the delta_e function
"""
print("The distance between white and black is", delta_e_cie2000(lab_rgb_one, lab_rgb_two))
print("The distance between black and black is", delta_e_cie2000(lab_rgb_two, lab_rgb_two))
"""
fore = delta_e_cie2000(lab_rgb_one, lab_rgb_two)
#print(fore)
return fore
def chromakey(source, bg):
for x in range(source.width):
for y in range(source.height):
cur_pixel = source.getpixel((x,y))
green = (0, 190, 60)
if distance(cur_pixel, green) < 10:
# grab the color at the same spot from the new background
source.putpixel((x,y), bg.getpixel((x,y)))
source.save("chroma1.png")
weather = Image.open("boiBossess.jpg")
fruit = Image.open("green.jpg")
chromakey(weather, fruit)
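A caveat on the conversion step: colormath's sRGBColor interprets its arguments as 0.0-1.0 floats by default, while getpixel returns 0-255 channels, so the delta-E values (and the < 10 threshold) are computed on upscaled coordinates. A hedged variant of the conversion using colormath's is_upscaled flag (an assumption about intent; the original code does not use it):

from colormath.color_objects import sRGBColor, LabColor
from colormath.color_conversions import convert_color

# is_upscaled=True declares the channels as 0-255 rather than 0.0-1.0
s_rgb = sRGBColor(0, 190, 60, is_upscaled=True)
lab = convert_color(s_rgb, LabColor)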
| ["quinram@LAPTOP-P4UO3BRS.localdomain"] | quinram@LAPTOP-P4UO3BRS.localdomain |

| 01ccbd3c81a8bd38aa1edfa4c45d575369d38eb0 | 5687de67e0ad7d7036457dc725e388f2322d72bc | /Fundamentals/test.py | 6258ab40c792a6981d3551730b499f1ea86071de | [] | no_license | Imranmkt/CodeBreakersCode | caa99c909493fc4e58720f0a5f3c1f92ba437453 | b9a5ad7cebf7757f0d25a98ab872f9bfce666278 | refs/heads/master | 2022-11-20T06:42:07.227844 | 2020-07-13T20:39:35 | 2020-07-13T20:39:35 | 266,943,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py |
def hello_world():
print('Trying something new')
| ["imran@Mds-MacBook-Air.attlocal.net"] | imran@Mds-MacBook-Air.attlocal.net |

| 2f32460151f3690b3107a110060359a2aff08a1e | adb956b716b64964b67b2dd288b85aff23585270 | /src/palmiche/umbrella/block_time_average.py | 3beec47cf5953b6341f99a2d1e7cad20f2889862 | ["Apache-2.0"] | permissive | ale94mleon/Palmiche | 2486e9a8b5e7cca6af8637c16739a2df9c279a9f | b79223b82da21fc12cb04aa9a7527a360e2fc090 | refs/heads/main | 2023-04-30T08:45:41.580009 | 2023-04-26T11:04:54 | 2023-04-26T11:04:54 | 547,183,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,765 | py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from palmiche.utils import xvg, tools
import matplotlib.pyplot as plt
import os
import tqdm
import multiprocessing as mp
import copy
import re
def cmdwham(it = "tpr_files.dat",
if_ = "pullf_files.dat",
is_ = "coordsel.dat",
unit = "kJ",
temperature = 303.15,
out = "out.xvg",
hist = "hist.xvg",
nice = None,
b = None,
e = None):
string = f"gmx wham -it {it} -if {if_} -unit {unit} -is {is_} -temp {temperature} -o {out} -hist {hist}"
if b: string+= f" -b {b}"
if e: string+= f" -e {e}"
if nice: string+= f" -nice {nice}"
string += ''
tools.run(string)
def cmdwham_star(keywords):
return cmdwham(**keywords)
def pmf(time, NumBlocks, out_folder = 'BLOCKS', figsize = (16,9), figname = 'PMF_time_block_average.pdf', plot = True, wham = True, wham_jobs = 1, **keywords):
f"""Give the time block average PMF
Args:
time (int): time in ps
NumBlocks (int): number of blocks to analysis.
out_folder (str, optional): Where all the xvg files will be saved. Defaults to 'BLOCKS'.
figsize (tuple, optional): The size of the figure passed to matplotlib. Defaults to (16,9).
figname (str, optional): The name of the figure to export. Defaults to 'PMF_time_block_average.pdf'.
plot (bool, optional): If True, a figure will be created with name {figname}. Defaults to True.
wham (bool, optional): If True, gmx wham will be executed. Defaults to True.
wham_jobs (int, optional): How many wham jobs are launched in parallel. Defaults to 1.
**keywords: All the necessary options of the function cmdwham.
"""
intervals = range(0, time + int(time/NumBlocks), int(time/NumBlocks))
tools.makedirs(out_folder)
xvg_label_paths = []
list_keywords = []
for (i, init) in enumerate(intervals):
try:
keywords['b'] = init
keywords['e'] = intervals[i+1]
keywords['hist'] = os.path.join(out_folder, f'hist_{init}-{intervals[i+1]}.xvg')
keywords['out'] = os.path.join(out_folder, f'pmf_{init}-{intervals[i+1]}.xvg')
xvg_label_paths.append((f'{init}-{intervals[i+1]}', keywords['out']))
list_keywords.append(copy.deepcopy(keywords))
except:
pass
if wham:
if wham_jobs:
jobs = wham_jobs
else:
jobs = mp.cpu_count()
pool = mp.Pool(jobs)
print(f"Generating PMF with gmx wham. It could take a long time. {jobs} job(s) was(were) launched (in parallel).")
for i in tqdm.tqdm(pool.imap_unordered(cmdwham_star, list_keywords), total=len(list_keywords)):
pass
pool.close()
print("Done!")
whole_data = dict()
for (label, path) in xvg_label_paths:
whole_data[label] = xvg.XVG(path).data
if plot:
fig, ax = plt.subplots(figsize = figsize)
for key in whole_data:
ax.plot(whole_data[key][:,0], whole_data[key][:,1], label=f"{key} ps")
ax.legend(loc="lower right")
ax.set(title = f"Time Block Average",
xlabel = r"$\xi$",
ylabel = "PMF [kJ/mol]")
fig.savefig(figname)
return whole_data
def multi_pmf(time, NumBlocks, path = '.', pattern = 'coord[0-5]_selected.dat', figsize = (16,9), figname = 'PMF_time_block_average.pdf', wham = True, wham_jobs = 1):
linestyle_tuple = [
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 1))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]
pattern = re.compile(pattern)
dats = [dat for dat in tools.list_if_file(path) if pattern.match(dat)]
fig, ax = plt.subplots(figsize = figsize)
if len(dats) > 1:
fig_i, ax_i = plt.subplots(len(dats), 1,figsize = figsize)
for (i,dat) in enumerate(dats):
name = dat.split('.')[0]
print(f'Calculating for {name}...')
pmfData = pmf(time,
NumBlocks,
out_folder = f'BLOCKS_{name}',
plot = False,
wham_jobs = wham_jobs,
wham = wham,
is_ = dat)
for (j, key) in enumerate(pmfData):
try:
linestyle = linestyle_tuple[j][1]
except:
linestyle = linestyle_tuple[j-len(pmfData)-1][1]
ax.plot(pmfData[key][:,0], pmfData[key][:,1], label=f"{name}_{key} ps", linestyle = linestyle)
if len(dats) >1:
ax_i[i].plot(pmfData[key][:,0], pmfData[key][:,1], label=f"{name}_{key} ps")
ax_i[i].legend(loc="lower right")
ax_i[i].set(title = f"Time Block Average",
xlabel = r"$\xi$",
ylabel = "PMF [kJ/mol]",
ylim = (-10,150))
if i != len(dats) -1:
ax_i[i].tick_params('x', labelbottom=False)
ax.legend(loc="lower right")
ax.set(title = f"Time Block Average",
xlabel = r"$\xi$",
ylabel = "PMF [kJ/mol]")
fig.savefig(figname)
fig_i.subplots_adjust(hspace=.5)
fig_i.savefig(f"Split_{figname}", bbox_inches="tight")
if __name__ == '__main__':
pass
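The block boundaries come straight from the range call in pmf; for example time=10000 ps and NumBlocks=5 give edges 0, 2000, ..., 10000 and therefore five (b, e) windows for gmx wham (worked example, values not from the original file):

time_ps, num_blocks = 10000, 5
step = time_ps // num_blocks
edges = list(range(0, time_ps + step, step))
print(edges)                        # [0, 2000, 4000, 6000, 8000, 10000]
print(list(zip(edges, edges[1:])))  # the five (b, e) windows passed to cmdwham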
| ["ale94mleon@gmail.com"] | ale94mleon@gmail.com |

| 9f240e8ec2d7aa6bf90bc8e662f69dc153f7360d | ac32bac45df77083f4ef3115e747038a6753936c | /adapter-transformers-customs/adapter-transformers-stack/src/transformers/adapters/modeling.py | a60c3253fe706707b412edd06c27377ad4712857 | ["Apache-2.0"] | permissive | Yujin-Yujin/rexpert | 13e1d5c4ca55664dd9fbb9a765ea5157a2e0893f | ed8628dc053194fee40e593b1cc5ec45a26c8073 | refs/heads/main | 2023-06-22T05:58:42.269923 | 2021-07-23T06:35:43 | 2021-07-23T06:35:43 | 373,423,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,683 | py |
import math
import torch
from torch import nn
class Activation_Function_Class(nn.Module):
"""
Implementation of various activation function.
"""
def __init__(self, hidden_act):
if hidden_act.lower() == "relu":
self.f = nn.functional.relu
elif hidden_act.lower() == "tanh":
self.f = torch.tanh
elif hidden_act.lower() == "swish":
def swish(x):
return x * torch.sigmoid(x)
self.f = swish
elif hidden_act.lower() == "gelu":
def gelu_new(x):
"""
Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT).
Also see https://arxiv.org/abs/1606.08415
"""
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
self.f = gelu_new
elif hidden_act.lower() == "leakyrelu":
self.f = nn.functional.leaky_relu
super().__init__()
def forward(self, x):
return self.f(x)
# Single Adapter
class Adapter(nn.Module):
"""
Implementation of a single Adapter block.
"""
def __init__(
self,
input_size,
down_sample=None,
non_linearity="relu",
init_bert_weights=True,
add_layer_norm_before=True,
add_layer_norm_after=False,
residual_before_ln=True,
):
super().__init__()
self.input_size = input_size
self.add_layer_norm_before = add_layer_norm_before
self.add_layer_norm_after = add_layer_norm_after
self.residual_before_ln = residual_before_ln
# list for all modules of the adapter, passed into nn.Sequential()
seq_list = []
# If we want to have a layer norm on input, we add it to seq_list
if self.add_layer_norm_before:
self.adapter_norm_before = nn.LayerNorm(self.input_size)
seq_list.append(self.adapter_norm_before)
# if a downsample size is not passed, we just half the size of the original input
self.down_sample = down_sample
if down_sample is None:
self.down_sample = self.input_size // 2
# Linear down projection of the input
seq_list.append(nn.Linear(self.input_size, self.down_sample))
# select non-linearity
self.non_linearity = Activation_Function_Class(non_linearity.lower())
seq_list.append(self.non_linearity)
# sequential adapter, first downproject, then non-linearity then upsample. In the forward pass we include the
# residual connection
self.adapter_down = nn.Sequential(*seq_list)
# Up projection to input size
self.adapter_up = nn.Linear(self.down_sample, self.input_size)
# If we want to have a layer norm on output, we apply it later after a separate residual connection
# This means that we learn a new output layer norm, which replaces another layer norm learned in the bert layer
if self.add_layer_norm_after:
self.adapter_norm_after = nn.LayerNorm(self.input_size)
# if we want to initialize with the bert strategy then this function is called for all the linear layers
if init_bert_weights:
self.adapter_down.apply(self.init_bert_weights)
self.adapter_up.apply(self.init_bert_weights)
def forward(self, x, residual_input): # , residual_input=None):
down = self.adapter_down(x)
up = self.adapter_up(down)
output = up
# apply residual connection before layer norm if configured in this way
if self.residual_before_ln:
output = output + residual_input
# apply layer norm if available
if self.add_layer_norm_after:
output = self.adapter_norm_after(output)
# if residual should be applied after layer norm, apply it here
if not self.residual_before_ln:
output = output + residual_input
return output, down, up
# This is copied from the BertPreTrainedModel class to make this a self containing class.
@staticmethod
def init_bert_weights(module):
"""Initialize the weights."""
if isinstance(module, (nn.Linear, nn.Embedding)):
# std defaults to 0.02, this might need to be changed
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
# Adapter Fusion
class BertFusion(nn.Module):
"""
Implementation of an AdapterFusion block.
"""
def __init__(self, config):
super(BertFusion, self).__init__()
# if config.hidden_size % config.num_attention_heads != 0:
# raise ValueError(
# "The hidden size (%d) is not a multiple of the number of attention "
# "heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.config = config
self.output_attentions = config.output_attentions
self.dense_size = int(config.hidden_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
if (
not self.config.adapter_fusion["query"]
and not self.config.adapter_fusion["key"]
and not self.config.adapter_fusion["value"]
):
self.dense = nn.Linear(self.dense_size, 1)
# if self.config.adapter_fusion["query"]:
# self.query = nn.Linear(int(config.hidden_size), self.dense_size)
# self.query.apply(Adapter.init_bert_weights)
# if self.config.adapter_fusion["key"]:
# self.key = nn.Linear(self.dense_size, self.dense_size)
# self.key.apply(Adapter.init_bert_weights)
if self.config.adapter_fusion["value"]:
self.value = nn.Linear(int(config.hidden_size), int(config.hidden_size), bias=False)
self.value.apply(Adapter.init_bert_weights)
if self.config.adapter_fusion["value_initialized"]:
self.value.weight.data = (
torch.zeros(int(config.hidden_size), int(config.hidden_size)) + 0.000001
).fill_diagonal_(1.0)
# if self.config.adapter_fusion["temperature"]:
# self.T = 50.0
# else:
# self.T = 1.0
# self.reduction = self.T / 1000.0
print("pooh - fusion stack")
# def forward(self, query, key, value, residual):
def forward(self, query, value):
# if self.config.adapter_fusion["residual_before"]:
# value += residual[:, :, None, :].repeat(1, 1, value.size(2), 1)
# if self.config.adapter_fusion["query"]:
# query_layer = self.query(query)
# else:
# query_layer = query
# if self.config.adapter_fusion["key"]:
# key_layer = self.key(key)
# else:
# key_layer = key
# if self.config.adapter_fusion["value"] and self.config.adapter_fusion["value_before_softmax"]:
# # key/value have dims => batch, toks, number-of-adapters, feats
# value_layer = self.value(value)
# else:
# value_layer = value
# # Take the dot product between "query" and "key" to get the raw attention scores.
# attention_scores = torch.squeeze(torch.matmul(query_layer.unsqueeze(2), key_layer.transpose(-2, -1)), dim=2)
# attention_scores = self.dropout(attention_scores)
# # Normalize the attention scores to probabilities.
# attention_probs = nn.Softmax(dim=-1)(attention_scores / self.T)
# self.T = max(self.T - self.reduction, 1.0)
# if not self.training:
# self.recent_attention = attention_probs.detach().cpu().numpy()
# context_layer = torch.squeeze(torch.matmul(attention_probs.unsqueeze(2), value_layer), dim=2)
#pooh check!!!!
context_layer = self.value(value)
# if self.config.adapter_fusion["value"] and not self.config.adapter_fusion["value_before_softmax"]:
# # key/value have dims => batch, toks, feats
# context_layer = self.value(value)
# print("pooh pass here?")
# else:
# context_layer = value
# print("pooh does not pass ")
# if not self.config.adapter_fusion["residual_before"]:
# context_layer += residual
return context_layer
# Invertible Adapters
def get_subnet_constructor(non_linearity, reduction_factor):
def subnet(dims_in, dims_out):
return nn.Sequential(
nn.Linear(dims_in, dims_in // reduction_factor),
Activation_Function_Class(non_linearity),
nn.Linear(dims_in // reduction_factor, dims_out),
)
return subnet
class NICECouplingBlock(nn.Module):
"""Coupling Block following the NICE design."""
def __init__(self, dims_in, dims_c=[], non_linearity="relu", reduction_factor=2):
super().__init__()
channels = dims_in[0][0]
self.split_len1 = channels // 2
self.split_len2 = channels - channels // 2
assert all(
[dims_c[i][1:] == dims_in[0][1:] for i in range(len(dims_c))]
), "Dimensions of input and one or more conditions don't agree."
self.conditional = len(dims_c) > 0
condition_length = sum([dims_c[i][0] for i in range(len(dims_c))])
subnet_constructor = get_subnet_constructor(non_linearity, reduction_factor)
self.F = subnet_constructor(self.split_len2 + condition_length, self.split_len1)
self.G = subnet_constructor(self.split_len1 + condition_length, self.split_len2)
def forward(self, x, c=[], rev=False):
x1, x2 = (x[:, :, : self.split_len1], x[:, :, self.split_len1 :])
if not rev:
x2_c = torch.cat([x2, *c], 1) if self.conditional else x2
y1 = x1 + self.F(x2_c)
y1_c = torch.cat([y1, *c], 1) if self.conditional else y1
y2 = x2 + self.G(y1_c)
else:
x1_c = torch.cat([x1, *c], 1) if self.conditional else x1
y2 = x2 - self.G(x1_c)
y2_c = torch.cat([y2, *c], 1) if self.conditional else y2
y1 = x1 - self.F(y2_c)
return torch.cat((y1, y2), -1)
def jacobian(self, x, rev=False):
return 0
def output_dims(self, input_dims):
assert len(input_dims) == 1, "Can only use 1 input"
return input_dims
class GLOWCouplingBlock(nn.Module):
"""
Coupling Block following the GLOW design. The only difference to the RealNVP coupling blocks, is the fact that it
uses a single subnetwork to jointly predict [s_i, t_i], instead of two separate subnetworks. This reduces
computational cost and speeds up learning. clamp: Soft clamping for the multiplicative component. The amplification
or attenuation of each input dimension can be at most ±exp(clamp).
"""
def __init__(self, dims_in, dims_c=[], non_linearity="relu", reduction_factor=2, clamp=5.0):
super().__init__()
channels = dims_in[0][0]
self.ndims = len(dims_in[0])
self.split_len1 = channels // 2
self.split_len2 = channels - channels // 2
self.clamp = clamp
self.max_s = math.exp(clamp)
self.min_s = math.exp(-clamp)
assert all(
[tuple(dims_c[i][1:]) == tuple(dims_in[0][1:]) for i in range(len(dims_c))]
), f"Dimensions of input and one or more conditions don't agree: {dims_c} vs {dims_in}."
self.conditional = len(dims_c) > 0
condition_length = sum([dims_c[i][0] for i in range(len(dims_c))])
subnet_constructor = get_subnet_constructor(non_linearity, reduction_factor)
self.s1 = subnet_constructor(self.split_len1 + condition_length, self.split_len2 * 2)
self.s2 = subnet_constructor(self.split_len2 + condition_length, self.split_len1 * 2)
def e(self, s):
return torch.exp(self.clamp * 0.636 * torch.atan(s / self.clamp))
def log_e(self, s):
return self.clamp * 0.636 * torch.atan(s / self.clamp)
def forward(self, x, c=[], rev=False):
x1, x2 = (x[:, :, : self.split_len1], x[:, :, self.split_len1 :])
if not rev:
s2, t2 = x1.clone(), x2.clone()
y1 = self.e(s2) * x1 + t2
r1 = self.s1(torch.cat([y1, *c], 1) if self.conditional else y1)
s1, t1 = r1[:, : self.split_len2], r1[:, self.split_len2 :]
y2 = self.e(s1) * x2 + t1
self.last_jac = torch.sum(self.log_e(s1), dim=tuple(range(1, self.ndims + 1))) + torch.sum(
self.log_e(s2), dim=tuple(range(1, self.ndims + 1))
)
else: # names of x and y are swapped!
r1 = self.s1(torch.cat([x1, *c], 1) if self.conditional else x1)
s1, t1 = r1[:, : self.split_len2], r1[:, self.split_len2 :]
y2 = (x2 - t1) / self.e(s1)
r2 = self.s2(torch.cat([y2, *c], 1) if self.conditional else y2)
s2, t2 = r2[:, : self.split_len1], r2[:, self.split_len1 :]
y1 = (x1 - t2) / self.e(s2)
self.last_jac = -torch.sum(self.log_e(s1), dim=tuple(range(1, self.ndims + 1))) - torch.sum(
self.log_e(s2), dim=tuple(range(1, self.ndims + 1))
)
return [torch.cat((y1, y2), 1)]
def jacobian(self, x, c=[], rev=False):
return self.last_jac
def output_dims(self, input_dims):
return input_dims
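The constant 0.636 in e()/log_e() is approximately 2/pi: atan saturates at plus or minus pi/2, so the exponent clamp * 0.636 * atan(s/clamp) stays inside plus or minus clamp, and the multiplicative factor stays inside (min_s, max_s), which is exactly the soft clamping the GLOW docstring describes. A standalone numeric check:

import math

clamp = 5.0
for s in (0.0, 5.0, 1e6):
    factor = math.exp(clamp * 0.636 * math.atan(s / clamp))
    print(factor)  # 1.0 at s=0; approaches exp(clamp) ~ 148.4 as s grows, never exceeds it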
| ["yujin000731@yonsei.ac.kr"] | yujin000731@yonsei.ac.kr |

| 79f5cd2344c6dc4fbea6b3a38bb221c0b66c5044 | 866b6fdda19b70bd9381f99ab4acde097ddbf9d4 | /lesson_4/task_4.5.py | 3f82e12d2396317736c868112a5eec8f7ff45864 | [] | no_license | yarikmik/geekbrains_homework | 6b8279d57e9e71eeef4c969b1ccbdca2359b147d | cc68a3ea1ce343bd4dc28d9d32b838558b53b691 | refs/heads/master | 2023-01-19T03:03:12.536035 | 2020-12-05T06:05:11 | 2020-12-05T06:05:11 | 301,085,631 | 0 | 0 | null | 2020-12-05T06:05:12 | 2020-10-04T09:11:25 | Python | UTF-8 | Python | false | false | 643 | py |
"""
Реализовать формирование списка, используя функцию range() и возможности генератора.
В список должны войти четные числа от 100 до 1000 (включая границы).
Необходимо получить результат вычисления произведения всех элементов списка.
Подсказка: использовать функцию reduce().
"""
from functools import reduce
def calculate(val1, val2):
return val1 * val2
print(reduce(calculate, [i for i in range(100, 1001) if i % 2 == 0]))
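The same product can be spelled without the helper function, using operator.mul and a stepped range (equivalent result, shown for comparison):

from functools import reduce
from operator import mul

print(reduce(mul, range(100, 1001, 2)))  # even numbers from 100 to 1000 inclusive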
| ["yarik.mik@gmail.com"] | yarik.mik@gmail.com |

| 4719319086089f102ad8ae04be753b9057594527 | c7521583847e8141109efce75a464236c994725c | /manage.py | 6e16ac9454131335f23c833874ac7468e1fae609 | [] | no_license | EquipeDevelopers/projeto_01 | 558aff390df3425cda0c8755467203edfab7fe27 | f8558b4fc5e0720ef439da1460d69b5a3c0039fb | refs/heads/master | 2016-08-12T23:52:09.268334 | 2015-12-11T16:03:13 | 2015-12-11T16:03:13 | 47,828,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "projeto_01.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| ["gguedes10@outlook.com"] | gguedes10@outlook.com |

| 8057cdc545d53909b42c18954d86a19430ed7901 | 5867d16cf87b3c38160fc1b71e3147e1c4efa1d4 | /cities.py | d217357680ee43fd5f250465f0f0fbd8018d075a | [] | no_license | usr401/persona-generator | 4d93baf678310240dba3c8efc45368a6f9f8e806 | 5a50e9ca0aa9538cc1a60ec803ee8defc738413e | refs/heads/master | 2022-12-23T19:39:59.426401 | 2020-08-23T09:35:42 | 2020-08-23T09:35:42 | 287,795,826 | 0 | 1 | null | 2020-10-02T01:36:53 | 2020-08-15T17:41:49 | Python | UTF-8 | Python | false | false | 2,568 | py |
city1m = ('boston','new york','new jersey','philadelphia','washington','baltimore','atlanta','miami','chicago','seattle','omaha','raleigh','columbus','jacksonville',
'los angeles','minneapolis','st. louis','portland','san francisco','san jose','oakland','sacramento','san diego','little rock','denver','las vegas','fresno',
'houston','dallas','austin','san antonio','new orleans','kansas city','detroit','milwaukee','cleveland','phoenix','wichita','orlando','salt lake city','anchorage',
'tampa bay','charlotte','indianapolis','memphis','nashville','pittsburgh','tucson','tulsa','cincinnati','norfolk','louisville','oklahoma city','rochester','omaha')
ger_city = ('berlin','hamburg','munich','cologne','frankfurt','stuttgart','Düsseldorf','dortmund','essen','leipzig','bremen','dresden','hanover','Mönchengladbach',
'nuremberg','duisburg','bochum','wuppertal','bielefeld','bonn','Münster','karlsruhe','mannheim','augsburg','wiesbaden','Gelsenkirchen','braunschweig',
'chemnitz','kiel','aachen')
fra_city = ('paris','marseille','lyon','toulouse','nice','nantes','montpellier','strasbourg','bordeaux','lille','rennes','reims','toulon','le havre','grenoble',
'dijon','angers','nimes','Saint-Étienne')
ind_city = ('Mumbai','Delhi','Bangalore','Hyderabad','Ahmedabad','Chennai','Kolkata','Surat','Pune','Jaipur','Lucknow','Kanpur','Nagpur','Indore','Thane','Bhopal',
'Visakhapatnam','Patna','Vadodara','Ghaziabad','Ludhiana','Agra','Nashik','Faridabad','Meerut','Rajkot','Varanasi','Srinagar','Aurangabad','Dhanbad',
'Amritsar','Jabalpur','Vijayawada','Jodhpur','Madurai','Raipur','Kota')
span_city = ('Mexico City','Barcelona','Madrid','Seville','Bogota','lima','bilbao','valencia','santiago','quito','guayaquil','cali','medellin','buenos aires',
'montevideo','zaragoza','Málaga','caracas','baranquilla','Maracaibo','Córdoba','rosario','cartagena','panama city','Guadalajara','Ciudad Juárez',
'tijuana','Monterrey')
rus_city = ('st. petersburg','murmansk','moscow','voronezh','nizhny novogord','kazan','perm','saratov','rostov','krasnodar','sochi','volgograd','samara','ufa',
'yekaterinburg','chelyabinsk','omsk','novosbirsk','krasnoyarsk','khabarovsk','vladivostok','yaroslavl','tomsk','irkutsk','tyumen','tolyatti','izhevsk',
'barnaul','kirov','ulyanovsk','orenburg','ryazan','astrakhan','makhachkala')
a = len(city1m)
b = len(ger_city)
c = len(fra_city)
d = len(ind_city)
e = len(span_city)
f = len(rus_city)
print(a+b+c+d+e+f)
| ["l.hall@tutanota.com"] | l.hall@tutanota.com |

| 516f74e95b919d711026e7833cbd8c43c03c3dcd | 36f1a2dbaed722cdfdc65c0e7f843c4f06415807 | /backend/backend/settings.py | 3d8319ec232ba47a807d5f738bd9a80198ec443b | [] | no_license | Hashem-Poya/django-react-todo-app | 9fbfb91dfa074e9ef62eb6b109800c6fd781e027 | 032d880984f3129d38694aaa5c30f7f861383f6f | refs/heads/main | 2023-06-05T04:36:05.167708 | 2021-06-15T11:59:08 | 2021-06-15T11:59:08 | 368,508,952 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,329 | py |
"""
Django settings for backend project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ''
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'todo'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware'
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
CORS_ORIGIN_WHITELIST = [
'http://localhost:3000'
]
| ["poya.kpu.2020@gmail.com"] | poya.kpu.2020@gmail.com |

| 1bb4e6bb9cbf22ae6c4168b5ed9e9b700c6077f4 | ae7b262ecd72f2fac76c7fe2cff3b8efd7224cb9 | /Next Permutation.py | cdfab34762bb67edb7b6b358af83d64b0b3c6b4a | [] | no_license | FengFengHan/LeetCode | 02a8041f7413b14bed5ac17af1ba68237b159959 | c2d449f2a93815f31c432805a6b4b8008d09d3df | refs/heads/master | 2021-01-10T18:44:06.374304 | 2016-04-16T13:18:55 | 2016-04-16T13:18:55 | 56,384,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 13 21:46:32 2015
@author: HAN
"""
class Solution(object):
def nextPermutation(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
if len(nums) == 0:
return
i = len(nums) - 1
while i > 0:
if nums[i] > nums[i-1]:
break
i -= 1
lastDecPos = i
# the whole list is non-increasing (this is the last permutation), so wrap around by reversing
if lastDecPos == 0:
nums.reverse()
return
for j in range(len(nums)-1,lastDecPos-1,-1):
if nums[j] > nums[lastDecPos-1]:
nums[lastDecPos-1],nums[j] = nums[j], nums[lastDecPos-1]
break
self.reverse_(nums,lastDecPos,len(nums)-1)
def reverse_(self,nums,start,end):
mid = (start + end)//2
for i in range(start,mid+ 1):
nums[i], nums[start+end - i] = nums[start+end-i],nums[i]
x = [1,2]
s = Solution()
ans = s.nextPermutation(x)
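A concrete trace of the three steps: for x = [1, 3, 2] the scan stops at the ascent 1 < 3 (lastDecPos = 1), the pivot nums[0] = 1 is swapped with the rightmost larger suffix element 2, and the suffix is reversed, yielding [2, 1, 3]:

x = [1, 3, 2]
Solution().nextPermutation(x)
print(x)  # [2, 1, 3]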
| ["HAN@HandeMacBook-Pro.local"] | HAN@HandeMacBook-Pro.local |

| 0cf499645379170f62ded149ef94469fe24625b9 | 51bd78dadd37fe06c5b933c60b86d19f335adc73 | /submissions/5748bc2b63905b3a11d97cca/src/grabber.py | d0931a7d36c7f5233e45523da4095101e2cdafdc | [] | no_license | jsdelivrbot/challenge_word_classifier | f78202f3d91b2f69f4daca3ef6917970d9567b18 | cf80e6489c5310cdc90b25312603d6d47bf8c4dd | refs/heads/master | 2020-04-10T06:11:10.736995 | 2018-12-07T16:22:13 | 2018-12-07T16:22:13 | 160,847,871 | 0 | 0 | null | 2018-12-07T16:22:24 | 2018-12-07T16:22:23 | null | UTF-8 | Python | false | false | 1,354 | py |
import urllib
import sys
import json
url = 'https://hola.org/challenges/word_classifier/testcase'
def Usage():
print 'Usage', sys.argv[0], '-i/-o', 'seedfile', '[n]'
def GrabData(f, seed):
obj = json.load(f)
fout = open('data/' + str(seed) + '.csv', 'w')
for k in obj:
fout.write(k + ',' + str(int(obj[k])) + '\n')
def GetSeed():
f = urllib.urlopen(url)
seed = int(f.geturl().split('/')[-1])
GrabData(f, seed)
f.close()
return seed
def Grab(seed):
f = urllib.urlopen(url + '/' + str(seed))
GrabData(f, seed)
f.close()
def main():
if len(sys.argv) < 3 or len(sys.argv) > 4:
Usage()
return
if sys.argv[1] == '-i':
cnt = 0
for line in open(sys.argv[2]):
cnt += 1
cur = 0
for line in open(sys.argv[2]):
print cur, '/', cnt
Grab(int(line))
cur += 1
print cnt, '/', cnt
elif sys.argv[1] == '-o':
try:
cnt = int(sys.argv[3])
except:
Usage()
return
with open(sys.argv[2], 'w') as f:
for i in xrange(cnt):
print i, '/', cnt
seed = GetSeed()
f.write(str(seed) + '\n')
print cnt, '/', cnt
else:
Usage()
if __name__ == '__main__':
main()
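For reference, the two modes that Usage hints at, derived from the argv handling above (Python 2, like the rest of the file):

# python grabber.py -o seeds.txt 10   -> fetch 10 fresh testcases, record their seeds in seeds.txt
# python grabber.py -i seeds.txt      -> re-download the testcase for every seed listed in seeds.txt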
| ["alexey@feldgendler.ru"] | alexey@feldgendler.ru |

| eb7d06aec0d93bef3f8423c94ae2c2c976d2796d | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /Fe6wvtjcNFwuANuLu_19.py | ea0dfdcba502e446d75aff8a616b5d1dcaafd878 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py |
"""
A game of table tennis almost always sounds like _Ping!_ followed by _Pong!_
Therefore, you know that Player 2 has won if you hear _Pong!_ as the last
sound (since Player 1 didn't return the ball back).
Given a list of _Ping!_ , create a function that inserts _Pong!_ in between
each element. Also:
* If `win` equals `True`, end the list with _Pong!_.
* If `win` equals `False`, end with _Ping!_ instead.
### Examples
ping_pong(["Ping!"], True) ➞ ["Ping!", "Pong!"]
ping_pong(["Ping!", "Ping!"], False) ➞ ["Ping!", "Pong!", "Ping!"]
ping_pong(["Ping!", "Ping!", "Ping!"], True) ➞ ["Ping!", "Pong!", "Ping!", "Pong!", "Ping!", "Pong!"]
### Notes
* You will always return the ball (i.e. the Pongs are yours).
* Player 1 serves the ball and makes _Ping!_.
* Return a list of strings.
"""
def ping_pong(lst, win):
i = 0
while i < len(lst):
if lst[i] == "Ping!": lst.insert(i+1,"Pong!")
i += 1
if win == False: lst.pop()
return lst
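An equivalent spelling that avoids repeated mid-list insert calls (the same three examples from the docstring hold; shown as an alternative sketch, not the original solution):

def ping_pong_alt(lst, win):
    out = []
    for ping in lst:
        out += [ping, "Pong!"]
    return out if win else out[:-1]  # drop the final "Pong!" when Player 2 doesn't win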
| ["daniel.reich@danielreichs-MacBook-Pro.local"] | daniel.reich@danielreichs-MacBook-Pro.local |

| fe1204a3a03aa0707af12ab325f65c25e63bdf13 | b394bb6bd3e8848688b525f55e82962f152c1bb3 | /demos/upload/error_and_fp/Density of Floating Point Numbers.py | c901d4a9ab13acedb1a5355a3a4efcacb6c7c8ee | [] | no_license | lukeolson/cs450-f20-demos | 02c2431d7696348cf9ca1ab67bdd5c44a97ac38b | 040e7dfa15c68f7f426cf69655cb600926f9f626 | refs/heads/master | 2023-01-22T19:12:33.394521 | 2020-12-03T19:48:18 | 2020-12-03T19:48:18 | 288,542,898 | 5 | 10 | null | 2020-10-05T19:39:07 | 2020-08-18T19:13:52 | null | UTF-8 | Python | false | false | 762 | py |
#!/usr/bin/env python
# coding: utf-8
# # Density of Floating Point Numbers
#
# This notebook enumerates all possible floating point numbers in a floating point system and shows them in a plot to illustrate their density.
# In[ ]:
import matplotlib.pyplot as pt
import numpy as np
# In[ ]:
significand_bits = 4
exponent_min = -3
exponent_max = 4
fp_numbers = []
for exp in range(exponent_min, exponent_max+1):
for sbits in range(0, 2**significand_bits):
significand = 1 + sbits/2**significand_bits
fp_numbers.append(significand * 2**exp)
fp_numbers = np.array(fp_numbers)
print(fp_numbers)
pt.plot(fp_numbers, np.ones_like(fp_numbers), "+")
#pt.semilogx(fp_numbers, np.ones_like(fp_numbers), "+")
# In[ ]:
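The clustering visible in the plot follows from the spacing rule: within one binade the gap between neighbours is 2**exp / 2**significand_bits, so it doubles at every exponent step. A quick check for this toy system:

significand_bits = 4
for exp in (-3, 0, 4):
    print(exp, 2.0**exp / 2**significand_bits)  # gaps: 0.0078125, 0.0625, 1.0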
| ["luke.olson@gmail.com"] | luke.olson@gmail.com |

| 20c8f218f1668b24be6542df295e7cb324172941 | 08a0ef9d837c1e38facedd1b4e0e79185d607d68 | /linux_system_inspector.py | 2e4ec93479d37f2ef6a3b2dc028499cc8419c873 | [] | no_license | Yuweiluo1618/Python-Advanced-Project | 2aa7888a81280366a17d6a75981379e87792dfd0 | a372307a225af67f4f29063bca0c9daa19757053 | refs/heads/main | 2023-02-12T02:45:40.013414 | 2021-01-13T09:58:49 | 2021-01-13T09:58:49 | 309,018,737 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py |
#import the module
import psutil
import datetime
#CPU usage info
cpu_per = psutil.cpu_percent(interval=0.5)
#Memory info
memory_info = psutil.virtual_memory()
#hard disk info
hard_disk_info = psutil.disk_usage('/')
#Network info
net_info = psutil.net_io_counters()
#obtain system time
current_time = datetime.datetime.now().strftime("%F %T")
#print info
log_str = f'\t\tTime \t\t\t\t CPU info (core:{psutil.cpu_count(logical=False)}) \t\t\t\t Memory info (total:{memory_info.total/1024/1024/1024}G) \t\t\t\t Hard disk info (total: {hard_disk_info.total/1024/1024/1024}G) \t\t\t\t Network info \n'
log_str += f'{current_time} \t\t\t\t{cpu_per}% \t\t\t\t\t\t\t\t {memory_info.percent}% \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t {hard_disk_info.percent}% \t\t\t\t\t\t Rec: {net_info.bytes_recv} / Send: {net_info.bytes_sent}'
print(log_str)
# save info to file (append a trailing newline so successive runs land on separate lines)
with open('log.txt', 'a') as f:
    f.write(log_str + '\n')
| ["yuwei.luo@alumni.ubc.ca"] | yuwei.luo@alumni.ubc.ca |

| e1ae328c44bfc6fb4a8b5699f3b23faaa960937e | 182589e45edfc256de5fab9a44edf09a5cbc5dae | /car_manager.py | 9a3476304521f6e56f87718d17115b980f9ef700 | [] | no_license | DEEPANSHURAJORA/Turtle_game | 26367a0dc417f23fd372c19980a1f86bccb2bd03 | e66810e84ddf9ed2b48bcd5c421f0066c7bb9b30 | refs/heads/main | 2023-07-28T18:35:43.288390 | 2021-09-09T08:57:09 | 2021-09-09T08:57:09 | 404,652,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py |
from turtle import Turtle
import random
COLORS = ["red", "orange", "yellow", "green", "blue", "purple"]
STARTING_MOVE_DISTANCE = 5
MOVE_INCREMENT = 10
class Carmanager:
def __init__(self):
self.all_cars = []
self.car_speed = STARTING_MOVE_DISTANCE
def create_cars(self):
random_chance = random.randint(1,6)
if random_chance == 1:
new_Car = Turtle("square")
new_Car.shapesize(stretch_len=2,stretch_wid=1)
new_Car.penup()
new_Car.color(random.choice(COLORS))
random_y = random.randint(-250,250)
new_Car.goto(300,random_y)
self.all_cars.append(new_Car)
def move_car(self):
for car in self.all_cars:
car.backward(self.car_speed)  # use the current speed so level() actually speeds the cars up
def level(self):
self.car_speed += MOVE_INCREMENT
| ["noreply@github.com"] | DEEPANSHURAJORA.noreply@github.com |

| 7cccffff6414332630370bcd48fd3fa5b869cc08 | 3f6108f545068d106d895b5ee08619a2cae3b238 | /manage.py | 60adfb0dd63f25f5aa5a586d077508f23ffc7b5b | [] | no_license | PPPW/stock-portfolio-optimizer | c91c4dc6197e7c0c4acb6f6b00661781fb2f4fde | c4429b3ae499eae4ff4ee12df8e33d998ee99405 | refs/heads/master | 2022-12-09T01:54:55.383252 | 2019-10-22T01:34:22 | 2019-10-22T01:34:22 | 146,462,834 | 1 | 0 | null | 2022-12-08T02:47:43 | 2018-08-28T14:50:13 | Vue | UTF-8 | Python | false | false | 445 | py |
from flask_script import Manager
#from flask_migrate import Migrate, MigrateCommand
from app import create_app
app = create_app('development')
#migrate = Migrate(app, db)
manager = Manager(app)
#manager.add_command('db', MigrateCommand)
@manager.command
def test():
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
manager.run()
| ["pei.wang.fudan@gmail.com"] | pei.wang.fudan@gmail.com |

| 77531e6278a2da0e7e47e3a1e00b32e9cbe5e653 | 63cb74c1e014fd9ec7e6c3a701e11ff47e53cc50 | /redesocial/connectedin/connectedin/asgi.py | 449329060784c3f16c49d3aeff606e4c56509891 | [] | no_license | FelipeAlvesDeSouza/django_guilu_ | d8d578892ddad9bd4281e14b94cd8ee4f546aaa8 | ead744fd8bef33e7f6f16c34da0c8c6e066d1f00 | refs/heads/master | 2022-06-23T16:06:00.466276 | 2020-03-18T20:24:50 | 2020-03-18T20:24:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py |
"""
ASGI config for connectedin project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'connectedin.settings')
application = get_asgi_application()
| ["felipealvess_hpg_@hotmail.comgit config --global user.name F3lipefifa14"] | felipealvess_hpg_@hotmail.comgit config --global user.name F3lipefifa14 |

| 94fa8f70532d78f37dd0a5165bf490e8c88f8571 | bc8d3c824eaa902f7a38f4467404bad7472c013a | /WordEmbed/seq2seq/seq2seq_translation.py | 9b8f260aaa52930aea4b676eb469d1df49e56711 | [] | no_license | gaoming96/TorchFlow | 217389dc71e94bc2f8276c0a587584051ddc837d | 3dc5d6e97cd9b5ef8ffe2ace17af83f907522b36 | refs/heads/master | 2020-04-10T11:45:55.492552 | 2019-01-18T05:34:58 | 2019-01-18T05:34:58 | 161,002,084 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,505 | py |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import re
import random
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device=torch.device('cpu')
######################################################################
SOS_token = 0
EOS_token = 1
class Lang:
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS"}
self.n_words = 2 # Count SOS and EOS
def addSentence(self, sentence):
for word in sentence.split(' '):
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
######################################################################
# To read the data file we will split the file into lines, and then split
# lines into pairs. The files are all English → Other Language, so if we
# want to translate from Other Language → English I added the ``reverse``
# flag to reverse the pairs.
#
def readLangs(lang1, lang2, reverse=False):
print("Reading lines...")
# Read the file and split into lines
lines = open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8').\
read().strip().split('\n')
# Split every line into pairs and normalize
pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
# Reverse pairs, make Lang instances
if reverse:
pairs = [list(reversed(p)) for p in pairs]
input_lang = Lang(lang2)
output_lang = Lang(lang1)
else:
input_lang = Lang(lang1)
output_lang = Lang(lang2)
return input_lang, output_lang, pairs
######################################################################
# Since there are a *lot* of example sentences and we want to train
# something quickly, we'll trim the data set to only relatively short and
# simple sentences. Here the maximum length is 10 words (that includes
# ending punctuation) and we're filtering to sentences that translate to
# the form "I am" or "He is" etc. (accounting for apostrophes replaced
# earlier).
#
MAX_LENGTH = 10
eng_prefixes = (
"i am ", "i m ",
"he is", "he s ",
"she is", "she s",
"you are", "you re ",
"we are", "we re ",
"they are", "they re "
)
def filterPair(p):
return len(p[0].split(' ')) < MAX_LENGTH and \
len(p[1].split(' ')) < MAX_LENGTH and \
p[1].startswith(eng_prefixes)
def filterPairs(pairs):
return [pair for pair in pairs if filterPair(pair)]
######################################################################
# The full process for preparing the data is:
#
# - Read text file and split into lines, split lines into pairs
# - Normalize text, filter by length and content
# - Make word lists from sentences in pairs
#
def prepareData(lang1, lang2, reverse=False):
input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
print("Read %s sentence pairs" % len(pairs))
pairs = filterPairs(pairs)
print("Trimmed to %s sentence pairs" % len(pairs))
print("Counting words...")
for pair in pairs:
input_lang.addSentence(pair[0])
output_lang.addSentence(pair[1])
print("Counted words:")
print(input_lang.name, input_lang.n_words)
print(output_lang.name, output_lang.n_words)
return input_lang, output_lang, pairs
input_lang, output_lang, pairs = prepareData('eng', 'fra', True)
print(random.choice(pairs))
######################################################################
# Preparing Training Data
def indexesFromSentence(lang, sentence):
return [lang.word2index[word] for word in sentence.split(' ')]
def tensorFromSentence(lang, sentence):
indexes = indexesFromSentence(lang, sentence)
indexes.append(EOS_token)
return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
def tensorsFromPair(pair):
input_tensor = tensorFromSentence(input_lang, pair[0])
target_tensor = tensorFromSentence(output_lang, pair[1])
return (input_tensor, target_tensor)
# test
pairs[100]
a,b=tensorsFromPair(pairs[100])
a.size()
b.size()
a
b
######################################################################
# The Encoder
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size):
super(EncoderRNN, self).__init__()
self.hidden_size = hidden_size
self.embedding = nn.Embedding(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
def forward(self, input, hidden):
embedded = self.embedding(input).view(-1, 1, self.hidden_size)  # was hard-coded 256; use the configured hidden size
output = embedded
output, hidden = self.gru(output, hidden)
return output, hidden
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
#test
h=EncoderRNN(4489,256).initHidden()
a=torch.tensor([[ 6],
[88],
[ 5],
[ 1]])
a
o,s=EncoderRNN(4489,256)(a[1],h)
o.size()
s.size()
o[0,0].size()
o,s=EncoderRNN(4489,256)(a.view(-1),h)
o.size()
######################################################################
# Simple Decoder
class DecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size):
super(DecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.embedding = nn.Embedding(output_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, input, hidden):
output = self.embedding(input).view(1, 1, -1)
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = self.softmax(self.out(output[0]))
return output, hidden
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
######################################################################
# Attention Decoder
class AttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
super(AttnDecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.dropout_p = dropout_p
self.max_length = max_length
self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.dropout = nn.Dropout(self.dropout_p)
self.gru = nn.GRU(self.hidden_size, self.hidden_size)
self.out = nn.Linear(self.hidden_size, self.output_size)
def forward(self, input, hidden, encoder_outputs):
embedded = self.embedding(input).view(1, 1, -1)
embedded = self.dropout(embedded)
attn_weights = F.softmax(
self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
attn_applied = torch.bmm(attn_weights.unsqueeze(0),
encoder_outputs.unsqueeze(0))
output = torch.cat((embedded[0], attn_applied[0]), 1)
output = self.attn_combine(output).unsqueeze(0)
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = F.log_softmax(self.out(output[0]), dim=1)
return output, hidden, attn_weights
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
######################################################################
# Training the Model
teacher_forcing_ratio = 0.5
def train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
encoder_hidden = encoder.initHidden()
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
input_length = input_tensor.size(0)
target_length = target_tensor.size(0)
encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
loss = 0
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(
input_tensor[ei], encoder_hidden)
encoder_outputs[ei] = encoder_output[0, 0]
decoder_input = torch.tensor([[SOS_token]], device=device)
decoder_hidden = encoder_hidden
use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
if use_teacher_forcing:
# Teacher forcing: Feed the target as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
loss += criterion(decoder_output, target_tensor[di])
decoder_input = target_tensor[di] # Teacher forcing
else:
# Without teacher forcing: use its own predictions as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
topv, topi = decoder_output.topk(1)
decoder_input = topi.squeeze().detach() # detach from history as input
loss += criterion(decoder_output, target_tensor[di])
if decoder_input.item() == EOS_token:
break
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
return loss.item() / target_length
######################################################################
# This is a helper function to print time elapsed and estimated time
# remaining given the current time and progress %.
#
import time
import math
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
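A quick sanity check of the two timing helpers (assuming the call happens 120 s after since, halfway through training):

import time

start = time.time() - 120
print(timeSince(start, 0.5))  # "2m 0s (- 2m 0s)": elapsed time, then estimated time remaining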
######################################################################
def trainIters(encoder, decoder, n_iters, print_every=1000, plot_every=100, learning_rate=0.01):
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
training_pairs = [tensorsFromPair(random.choice(pairs))
for i in range(n_iters)]
criterion = nn.NLLLoss()
for iter in range(1, n_iters + 1):
training_pair = training_pairs[iter - 1]
input_tensor = training_pair[0]
target_tensor = training_pair[1]
loss = train(input_tensor, target_tensor, encoder,
decoder, encoder_optimizer, decoder_optimizer, criterion)
print_loss_total += loss
plot_loss_total += loss
if iter % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
iter, iter / n_iters * 100, print_loss_avg))
if iter % plot_every == 0:
plot_loss_avg = plot_loss_total / plot_every
plot_losses.append(plot_loss_avg)
plot_loss_total = 0
showPlot(plot_losses)
######################################################################
# Plotting results
# ----------------
#
# Plotting is done with matplotlib, using the array of loss values
# ``plot_losses`` saved while training.
#
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.ticker as ticker
import numpy as np
def showPlot(points):
plt.figure()
fig, ax = plt.subplots()
# this locator puts ticks at regular intervals
loc = ticker.MultipleLocator(base=0.2)
ax.yaxis.set_major_locator(loc)
plt.plot(points)
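# Usage sketch: showPlot([5.2, 4.1, 3.3]); with the 'agg' backend selected
# above, follow with plt.savefig('loss.png') to persist the figure.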
######################################################################
# Evaluation
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):
with torch.no_grad():
input_tensor = tensorFromSentence(input_lang, sentence)
input_length = input_tensor.size()[0]
encoder_hidden = encoder.initHidden()
encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(input_tensor[ei],
encoder_hidden)
encoder_outputs[ei] += encoder_output[0, 0]
decoder_input = torch.tensor([[SOS_token]], device=device) # SOS
decoder_hidden = encoder_hidden
decoded_words = []
decoder_attentions = torch.zeros(max_length, max_length)
for di in range(max_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
decoder_attentions[di] = decoder_attention.data
topv, topi = decoder_output.data.topk(1)
if topi.item() == EOS_token:
decoded_words.append('<EOS>')
break
else:
decoded_words.append(output_lang.index2word[topi.item()])
decoder_input = topi.squeeze().detach()
return decoded_words, decoder_attentions[:di + 1]
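# Usage sketch (assumes encoder1/attn_decoder1 trained below; output varies
# with training):
# words, attn = evaluate(encoder1, attn_decoder1, "je suis trop froid .")
# ' '.join(words) -> the decoded English sentence, typically ending in '<EOS>'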
######################################################################
# We can evaluate random sentences from the training set and print out the
# input, target, and output to make some subjective quality judgements:
#
def evaluateRandomly(encoder, decoder, n=10):
for i in range(n):
pair = random.choice(pairs)
print('>', pair[0])
print('=', pair[1])
output_words, attentions = evaluate(encoder, decoder, pair[0])
output_sentence = ' '.join(output_words)
print('<', output_sentence)
print('')
######################################################################
# Training and Evaluating
hidden_size = 256
encoder1 = EncoderRNN(input_lang.n_words, hidden_size).to(device)
attn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words, dropout_p=0.1).to(device)
trainIters(encoder1, attn_decoder1, 75000, print_every=5000)
evaluateRandomly(encoder1, attn_decoder1)
######################################################################
# Visualizing Attention
output_words, attentions = evaluate(
encoder1, attn_decoder1, "je suis trop froid .")
plt.matshow(attentions.numpy())
######################################################################
# For a better viewing experience we will do the extra work of adding axes
# and labels:
#
def showAttention(input_sentence, output_words, attentions):
# Set up figure with colorbar
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(attentions.numpy(), cmap='bone')
fig.colorbar(cax)
# Set up axes
ax.set_xticklabels([''] + input_sentence.split(' ') +
['<EOS>'], rotation=90)
ax.set_yticklabels([''] + output_words)
# Show label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
def evaluateAndShowAttention(input_sentence):
output_words, attentions = evaluate(
encoder1, attn_decoder1, input_sentence)
print('input =', input_sentence)
print('output =', ' '.join(output_words))
showAttention(input_sentence, output_words, attentions)
evaluateAndShowAttention("elle a cinq ans de moins que moi .")
evaluateAndShowAttention("elle est trop petit .")
evaluateAndShowAttention("je ne crains pas de mourir .")
evaluateAndShowAttention("c est un jeune directeur plein de talent .")
|
[
"noreply@github.com"
] |
gaoming96.noreply@github.com
|
a73a244ef78780197126b2f8728cd1b97833a737
|
611969b23cce6d1ab047474c5367f88e4edf8948
|
/Switch Sort.py
|
cb346335ef27085b49d7d532e3e0fd8fccb797ad
|
[] |
no_license
|
shilpipriya/coderbyte-codes
|
ca647fdf9ae5cdd410ecf05f97d6b575de25cf5e
|
4f48d6deda62c84bb9ddff59f1bfc77403ffddd6
|
refs/heads/master
| 2022-03-04T08:57:20.373126
| 2019-10-22T05:36:54
| 2019-10-22T05:36:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
def SwitchSort(arr):
    # minimum swaps to sort a permutation of 1..n, via cycle decomposition:
    # every cycle of length c is resolved with c - 1 swaps
    swap = 0
    visited = [False for i in range(len(arr))]
    for i in range(len(arr)):
        j = i
        counter = 0
        while not visited[j]:
            visited[j] = True
            j = arr[j] - 1
            counter = counter + 1
        if counter != 0:
            swap = swap + (counter - 1)
    return swap
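# Worked example: SwitchSort([3, 1, 2]) visits the cycle 0 -> 2 -> 1 -> 0
# (length 3) and returns 2 swaps.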
# keep this function call here
print SwitchSort(map(int, raw_input().split()))
|
[
"noreply@github.com"
] |
shilpipriya.noreply@github.com
|
ba31670426e646c45bbfebf7c72dcd32b857b4e8
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/cv/classification/Resnet50_Cifar_for_PyTorch/configs/repmlp/repmlp-base_delopy_8xb64_in1k.py
|
8e11dcd2599322a1402464a2cd48c892e47c3243
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 667
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_base_ = ['./repmlp-base_8xb64_in1k.py']
model = dict(backbone=dict(deploy=True))
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
8a12d754e6e3a5e1e5395d8edfd3c7272d264c6f
|
ac6e8799d1051c90bbba6275751b0cf096db2907
|
/Practise_beginner/exp_notification_alert.py
|
afaeea4ea6fd5645f41dda2b6237c525e47c3855
|
[] |
no_license
|
Himanshu-jn20/PythonNPysparkPractice
|
777e9e01df201cb8481a3dc5e1f27a406022be27
|
5ababff00089b8f432c4a28e1277e89009f7897f
|
refs/heads/main
| 2023-04-18T05:31:47.448014
| 2021-03-30T18:14:15
| 2021-03-30T18:16:39
| 348,235,570
| 0
| 0
| null | 2021-03-21T18:52:06
| 2021-03-16T06:17:40
|
Python
|
UTF-8
|
Python
| false
| false
| 764
|
py
|
import bisect

nd = list(map(int, input().rstrip().split()))
n, d = nd
exp_main = list(map(int, input().rstrip().split()))
# keep the trailing d-day window sorted so the median is a direct lookup
exp = sorted(exp_main[0:d])
notification = 0
for i in range(n - d):
    if i > 0:
        # slide the window: drop the day that left, insert the day that entered
        exp.remove(exp_main[i - 1])
        bisect.insort(exp, exp_main[d + i - 1])
    # threshold is twice the median of the current window
    if d % 2 != 0:
        lim = 2 * exp[d // 2]
    else:
        lim = exp[d // 2 - 1] + exp[d // 2]
    if exp_main[d + i] >= lim:
        notification = notification + 1
print(notification)
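# Worked example (assumed input): "5 4" then "1 2 3 4 10" -> window [1,2,3,4],
# median 2.5, threshold 5.0; the day-5 spend 10 >= 5.0, so this prints 1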
|
[
"himanshu.jn20@gmail.com"
] |
himanshu.jn20@gmail.com
|
440ba0371a59b674d7017be9dd26afef0c14b450
|
597c6b8af2a674d927ff3b4f4958e1dc435b1634
|
/gem/embedding/node2vec.py
|
5b844d5596f8fbbdd55f49e7ced351d90a2a445e
|
[] |
no_license
|
THUfl12/NETest
|
4ec37a227ea668bc23b10657a41bda696fe5cdf0
|
227e803308a3c67b8cfa79b9ded7c57453c3a4a9
|
refs/heads/master
| 2021-09-18T20:27:48.967961
| 2018-07-19T11:39:18
| 2018-07-19T11:39:18
| 104,615,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,151
|
py
|
disp_avlbl = True
from os import environ
if 'DISPLAY' not in environ:
disp_avlbl = False
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import scipy.io as sio
import scipy.sparse as sp
import scipy.sparse.linalg as lg
from time import time
import sys
# sys.path.append('./')
from subprocess import call
from static_graph_embedding import StaticGraphEmbedding
from gem.utils import graph_util, plot_util
from gem.evaluation import visualize_embedding as viz
class node2vec(StaticGraphEmbedding):
def __init__(self, d, max_iter, wLen, nWalks, cSize, ret_p, inout_p):
self._d = d
self._max_iter = max_iter
self._walkLength = wLen
self._numWalks = nWalks
self._contextSize = cSize
self._return_p = ret_p
self._inout_p = inout_p
self._method_name = 'node2vec_rw'
self._X = None
def get_method_name(self):
return self._method_name
def get_method_summary(self):
return '%s_%d' % (self._method_name, self._d)
def learn_embedding(self, graph=None, edge_f=None, is_weighted=False, no_python=False):
args = ["./node2vec"]
if not graph and not edge_f:
raise Exception('graph/edge_f needed')
if edge_f:
graph = graph_util.loadGraphFromEdgeListTxt(edge_f)
graph_util.saveGraphToEdgeListTxtn2v(graph, 'tempGraph.graph')
args.append("-i:tempGraph.graph")
args.append("-o:tempGraph.emb")
args.append("-d:%d" % self._d)
args.append("-l:%d" % self._walkLength)
args.append("-r:%d" % self._numWalks)
args.append("-k:%d" % self._contextSize)
args.append("-e:%d" % self._max_iter)
args.append("-p:%f" % self._return_p)
args.append("-q:%f" % self._inout_p)
args.append("-v")
args.append("-dr")
args.append("-w")
t1 = time()
try:
call(args)
except:
raise Exception('./node2vec not found. Please compile snap, place node2vec in the path and grant executable permission')
self._X = graph_util.loadEmbedding('tempGraph.emb')
t2 = time()
return self._X, (t2-t1)
def get_embedding(self):
return self._X
def get_edge_weight(self, i, j):
return np.dot(self._X[i, :], self._X[j, :])
def get_reconstructed_adj(self, X=None, node_l=None):
if X is not None:
node_num = X.shape[0]
self._X = X
else:
node_num = self._node_num
adj_mtx_r = np.zeros((node_num, node_num)) # G_r is the reconstructed graph
for v_i in range(node_num):
for v_j in range(node_num):
if v_i == v_j:
continue
adj_mtx_r[v_i, v_j] = self.get_edge_weight(v_i, v_j)
return adj_mtx_r
if __name__ == '__main__':
# load Zachary's Karate graph
edge_f = '../data/karate.edgelist'
G = graph_util.loadGraphFromEdgeListTxt(edge_f, directed=False)
G = G.to_directed()
res_pre = 'results/testKarate'
print 'Num nodes: %d, num edges: %d' % (G.number_of_nodes(), G.number_of_edges())
t1 = time()
embedding = node2vec(2, 1, 80, 10, 10, 1, 1)
embedding.learn_embedding(graph=G, edge_f=None, is_weighted=True, no_python=True)
print 'node2vec:\n\tTraining time: %f' % (time() - t1)
viz.plot_embedding2D(embedding.get_embedding(), di_graph=G, node_colors=None)
plt.show()
|
[
"1027555983@qq.com"
] |
1027555983@qq.com
|
d74d89c2f1de124ab345feb3df4608f09f295c99
|
27b8eba8387d741156c3c15d1534160da34b484c
|
/fqdemo/server/server/deviceServer.py
|
9d7156a79afd80bc7f521d8faf1c799eda30cb10
|
[] |
no_license
|
Rose-Hulman-ROBO4xx/1415-Realbotics
|
a042d6b1c811bd09d4ebc957b382f1f61352cc2d
|
7593c9ebaa9304f9e0d3d89a4626468a763c6999
|
refs/heads/master
| 2020-12-24T14:26:56.015056
| 2015-05-13T22:01:00
| 2015-05-13T22:01:00
| 24,424,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,602
|
py
|
from wsgiref.simple_server import make_server
from ws4py.websocket import WebSocket
from ws4py.server.wsgirefserver import WSGIServer, WebSocketWSGIRequestHandler
from ws4py.server.wsgiutils import WebSocketWSGIApplication
import re
import threading
import time
Auth = {'testclient': 'testclienttoken', 'testclient2': 'testclienttoken'}
class CommandHistory:
def __init__(self):
self.lock = threading.Lock()
self.items = list()
self.maxSize = 50
def register(self, sender, target, message):
string = '[' + str(int(time.time())) + '] ' + str(sender) + ' -> ' + str(target) + ' : ' + message
self.lock.acquire()
while(len(self.items) > self.maxSize):
self.items.pop(0)
self.items.append(string)
self.lock.release()
def get(self):
self.lock.acquire()
copy = list(self.items)
self.lock.release()
return copy
class DeviceServer:
def __init__(self):
print('Device server starting')
self.clients = dict()
self.history = CommandHistory()
        class MockSocket(WebSocket):
            # 'self2' names the socket instance so that 'self' keeps referring
            # to the enclosing DeviceServer
def received_message(self2, message):
try:
parts = re.match(r'^(\w*) (.*)$', message.data)
messageType = parts.group(1)
messageRest = parts.group(2)
except Exception as e:
self2.close(1008, 'invalid message: ' + message.data)
return
if(messageType == 'identity'):
self2.identity = messageRest
return self2.register()
elif(messageType == 'token'):
self2.token = messageRest
return self2.register()
else:
self2.close(1002, 'invalid message type ' + messageType)
return
def opened(self2):
self2.identity = None
self2.token = None
self2.registered = False
print('Client connected from ' + str(self2.peer_address))
def closed(self2, code, reason=None):
if self2.registered and self2.identity in self.clients:
del self.clients[self2.identity]
print('Client disconnected, code ' + str(code) + ', reason ' + str(reason))
def register(self2):
if self2.identity == None:
return
if self2.token == None:
return
if not(self2.identity in Auth):
return self2.close(1008, 'No such identity')
if self2.identity in self.clients:
return self2.close(1008, 'That client is already logged in')
if Auth[self2.identity] != self2.token:
return self2.close(1008, 'Invalid token')
self2.registered = True
self.clients[self2.identity] = self2
print('Client ' + self2.identity + ' ready')
self.server = make_server('', 5581, server_class=WSGIServer,
handler_class=WebSocketWSGIRequestHandler,
app = WebSocketWSGIApplication(handler_cls=MockSocket))
self.server.initialize_websockets_manager()
def runner():
try:
self.server.serve_forever()
except KeyboardInterrupt:
return
self.runThread = threading.Thread(target=runner)
self.runThread.start()
server = DeviceServer()
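# Protocol sketch: a client connects over WebSocket to port 5581 and sends
#   "identity testclient"
#   "token testclienttoken"
# (an entry from Auth above) before it is registered in DeviceServer.clients.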
|
[
"dillonscroggin@gmail.com"
] |
dillonscroggin@gmail.com
|
1daf83bbf35823c60ee20b9cc222434d25f9ad0c
|
f5050aa0dd8971c25017473d2b540c0b0233b4d5
|
/lec12/p7.py
|
ef0fa94f6f623423cd4b2ec2d185651ee479fed0
|
[] |
no_license
|
pranav1214/Python-practice
|
718a33aedf798a2ec2326454e3125b48b3e66cb6
|
6f38b1b467c81306c5c340b8ac8ae5c9dc283396
|
refs/heads/master
| 2022-12-22T22:45:00.682611
| 2020-09-19T14:59:18
| 2020-09-19T14:59:18
| 296,891,479
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 709
|
py
|
import socket
import requests
import bs4
import lxml
try:
google = ("www.google.com", 80)
socket.create_connection(google)
print("Connected ")
res = requests.get("https://www.brainyquote.com/quote_of_the_day")
print(res)
s = bs4.BeautifulSoup(res.text, 'lxml')
print(s)
data = s.find('img', {"class":"p-qotd"})
print("data", data)
quote = data['alt']
print("Quote --> ", quote)
# to download image
image_url = "https://www.brainyquote.com" + data["data-img-url"]
f = None
try:
f = open("im1.jpg", "wb")
res = requests.get(image_url)
f.write(res.content)
except Exception as e:
print(e)
finally:
if f is not None:
f.close()
except OSError as e:
print("Issue ", e)
|
[
"pranav1214@gmail.com"
] |
pranav1214@gmail.com
|
9e360dc2ce19e2f083ad03c9b87ef880f1babd6a
|
65f9576021285bc1f9e52cc21e2d49547ba77376
|
/adsp_proc/core/kernel/qurt/build/adsp_link/qdsp6/670.adsp.prod/install/ADSPv65MP/scripts/island_analysis.py
|
d1ed28424d4ee4a89045987e1183f29e51048629
|
[] |
no_license
|
AVCHD/qcs605_root_qcom
|
183d7a16e2f9fddc9df94df9532cbce661fbf6eb
|
44af08aa9a60c6ca724c8d7abf04af54d4136ccb
|
refs/heads/main
| 2023-03-18T21:54:11.234776
| 2021-02-26T11:03:59
| 2021-02-26T11:03:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,216
|
py
|
#===============================================================================
# Copyright (c) 2013 by Qualcomm Technologies, Inc. All Rights Reserved.
# QUALCOMM Proprietary/GTDR
#===============================================================================
import os, sys
#----------------------------------------------------------------------------
# Hooks for SCons
#----------------------------------------------------------------------------
def exists(env):
return env.Detect('island_analysis')
def generate(env):
island_analysis_generate(env)
def island_analysis_generate(env):
env.AddMethod(island_analysis_builder_wrapper, "IslandAnalysis")
island_analysis_bld = env.Builder(action = island_analysis_builder)
env['BUILDERS']['IslandAnalysisInternalBuilder'] = island_analysis_bld
def island_analysis_builder_wrapper(env, target, source, libdirs=[], ignorelist=[],pass_chk=0):
env.Replace(ISLAND_ANALYSIS_LIBDIRS = libdirs)
env.Replace(ISLAND_ANALYSIS_IGNORELIST = ignorelist)
env.Replace(ISLAND_ANALYSIS_PASS_CHK = pass_chk)
return env.IslandAnalysisInternalBuilder(target, source)
def island_analysis_builder(target, source, env):
libdirs = env.get('ISLAND_ANALYSIS_LIBDIRS')
ignorelist = env.get('ISLAND_ANALYSIS_IGNORELIST')
elf = 0
pass_chk = env.get('ISLAND_ANALYSIS_PASS_CHK')
if env.get('LIST_EXTREF_PY') == None:
env.Replace(LIST_EXTREF_PY = "${BUILD_ROOT}/core/kernel/qurt/scripts/list_extref.py")
if int(env.get('HEXAGON_RTOS_RELEASE').split(".")[0]) >= 7:
env.Replace(IA_OBJDUMP = "${QDSP6BIN}/hexagon-llvm-objdump${EXE_EXT}")
else:
env.Replace(IA_OBJDUMP = "${QDSP6BIN}/hexagon-objdump${EXE_EXT}")
analysis_cmd = "python ${LIST_EXTREF_PY} -v -e ${IA_OBJDUMP} "
for src in source:
if str(src).endswith('.o'):
analysis_cmd += "-o "+str(src)+" "
elif str(src).endswith('.elf'):
analysis_cmd += "-x "+str(src)+" "
elf = src
else:
analysis_cmd += "-s "+str(src)+" "
if elf:
csv = str(target[1])
analysis_cmd += "-c "+str(csv)+" "
for libdir in libdirs:
analysis_cmd += "-l "+libdir+" "
for ilist in ignorelist:
analysis_cmd += "-i "+ilist+" "
analysis_cmd = env.subst(analysis_cmd)
analysis_cmd = analysis_cmd+" > "+str(target[0])
print analysis_cmd
#std_out, std_err, return_val = env.ExecCmds(analysis_cmd)
extref = os.system(analysis_cmd)
    # The pass_chk flag tells us whether to fail the build when the number of
    # unaccounted external references is greater than 0.
if(pass_chk > 0):
if(extref > 0):
print "ERROR : This build is enabled with a feature to fail the build compilation in case of any unaccounted \
DDR symbol references from island code. For list of DDR symbols being referenced check 'Undefined Island Symbols' section in \
island_analysis.txt for more details. These symbols either need to be added to Island sections or whitelisted using\
following scons API 'env.AddSymbolToIslandWhitelist(tags, node, [list of symbols])'"
sys.exit(1)
#return target
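# SConscript usage sketch (file and variable names assumed):
#   env.Tool('island_analysis')
#   env.IslandAnalysis(['island_analysis.txt', 'island_symbols.csv'],
#                      island_objs + [image_elf],
#                      libdirs=lib_dirs, ignorelist=ignore_files, pass_chk=1)
# target[0] receives the report; target[1] is the CSV written when an .elf
# source is present.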
|
[
"jagadeshkumar.s@pathpartnertech.com"
] |
jagadeshkumar.s@pathpartnertech.com
|
6442dc019628b1f6ccbcfce58472f468079113da
|
dece3bc3e098a86cbfcddf66eb2cce27c21ff562
|
/test/distributed/launcher/elastic_launch_test.py
|
346cf5b3ec9d1a16f452c2378620c6d80d5013a5
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
deltabravozulu/pytorch
|
068e500b0b85bf3797a824458929112ba4d1c019
|
c6eef589971e45bbedacc7f65533d1b8f80a6895
|
refs/heads/master
| 2023-04-09T00:04:51.686741
| 2021-04-21T00:55:54
| 2021-04-21T00:55:54
| 359,989,621
| 0
| 0
|
NOASSERTION
| 2021-04-21T00:53:37
| 2021-04-21T00:43:57
| null |
UTF-8
|
Python
| false
| false
| 14,746
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import os
import shutil
import subprocess
import tempfile
import unittest
import uuid
from contextlib import closing
from unittest import mock
from unittest.mock import Mock, patch
import torch.distributed.elastic_launch as launch
from torch.distributed.elastic.agent.server.api import RunResult, WorkerState
from torch.distributed.elastic.multiprocessing.errors import ChildFailedError
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.elastic.utils import get_socket_with_port
from torch.testing._internal.common_utils import (
TEST_WITH_ASAN,
TEST_WITH_TSAN,
)
def launch_in_proc(args):
launch.main(args)
def path(script):
return os.path.join(os.path.dirname(__file__), script)
def get_child_pids(pid):
pgrep = subprocess.Popen(args=f"pgrep -P {pid}", shell=True, stdout=subprocess.PIPE)
pgrep.wait()
out = pgrep.stdout.read().decode("utf-8").rstrip().split("\n")
pids = []
for pid in out:
if pid:
pids.append(int(pid))
return pids
def pid_exists(pid):
try:
os.kill(pid, 0)
return True
except OSError:
return False
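# e.g. pid_exists(os.getpid()) -> True for the currently running test process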
class MockException(Exception):
pass
class ElasticLaunchTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# start a standalone, single process etcd server to use for all tests
cls._etcd_server = EtcdServer()
cls._etcd_server.start()
cls._etcd_endpoint = cls._etcd_server.get_endpoint()
@classmethod
def tearDownClass(cls):
# stop the standalone etcd server
cls._etcd_server.stop()
def setUp(self):
self.test_dir = tempfile.mkdtemp()
# remove any lingering environment variables
        for env in list(os.environ.keys()):
if env.startswith("PET_"):
del os.environ[env]
# set a sentinel env var on the parent proc
# this should be present on the child and gets
# asserted in ``bin/test_script.py``
os.environ["TEST_SENTINEL_PARENT"] = "FOOBAR"
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_launch_user_script_python(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def test_launch_user_script_python_caffe2_bc(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
sock = get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=fork",
"--master_addr=localhost",
f"--master_port={master_port}",
"--node_rank=0",
"--use_env",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def test_launch_user_script_bash(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=fork",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
launch.main(args + ["--module"] + script_args)
launch.main(args + script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def test_launch_with_env_vars(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
os.environ["PET_NNODES"] = str(nnodes)
os.environ["PET_NPROC_PER_NODE"] = str(nproc_per_node)
os.environ["PET_RDZV_BACKEND"] = "etcd"
os.environ["PET_RDZV_ENDPOINT"] = self._etcd_endpoint
os.environ["PET_RDZV_ID"] = run_id
os.environ["PET_MONITOR_INTERVAL"] = "1"
os.environ["PET_START_METHOD"] = "fork"
os.environ["PET_NO_PYTHON"] = "1"
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
os.environ["PET_MODULE"] = "1"
launch.main(script_args)
os.environ["PET_MODULE"] = "0"
launch.main(script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def _test_nproc_launch_configuration(self, nproc_type, expected_number):
run_id = str(uuid.uuid4().int)
nnodes = 1
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_type}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=fork",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
launch.main(args + script_args)
world_size = nnodes * expected_number
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_nproc_launch_auto_configurations(self):
self._test_nproc_launch_configuration("auto", os.cpu_count())
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_nproc_launch_number_configurations(self):
self._test_nproc_launch_configuration("4", 4)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_nproc_launch_unknown_configurations(self):
with self.assertRaises(ValueError):
self._test_nproc_launch_configuration("unknown", 4)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
@patch("torch.cuda.is_available", return_value=True)
@patch("torch.cuda.device_count", return_value=3)
def test_nproc_gpu_launch_configurations(self, _mock1, _mock2):
self._test_nproc_launch_configuration("auto", 3)
self._test_nproc_launch_configuration("gpu", 3)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_launch_elastic(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
# we are only launching 1 node (even though max = 2)
world_size = nproc_per_node
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@mock.patch("torch.distributed.elastic.events.record")
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_launch_elastic_worker_raise_exception(self, record_mock):
"""
Asserts that when the worker program fails and lancher raieses exception
to indicate that worker process failed
"""
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--max_restarts=0",
"--start_method=fork",
path("bin/test_script.py"),
"--fail",
]
with self.assertRaises(ChildFailedError):
launch.main(args)
record_mock.assert_called_once()
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
@mock.patch(
"torch.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent.run"
)
@mock.patch("torch.distributed.elastic.events.record")
def test_launch_elastic_agent_raise_exception(self, record_mock, mock_agent_run):
"""
Asserts that when the agent raises an exception
the launcher re-raises the original exception
"""
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--max_restarts=0",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
mock_agent_run.side_effect = MockException
with self.assertRaises(MockException):
launch.main(args)
record_mock.assert_called_once()
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_launch_standalone(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--standalone",
"--monitor_interval=1",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_launch_elastic_multiple_agents(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
nnodes = 2
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
procs = []
for _ in range(nnodes - 1):
p = mp.Process(target=launch.main, args=[args])
procs.append(p)
p.start()
launch.main(args)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def test_min_max_nodes_parse(self):
min_nodes, max_nodes = launch.parse_min_max_nnodes("1")
self.assertTrue(min_nodes, max_nodes)
self.assertTrue(1, min_nodes)
min_nodes, max_nodes = launch.parse_min_max_nnodes("2:20")
self.assertTrue(2, min_nodes)
self.assertTrue(20, max_nodes)
with self.assertRaises(RuntimeError):
launch.parse_min_max_nnodes("2:20:30")
@patch("torch.distributed.launcher.api.LocalElasticAgent")
def test_launch_shutdown(self, agent_mock_cls):
nnodes = 1
nproc_per_node = 4
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
agent_mock = Mock()
agent_mock.run.return_value = RunResult(WorkerState.SUCCEEDED)
agent_mock_cls.return_value = agent_mock
rdzv_handler_mock = Mock()
with patch(
"torch.distributed.elastic.rendezvous.registry.get_rendezvous_handler"
) as param_mock:
param_mock.return_value = rdzv_handler_mock
launch.main(args)
rdzv_handler_mock.shutdown.assert_called_once()
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
2953557cf58e2d6e0bb2ac7f71db2cda0c146708
|
5cfa0f98255fd5e23577371ef1e8b013cbb9e99f
|
/cgi-bin/upload_form.py
|
5c2646874a217418e06f32592a6862d63e886539
|
[] |
no_license
|
jeffreytony/csci4140hw
|
66cae44a57e0e45b6445d69397b24659abdd8321
|
28f7dd13e7f257b93b3335fb6ad67662cf50eddd
|
refs/heads/master
| 2021-03-24T09:09:54.015184
| 2018-02-18T07:27:55
| 2018-02-18T07:27:55
| 121,765,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 507
|
py
|
#!/usr/bin/env python
print 'Content-type: text/html'
print
print"""
<html>
<body>
<form enctype="multipart/form-data" action="upload.py" method="POST">
Choose an image (.jpg .gif .png): <br />
<input type="file" name="pic" accept="image/gif, image/jpeg, image/png" /><br />
<input type="submit" value="Upload" />
<select name="privacy"> <!-- name attribute assumed so the choice is submitted -->
<option selected="selected" value="public">Public</option>
<option value="private">Private</option>
</select>
</form>
</body>
</html>"""
|
[
"noreply@github.com"
] |
jeffreytony.noreply@github.com
|
b52f6245ef6708b26bce4789e3f081ab908ad7a7
|
d2b0ebbeedde3a1c14e343df73d171182009e4d2
|
/Petri/initialize_world.py
|
8e4d26bd4ee4dff15138a93472711e9a715d462f
|
[] |
no_license
|
mynameisvinn/Petri
|
cf22289e5bc4351a3702a95bce0fefaaba1a82a5
|
22f03e8a13d76e45abb1636d116236f9fa537b84
|
refs/heads/master
| 2022-11-13T15:36:16.788308
| 2020-06-29T18:27:36
| 2020-06-29T18:27:36
| 273,346,618
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,888
|
py
|
import random
import numpy as np
from .utils import _draw, _return_random_loc, _sample
def create_wind(n_rows, n_cols, n_force, n_kites):
world = {}
world['n_rows'] = n_rows
world['n_cols'] = n_cols
for _ in range(n_force):
i = random.choice(np.arange(n_rows))
j = random.choice(np.arange(n_cols - (n_cols // 5), n_cols)) # the right fifth of the board
loc = (i, j)
world[loc] = (7, None)
for _ in range(n_kites):
loc = _return_random_loc(n_rows, n_cols)
world[loc] = (1, None)
return world
def create_oil_world(n_rows, n_cols, n_dregs, n_oil):
world = {}
world['n_rows'] = n_rows
world['n_cols'] = n_cols
# create dregs
for _ in range(n_dregs):
loc = _return_random_loc(n_rows, n_cols)
world[loc] = ("Dreg", None)
    # create oil (tagged "Oil")
for _ in range(n_oil):
loc = _return_random_loc(n_rows, n_cols)
world[loc] = ("Oil", None)
return world
def create_sorting_world(n_rows, n_cols, n_dregs, n_sorters, n_emitters):
"""initialize world for sorting numbers.
"""
world = {}
world['n_rows'] = n_rows
world['n_cols'] = n_cols
    # create dregs (tagged "Dreg")
for _ in range(n_dregs):
loc = _return_random_loc(n_rows, n_cols)
world[loc] = ("Dreg", None)
    # create sorters (tagged "Sorter", each carrying a sampled value)
for _ in range(n_sorters):
loc = _return_random_loc(n_rows, n_cols)
world[loc] = ("Sorter", _sample())
    # create emitters (tagged "Emitter") along the right fifth of the board
for _ in range(n_emitters):
i = random.choice(np.arange(n_rows))
j = random.choice(np.arange(n_cols - (n_cols // 5), n_cols)) # the right fifth of the board
loc = (i, j)
world[loc] = ("Emitter", None)
return world
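# Usage sketch:
# world = create_sorting_world(40, 100, n_dregs=50, n_sorters=20, n_emitters=10)
# world[(i, j)] -> ("Dreg" | "Sorter" | "Emitter", payload); payload is the
# sampled value for sorters and None otherwise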
|
[
"vin.tang@gmail.com"
] |
vin.tang@gmail.com
|
0503c990b91b7e81374a3d8521bebb3f81f14347
|
887f4beeb3ba480f3bcc7dff1e9eb3d61d445f9b
|
/console/console/records/constants.py
|
60ab0e3144af8381cc4f32f6fe8d848d40088939
|
[] |
no_license
|
wang-shun/console
|
ccaa8a1716e2ab6bf5ed6d1c4240cecd4e59f155
|
50425ff068c4795bf13bd178891da126f8677383
|
refs/heads/master
| 2020-04-04T18:15:14.382006
| 2018-07-09T14:42:42
| 2018-07-09T14:42:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,812
|
py
|
# coding=utf-8
__author__ = 'huangfuxin'
from django.utils.translation import ugettext as _
from console.console.trash.constants import TRASH_RECORD_MAP
from ..instances.records import INSTANCES_RECORD_MAP
from ..backups.records import BACKUPS_RECORD_MAP
from ..disks.records import DISKS_RECORD_MAP
from ..ips.records import IPS_RECORD_MAP
from ..keypairs.records import KEYPAIRS_RECORD_MAP
from ..nets.records import NETS_RECORD_MAP
from ..routers.records import ROUTERS_RECORD_MAP
from ..security.records import SECURITY_RECORD_MAP
from ..alarms.records import ALARMS_RECORD_MAP
from ..loadbalancer.records import LOADBALANCER_RECORD_MAP
from ..rds.records import RDS_RECORD_MAP
from ..jumper.records import JUMPER_RECORD_MAP
from console.finance.appstore.records import APPSTORE_RECORD_MAP
from console.finance.waf.records import WAF_RECORD_MAP
TOPSPEED_RECORD_MAP = {
    # topspeed (rapid creation) related
"topspeed": {
"service": _(u"极速创建"),
"type": _(u"极速创建主机"),
"detail": _(u"主机: %(count)d个")
},
}
RESOURCES = {"instances": _(u"主机"),
"disks": _(u"硬盘"),
"images": _(u"镜像"),
"nets": _(u"子网"),
"quotas": _(u"配额"),
"tickets": _(u"工单"),
"routers": _(u"路由器"),
"backups": _(u"备份"),
"ips": _(u"公网IP"),
"keypairs": _(u"密钥对"),
"monitors": _(u"监控"),
"security": _(u"安全组"),
"zones": _(u"区"),
"wallets": _(u"钱包"),
"billings": _(u"计费"),
"alarms": _(u"告警"),
"rds": _(u"云数据库"),
"loadbalancer": _(u"负载均衡"),
"topspeed": _(u"极速创建"),
}
# combine dict; fix me
ACTION_RECORD_MAP = dict(BACKUPS_RECORD_MAP, **INSTANCES_RECORD_MAP)
ACTION_RECORD_MAP = dict(ACTION_RECORD_MAP, **DISKS_RECORD_MAP)
ACTION_RECORD_MAP = dict(ACTION_RECORD_MAP, **IPS_RECORD_MAP)
ACTION_RECORD_MAP = dict(ACTION_RECORD_MAP, **KEYPAIRS_RECORD_MAP)
ACTION_RECORD_MAP = dict(ACTION_RECORD_MAP, **NETS_RECORD_MAP)
ACTION_RECORD_MAP = dict(ACTION_RECORD_MAP, **ROUTERS_RECORD_MAP)
ACTION_RECORD_MAP = dict(ACTION_RECORD_MAP, **SECURITY_RECORD_MAP)
ACTION_RECORD_MAP = dict(ACTION_RECORD_MAP, **ALARMS_RECORD_MAP)
ACTION_RECORD_MAP = dict(ACTION_RECORD_MAP, **RDS_RECORD_MAP)
ACTION_RECORD_MAP = dict(ACTION_RECORD_MAP, **LOADBALANCER_RECORD_MAP)
ACTION_RECORD_MAP = dict(ACTION_RECORD_MAP, **TOPSPEED_RECORD_MAP)
ACTION_RECORD_MAP = dict(ACTION_RECORD_MAP, **TRASH_RECORD_MAP)
ACTION_RECORD_MAP = dict(ACTION_RECORD_MAP, **JUMPER_RECORD_MAP)
ACTION_RECORD_MAP = dict(ACTION_RECORD_MAP, **APPSTORE_RECORD_MAP)
ACTION_RECORD_MAP = dict(ACTION_RECORD_MAP, **WAF_RECORD_MAP)
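# A behavior-preserving alternative to the chained dict() merges above (sketch
# for the "fix me" note):
# ACTION_RECORD_MAP = {}
# for record_map in (BACKUPS_RECORD_MAP, INSTANCES_RECORD_MAP, DISKS_RECORD_MAP,
#                    IPS_RECORD_MAP, KEYPAIRS_RECORD_MAP, NETS_RECORD_MAP,
#                    ROUTERS_RECORD_MAP, SECURITY_RECORD_MAP, ALARMS_RECORD_MAP,
#                    RDS_RECORD_MAP, LOADBALANCER_RECORD_MAP, TOPSPEED_RECORD_MAP,
#                    TRASH_RECORD_MAP, JUMPER_RECORD_MAP, APPSTORE_RECORD_MAP,
#                    WAF_RECORD_MAP):
#     ACTION_RECORD_MAP.update(record_map)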
|
[
"lhongzhan@126.com"
] |
lhongzhan@126.com
|
78acf0ac80b9e8b6f1a565314a6417fbe2020b14
|
7b6769ac9be0caa7dec225d35366c2c6e34b7c2d
|
/terra/video/fields.py
|
54c6f356f237364674aef05a37c001e9371f6dc0
|
[] |
no_license
|
jhughsam/django-video
|
d4046732f8e6673a5778ae742c16e4e00d5f2d71
|
a13c15651a13db00a8d0158f64054c6a1b6b4e02
|
refs/heads/master
| 2020-05-19T16:29:56.933252
| 2009-08-20T14:58:35
| 2009-08-20T14:58:35
| 32,493,428
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,694
|
py
|
"""
Copyright (C) 2008 Y-NODE Software
Author: Aleksey Artamonov <aleksey.artamonov@y-node.com>
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from django.db.models import ImageField, FileField, signals
from django.conf import settings
from distutils.dir_util import mkpath
from video.signals import pre_upload
import shutil, os, glob, re
class DynamicUploadFileField(FileField):
def __init__(self, *args, **kwargs):
if not 'upload_to' in kwargs:
kwargs['upload_to'] = 'tmp'
self.signal = kwargs.get('signal', None)
if 'signal' in kwargs:
del(kwargs['signal'])
super(DynamicUploadFileField, self).__init__(*args, **kwargs)
def contribute_to_class(self, cls, name):
super(DynamicUploadFileField, self).contribute_to_class(cls, name)
pre_upload.connect(self._move, sender=cls)
def _move(self, instance=None, **kwargs):
if hasattr(instance, 'get_upload_to'):
src = getattr(instance, self.attname)
if src:
src = str(src)
m = re.match(r"%s/(.*)" % self.upload_to, src)
if m:
upload_path = instance.get_upload_to(self.attname)
dst = "%s%s" % (
upload_path,
m.groups()[0]
)
basedir = os.path.join(
settings.MEDIA_ROOT,
os.path.dirname(dst)
)
fromdir = os.path.join(
settings.MEDIA_ROOT,
src
)
mkpath(basedir)
shutil.move(fromdir,
os.path.join(basedir,
m.groups()[0])
)
setattr(instance, self.attname, dst)
instance.save()
if self.signal:
self.signal(instance)
def db_type(self):
return 'varchar(200)'
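# Usage sketch (model names assumed): a model declares
#   video = DynamicUploadFileField(signal=on_moved)
# and provides get_upload_to(attname); when the pre_upload signal fires, the
# uploaded file is moved from the temporary 'tmp' dir into that path.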
|
[
"Aleksey.Artamonov@2b9ea72e-b3cd-11dd-b680-ffd21d2dd7b0"
] |
Aleksey.Artamonov@2b9ea72e-b3cd-11dd-b680-ffd21d2dd7b0
|
9a9eeeaaa1fb4bf0c62f6bc85af936a9d4432739
|
4cb4d94379344113519b9ae1d999fbfb62a3092e
|
/examples/context.py
|
afa786b74ac6c8305fe5bc2a78a94d1df7091dd1
|
[
"MIT"
] |
permissive
|
jreamy/dwt
|
5cf9237086dafbb4a5218c517d47d298ff5b0490
|
efbabf5fde56e522ab1f814ec1b6a03f6f174c14
|
refs/heads/main
| 2023-07-18T13:38:56.238279
| 2021-05-11T01:42:49
| 2021-05-11T01:42:49
| 361,247,222
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
import os
import sys
# Add the module directory to the path
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), '..')))
import dwt # nopep8
|
[
"jreamy17@goergefox.edu"
] |
jreamy17@goergefox.edu
|
4deeae533e3992970ba2d5720d9c7c5ba8dd9981
|
10a749dc6557afe0c277e2107ac41ee6ec36a60d
|
/venv/Scripts/easy_install-3.8-script.py
|
27308fca5bcf00f7682d4790a90c19fdc062d287
|
[] |
no_license
|
YYeceJ/spiderDemo
|
941a26719785b47961793352b967a02e4016dff9
|
412516f3c48cd07f3daf3a5de1bde726d94cf6d9
|
refs/heads/master
| 2020-12-01T18:22:44.922532
| 2019-12-29T08:59:05
| 2019-12-29T08:59:05
| 230,726,412
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
#!D:\myCode\PycharmProjects\spiderDemo\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
)
|
[
"YYeceJ@163.com"
] |
YYeceJ@163.com
|
53130e0301f7236adf230fa9165248e0fc6b80c6
|
ae7a61d270e81d619b7d96ab8489e4864a508a98
|
/physics/harmonet.py
|
16d00250d176255e508fc34efe4ca1603453d421
|
[
"MIT"
] |
permissive
|
charlesfry/MuZero
|
32da6fcda7c1c5b330dd626e2a01f560c1aa7480
|
83a0412c721e7f911b81e24aecdf58ff621b2e92
|
refs/heads/master
| 2023-07-27T14:08:57.368566
| 2020-11-07T18:59:07
| 2020-11-07T18:59:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,582
|
py
|
import sys
sys.path.append('../../../')
from physics.config import MuZeroConfig, make_chess_config
from physics.networks import SharedStorage
from physics.self_play.self_play import run_selfplay, run_eval
from physics.training import ReplayBuffer
from physics.training import train_network
def muzero(config: MuZeroConfig):
"""
MuZero training is split into two independent parts: Network training and
self-play data generation.
These two parts only communicate by transferring the latest networks checkpoint
from the training to the self-play, and the finished games from the self-play
to the training.
    In contrast to the original MuZero algorithm, this version doesn't work with
    multiple threads, so training and self-play are done alternately.
:param config:
:return:
"""
storage = SharedStorage(config.new_network(), config.uniform_network(), config.new_optimizer())
replay_buffer = ReplayBuffer(config)
for i in range(config.num_training_loop):
print(f'Train Step {i}')
score_train = run_selfplay(config, storage, replay_buffer, config.num_episodes)
train_network(config, storage, replay_buffer, config.num_epochs)
print("Train score:", score_train)
print("Test score:", run_eval(config, storage, 50))
print(f"MuZero played {config.num_episodes * (i + 1)} "
f"episodes and trained for {config.num_epochs * (i + 1)} epochs.\n")
return storage.latest_network()
if __name__ == '__main__':
config = make_chess_config()
muzero(config)
|
[
"52051562+AmishWarlord@users.noreply.github.com"
] |
52051562+AmishWarlord@users.noreply.github.com
|
9b60de734d4171341c27d3a1bb7378b3ad90b6fe
|
275947f3b6263904a4dfbca79941a726650b4be0
|
/orders/migrations/0001_initial.py
|
d26db8ccf7feb0c2f9a0237e85f4d4fb38f4980c
|
[] |
no_license
|
tiagolch/e-commerce_GraphQL
|
dcd1c75efececa92cad7ca32228fd95043a4a7bb
|
355086fecd952dbb244d4251570075de3af74e4d
|
refs/heads/master
| 2023-04-04T18:47:40.588318
| 2021-04-11T02:40:28
| 2021-04-11T02:40:28
| 356,614,415
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,816
|
py
|
# Generated by Django 3.2 on 2021-04-10 02:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('products', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Orders',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('customer_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Orders',
},
),
migrations.CreateModel(
name='Order_products',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('total', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
('quantity', models.PositiveIntegerField(default=0)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('order_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.orders')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='products.products')),
],
options={
'verbose_name_plural': 'Order_Products',
},
),
]
|
[
"tiagodevch@gmail.com"
] |
tiagodevch@gmail.com
|
f16d502fd39da13a55a8512d89f86969f8326832
|
29f47e8a07b8bccc778b0528e69d61f34a5e9500
|
/Code/BaseMap.py
|
0aefb4403ca2d9f647736704217b2d13956960f7
|
[] |
no_license
|
datagold2017/KaoPuRouting
|
ab4fc401fbfe184eedf5f39f7956d5238bee709d
|
7cf1b0faf499d99876f6d583492c1456b3035bec
|
refs/heads/master
| 2021-01-23T07:55:19.511461
| 2014-08-06T09:13:54
| 2014-08-06T09:13:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,169
|
py
|
#!/usr/bin/python
#coding: utf-8
# ######################################################################
# This file is one of traffic map utility component files
# Created in 2013
# ######################################################################
from PosCoordsUtl import *
from Tkinter import *
from PIL import Image, ImageTk
from StringIO import StringIO
import urllib2
# Below is the area for class definition
class BaseMap:
def __init__(self, canvas_size, image_size = 256, center = [116,39]):
self.canvas_size = canvas_size
self.image_size = image_size
self.rows = int(math.ceil(self.canvas_size[1] / float(self.image_size)))
self.cols = int(math.ceil(self.canvas_size[0] / float(self.image_size)))
self.zoom_level = 12
self.pos_coords_utl = PosCoordsUtl(self.zoom_level, self.canvas_size, self.image_size)
self.center_coords = center
# hot_spots are for marks that can be dragged around
# format: [spot_pos, spot_coords, spot_radius, spot_text]
self.hot_spots = []
self.selected_spot = None
# below are attributes of display
self.default_tile = None
self.cached_tiles = []
self.tile_shift = [0,0]
self.cache_tile()
def update_spots(self):
for spot in self.hot_spots:
spot[0] = self.get_pos(spot[1])
def set_center(self, center_coords):
self.center_coords = center_coords
self.update_spots()
#self.cache_tile()
def set_zoom_level(self, zoom_level):
self.zoom_level = zoom_level
self.pos_coords_utl.set_zoom_level(self.zoom_level)
self.update_spots()
#self.cache_tile()
def cache_tile(self):
# check out left top and right bottom grid first
grids = self.pos_coords_utl.get_conner_grid(self.center_coords)
left_top_grid = grids[0]
temp_photo_array = []
temp_cached_tiles = []
for i in range(0, self.rows + 1): #cache one more tile to make sure the map is fully covered
for j in range(0, self.cols + 1): #cache one more tile to make sure the map is fully covered
x = left_top_grid[0] + j
y = left_top_grid[1] + i
# fetch new tile only in case not cached
if not self.is_tile_in_cache(x,y):
tk_photo = self.get_image_from_server((x,y))
self.cached_tiles.append((x,y,self.zoom_level,tk_photo))
def is_tile_in_cache(self, x, y):
for tile_item in self.cached_tiles:
if tile_item[0] == x and tile_item[1] == y and tile_item[2] == self.zoom_level:
return True
return False
def get_tile(self, x, y):
for tile_item in self.cached_tiles:
if tile_item[0] == x and tile_item[1] == y and tile_item[2] == self.zoom_level:
return tile_item[3]
return None
def optimize_cache(self):
pass #to be added
def draw(self, canvas, mark_point_array = []):
self.cache_tile()
canvas.delete(ALL)
self.tile_shift = self.pos_coords_utl.get_shift(self.center_coords)
grids = self.pos_coords_utl.get_conner_grid(self.center_coords)
left_top_grid = grids[0]
# draw tiles
for i in range(0,self.rows + 1):
for j in range(0,self.cols + 1):
x = left_top_grid[0] + j
y = left_top_grid[1] + i
image_obj = self.get_tile(x,y)
pos_x = self.image_size/2 + 256 * j - self.tile_shift[0]
pos_y = self.image_size/2 + 256 * i - self.tile_shift[1]
if image_obj != None:
canvas.create_image(pos_x,pos_y, image=image_obj)
else:
print x,y, 'is not loaded!'
# draw array in lines
for mark_point in mark_point_array:
if not ('last_mark_point' in locals()):
last_mark_point = mark_point
last_pos = self.get_pos(last_mark_point)
continue
#print last_mark_point,mark_point
pos = self.get_pos(mark_point)
canvas.create_line(last_pos[0], last_pos[1],
pos[0], pos[1],
fill = 'blue', width = 3)
last_mark_point = mark_point
last_pos = pos
# draw hot spots
for spot in self.hot_spots:
self.draw_hot_spot(canvas, spot[0], spot[2], spot[3])
def draw_hot_spot(self, canvas, pos, r, txt):
the_circle = canvas.create_oval([pos[0]-r,pos[1]-r,pos[0]+r,pos[1]+r],fill='blue')
canvas.create_text(pos[0],pos[1],text=txt, fill='white')
return the_circle
def get_center_coords(self):
return self.center_coords
def get_image_from_server(self, grid):
# format request string
request_part1 = 'http://maps.nlp.nokia.com.cn/maptile/2.1/maptile/newest/normal.day.grey'
request_part2 = '/%d/%d/%d/256/png8' % (self.zoom_level, grid[0], grid[1])
request_part3 = '?app_id=demo_qCG24t50dHOwrLQ&token=NYKC67ShPhQwqaydGIW4yg&lg=chi'
request_string = request_part1 + request_part2 + request_part3
#print request_string
# generate request for the image
req = urllib2.Request(request_string)
response = urllib2.urlopen(req)
image_data = response.read()
# generate photo instance from the data and convert into image that Tkinter can use
pil_image = Image.open(StringIO(image_data))
tk_photo = ImageTk.PhotoImage(pil_image)
return tk_photo
def move_center(self, movement):
pos = self.pos_coords_utl.coords_to_pos(self.center_coords)
new_pos = [pos[0] - movement[0], pos[1] - movement[1]]
#print pos, new_pos, self.center_coords,
self.set_center(self.pos_coords_utl.pos_to_coords(new_pos))
#print self.center_coords
# self.cache_tile()
return self.center_coords
def get_pos(self, coords):
pos = self.pos_coords_utl.coords_to_pos(coords)
center_pos = self.pos_coords_utl.coords_to_pos(self.center_coords)
diff = [center_pos[0]-pos[0],center_pos[1]-pos[1]]
return [self.canvas_size[0]/2 - diff[0], self.canvas_size[1]/2 - diff[1]]
# para of canvas_pos should be pos in current canvas
def get_coords(self,canvas_pos):
pos_diff = [canvas_pos[0] - self.canvas_size[0] / 2, canvas_pos[1] - self.canvas_size[1] / 2]
center_pos = self.pos_coords_utl.coords_to_pos(self.center_coords)
pos = [center_pos[0] + pos_diff[0], center_pos[1] + pos_diff[1]]
return self.pos_coords_utl.pos_to_coords(pos)
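    # e.g. with the default 1024x768 canvas, get_pos(self.center_coords)
    # returns the canvas midpoint [512, 384], and get_coords([512, 384])
    # maps back to the center coordinates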
def add_spot(self, spot_coords, spot_radius, spot_text):
spot_pos = self.get_pos(spot_coords)
self.hot_spots.append([spot_pos, spot_coords, spot_radius, spot_text])
def get_spot(self, index):
return self.hot_spots[index]
    # below are interaction helpers added for embedding this map in a widget
    def update_map(self):
        # self.center_coords = ((self.from_longitude + self.to_longitude) / 2, (self.from_latitude + self.to_latitude) / 2)
        self.set_zoom_level(self.zoom_level)
        self.set_center(self.center_coords)
def handler_button1_down(self,event):
# check if it is on hot spot, for loop should be full loop in order to make sure select the one on top
self.selected_spot = None
for spot in self.hot_spots:
pos = spot[0]
dis = (event.x-pos[0]) ** 2 + (event.y - pos[1]) ** 2
if dis < spot[2] ** 2: # check distance
self.selected_spot = spot
self.pos_of_move_from = [event.x,event.y]
def handler_button1_move(self,event):
if self.selected_spot != None:
self.selected_spot[0] = [event.x,event.y]
self.selected_spot[1] = self.get_coords(self.selected_spot[0])
else:
movement = [event.x - self.pos_of_move_from[0], event.y - self.pos_of_move_from[1]]
self.move_center(movement)
self.pos_of_move_from = [event.x,event.y]
def handler_button1_released(self,event):
if self.selected_spot != None:
self.selected_spot[0] = [event.x,event.y]
self.selected_spot[1] = self.get_coords(self.selected_spot[0])
else:
movement = [event.x - self.pos_of_move_from[0], event.y - self.pos_of_move_from[1]]
self.move_center(movement)
self.selected_spot = None
# Below is for helper functions
# sample url
# http://1.maps.nlp.nokia.com.cn/maptile/2.1/maptile/newest/normal.day.grey/13/6748/3108/256/png8?app_id=demo_qCG24t50dHOwrLQ&token=NYKC67ShPhQwqaydGIW4yg&lg=chi
# unit test code
def main():
# Here, your unit test code or main program
# init Tkinter
root_widget = Tk()
map_area = LabelFrame(root_widget, text = "Traffic Map", padx=5,pady=5)
map_area.pack(side = LEFT)
map_canvas = Canvas(map_area, width = 1024, height = 768)
map_canvas.pack()
basemap_instance = BaseMap((1024,768))
basemap_instance.draw(map_canvas)
root_widget.mainloop()
if __name__=='__main__':
main()
|
[
"stevewang@outlook.com"
] |
stevewang@outlook.com
|
3bc74f9601911439ac6431e531f6a47b4cf5f992
|
72cb3df94d1aa1347cd18334103523128ea064e8
|
/U4/django_u4/django_u4/core/admin.py
|
d0527ef7997a5d40d4b57de6fced27cd8c28f99b
|
[] |
no_license
|
slnowak/python-lecture
|
4ef2e02ee5e8427207a1e79da4c80b00965ac5a2
|
52e6a12c7296651c795b20fddaeb06c4baa78c60
|
refs/heads/master
| 2020-12-11T06:08:07.542830
| 2014-01-22T09:50:37
| 2014-01-22T09:50:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
from django.contrib import admin
# Register your models here.
from django_u4.core.models import Foo
admin.site.register(Foo)
|
[
"cypreess@gmail.com"
] |
cypreess@gmail.com
|
6b6f6fe94a5349e9c5d12da6c1ac15edc70da0dc
|
621fffffe7bf63826149dd6c6bcd882a751c2b19
|
/capstone/animalproject/animalapp/signals.py
|
0dade45299d974757f6cebb9772ef47775aeb04f
|
[
"MIT"
] |
permissive
|
jastr945/PDXclass
|
ccbe2ccd646ed3c5f24667d5cd0e8983119b82b2
|
f8bf97e03586a83c860845a383a32162b9bea74c
|
refs/heads/master
| 2021-01-22T02:08:26.080633
| 2017-12-10T06:32:47
| 2017-12-10T06:32:47
| 92,336,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
# from .models import Animal
# from django.db.models.signals import post_save
# from django.dispatch import receiver
#
#
# # saving each new animal profile instance into Elasticsearch
# @receiver(post_save, sender=Animal)
# def index_post(sender, instance, **kwargs):
# instance.indexing()
|
[
"polina.jastr@gmail.com"
] |
polina.jastr@gmail.com
|
719e5727f5943e326509daf86592303957e524c7
|
5f96ac76dd42e0349939520ddac15d4203da7630
|
/dojoninjas/apps/dojo_ninjas/models.py
|
7d5143608dd76cf59a33eee84095cedc68de6be3
|
[] |
no_license
|
pangyang1/Django-DojoNinjas-
|
e441c9145047edcf52a34177934fb16910b0bf7a
|
14e0f762f3b3b31b1e98a7dae7a5effdac75493a
|
refs/heads/master
| 2020-03-23T05:57:53.638128
| 2018-07-16T18:48:09
| 2018-07-16T18:48:09
| 141,179,849
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
from __future__ import unicode_literals
from django.db import models
class Dojo(models.Model):
name = models.CharField(max_length = 255)
city = models.CharField(max_length = 255)
state = models.CharField(max_length = 2)
desc = models.TextField(default = "")
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
class Ninja(models.Model):
first_name = models.CharField(max_length = 255)
last_name = models.CharField(max_length = 255)
dojo = models.ForeignKey(Dojo, related_name = "ninjas")
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
def __str__(self):
return self.first_name
|
[
"noreply@github.com"
] |
pangyang1.noreply@github.com
|
1498491a3cf1af66a139a9ad13ccefd3ccd2f488
|
d530a6067b8c23e2cda66e423c14d2d8add7ec43
|
/dogs/serializers.py
|
bff117363e98708433c949f5c69109f72d2aca3b
|
[] |
no_license
|
Shari87/python_django
|
bd15349cbfdf2c2876ad6814afeab442944c32c0
|
53d2c19d62cea5f5f30f2aacd66ab71d3005c2a5
|
refs/heads/master
| 2020-05-18T02:27:05.119253
| 2019-04-30T18:15:38
| 2019-04-30T18:15:38
| 184,114,946
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
from rest_framework import serializers
from .models import Dog
class DogSerializer(serializers.ModelSerializer):
class Meta:
model = Dog
fields = ('id','env','tests','name', 'age', 'breed', 'color', 'created_at', 'started_at','finished_at','status')
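# Illustrative usage sketch (not part of the original file); assumes a Dog row
# already exists, and shows only fields declared in Meta.fields:
#
# dog = Dog.objects.first()
# DogSerializer(dog).data                      # model instance -> plain dict
# s = DogSerializer(data={'name': 'Rex', 'age': 3, 'breed': 'lab', 'color': 'black'})
# if s.is_valid():                             # validates against the model
#     s.save()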
|
[
"Shari87"
] |
Shari87
|
06d739396620861fadad44ed7bfabcc366d044d7
|
dfe97a01b8f1cbb08dc91626193ee077c4277ab0
|
/LateForWork/lateForWork.spec
|
4c5b1e8af794780603cac0b31c05bdece40ce704
|
[] |
no_license
|
AdityaShandilyaS/PyProjects
|
9d1d1ba0dba92946fc281f484f1609d59209a966
|
eca7eb2549a3c008b57ff469057e8f0927b20227
|
refs/heads/master
| 2021-07-18T10:42:22.819724
| 2020-07-12T15:05:26
| 2020-07-12T15:05:26
| 193,763,865
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
spec
|
# -*- mode: python -*-
block_cipher = None
a = Analysis(['lateForWork.py'],
pathex=['C:\\Users\\dell\\Python Files'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='lateForWork',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
runtime_tmpdir=None,
console=True )
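# Build note (illustrative, not part of the generated spec): PyInstaller
# consumes this file directly, e.g.
#
#     pyinstaller lateForWork.spec
#
# which, with runtime_tmpdir=None and the binaries bundled into EXE, should
# produce a one-file executable under dist/.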
|
[
"aditya.shandilya.2693@gmail.com"
] |
aditya.shandilya.2693@gmail.com
|
72a7197185f097f36b383efd9a5d4c4b390623bc
|
38ed9af4ac273585d209b5ba605dad26367d7fa3
|
/vnpy/trader/uiBasicWidget.py
|
f1f90dcfa5301a07de1fd54b4a643d43b4cf1d3c
|
[
"MIT"
] |
permissive
|
tianhm/vnpy-beta
|
633c9d1b55e6c9671d2904323071fedf3310354d
|
51b4e963281e16589e8db0257086b9e856dbcee0
|
refs/heads/master
| 2021-01-01T19:38:06.509105
| 2017-05-16T08:32:35
| 2017-05-16T08:32:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 48,683
|
py
|
# encoding: UTF-8
import json
import csv
import os
from collections import OrderedDict
from PyQt4 import QtGui
from qtpy import QtWidgets, QtCore  #, QtGui
from vnpy.event import *
from vnpy.trader.vtEvent import *
from vtFunction import *
from vtGateway import *
import vtText
COLOR_RED = QtGui.QColor('red')
COLOR_GREEN = QtGui.QColor('green')
#----------------------------------------------------------------------
def loadFont():
    """Load the font settings"""
    fileName = 'VT_setting.json'
    path = os.path.abspath(os.path.dirname(__file__))
    fileName = os.path.join(path, fileName)
    try:
        with open(fileName) as f:
            setting = json.load(f)
        family = setting['fontFamily']
        size = setting['fontSize']
        font = QtGui.QFont(family, size)
    except:
        font = QtGui.QFont(u'微软雅黑', 12)  # fall back to Microsoft YaHei, 12pt
    return font
BASIC_FONT = loadFont()
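# Illustrative example (not part of the original file) of a VT_setting.json
# that loadFont() above would accept; the key names come from the code, the
# values are assumptions:
#
# {
#     "fontFamily": "Arial",
#     "fontSize": 12
# }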
########################################################################
class BasicCell(QtWidgets.QTableWidgetItem):
"""基础的单元格"""
#----------------------------------------------------------------------
def __init__(self, text=None, mainEngine=None):
"""Constructor"""
super(BasicCell, self).__init__()
self.data = None
if text:
self.setContent(text)
#----------------------------------------------------------------------
def setContent(self, text):
"""设置内容"""
if text == '0' or text == '0.0':
self.setText('')
else:
self.setText(text)
########################################################################
class NumCell(QtWidgets.QTableWidgetItem):
"""用来显示数字的单元格"""
#----------------------------------------------------------------------
def __init__(self, text=None, mainEngine=None):
"""Constructor"""
super(NumCell, self).__init__()
self.data = None
if text:
self.setContent(text)
#----------------------------------------------------------------------
    def setContent(self, text):
        """Set cell content"""
        # NumCell is mainly used to display integer fields such as OrderID
        # and TradeID, so the value is stored as an int to get numeric
        # sorting. Some gateways use order/trade IDs that are not purely
        # numeric, hence the try...except fallback to plain text.
        try:
            num = int(text)
            self.setData(QtCore.Qt.DisplayRole, num)
        except ValueError:
            self.setText(text)
########################################################################
class DirectionCell(QtWidgets.QTableWidgetItem):
"""用来显示买卖方向的单元格"""
#----------------------------------------------------------------------
def __init__(self, text=None, mainEngine=None):
"""Constructor"""
super(DirectionCell, self).__init__()
self.data = None
if text:
self.setContent(text)
#----------------------------------------------------------------------
def setContent(self, text):
"""设置内容"""
if text == DIRECTION_LONG or text == DIRECTION_NET:
self.setForeground(QtGui.QColor('red'))
elif text == DIRECTION_SHORT:
self.setForeground(QtGui.QColor('green'))
self.setText(text)
########################################################################
class NameCell(QtWidgets.QTableWidgetItem):
"""用来显示合约中文的单元格"""
#----------------------------------------------------------------------
def __init__(self, text=None, mainEngine=None):
"""Constructor"""
super(NameCell, self).__init__()
self.mainEngine = mainEngine
self.data = None
if text:
self.setContent(text)
#----------------------------------------------------------------------
def setContent(self, text):
"""设置内容"""
if self.mainEngine:
# 首先尝试正常获取合约对象
contract = self.mainEngine.getContract(text)
# 如果能读取合约信息
if contract:
self.setText(contract.name)
########################################################################
class BidCell(QtWidgets.QTableWidgetItem):
"""买价单元格"""
#----------------------------------------------------------------------
def __init__(self, text=None, mainEngine=None):
"""Constructor"""
super(BidCell, self).__init__()
self.data = None
self.setForeground(QtGui.QColor('black'))
self.setBackground(QtGui.QColor(255,174,201))
if text:
self.setContent(text)
#----------------------------------------------------------------------
def setContent(self, text):
"""设置内容"""
self.setText(text)
########################################################################
class AskCell(QtWidgets.QTableWidgetItem):
"""买价单元格"""
#----------------------------------------------------------------------
def __init__(self, text=None, mainEngine=None):
"""Constructor"""
super(AskCell, self).__init__()
self.data = None
self.setForeground(QtGui.QColor('black'))
self.setBackground(QtGui.QColor(160,255,160))
if text:
self.setContent(text)
#----------------------------------------------------------------------
def setContent(self, text):
"""设置内容"""
self.setText(text)
########################################################################
class PnlCell(QtWidgets.QTableWidgetItem):
"""显示盈亏的单元格"""
#----------------------------------------------------------------------
def __init__(self, text=None, mainEngine=None):
"""Constructor"""
super(PnlCell, self).__init__()
self.data = None
self.color = ''
if text:
self.setContent(text)
#----------------------------------------------------------------------
def setContent(self, text):
"""设置内容"""
self.setText(text)
try:
value = float(text)
if value >= 0 and self.color != 'red':
self.color = 'red'
self.setForeground(COLOR_RED)
elif value < 0 and self.color != 'green':
self.color = 'green'
self.setForeground(COLOR_GREEN)
except ValueError:
pass
########################################################################
class BasicMonitor(QtWidgets.QTableWidget):
"""
基础监控
headerDict中的值对应的字典格式如下
{'chinese': u'中文名', 'cellType': BasicCell}
"""
signal = QtCore.Signal(type(Event()))
#----------------------------------------------------------------------
def __init__(self, mainEngine=None, eventEngine=None, parent=None):
"""Constructor"""
super(BasicMonitor, self).__init__(parent)
self.mainEngine = mainEngine
self.eventEngine = eventEngine
        # Header label storage
        self.headerDict = OrderedDict()  # ordered dict: key = field name, value = config dict
        self.headerList = []             # mirrors self.headerDict.keys()
        # Row data storage
        self.dataDict = {}  # dict: key = the row's key-field value, value = dict of that row's cells
        self.dataKey = ''   # name of the data attribute used as the row key
        # Event type being monitored
        self.eventType = ''
        # Font
        self.font = None
        # Whether to save the data object into each cell
        self.saveData = False
        # Sorting by header is off by default; widgets that need it can enable it
        self.sorting = False
        # Initialize the context menu
        self.initMenu()
#----------------------------------------------------------------------
def setHeaderDict(self, headerDict):
"""设置表头有序字典"""
self.headerDict = headerDict
self.headerList = headerDict.keys()
#----------------------------------------------------------------------
def setDataKey(self, dataKey):
"""设置数据字典的键"""
self.dataKey = dataKey
#----------------------------------------------------------------------
def setEventType(self, eventType):
"""设置监控的事件类型"""
self.eventType = eventType
#----------------------------------------------------------------------
def setFont(self, font):
"""设置字体"""
self.font = font
#----------------------------------------------------------------------
def setSaveData(self, saveData):
"""设置是否要保存数据到单元格"""
self.saveData = saveData
#----------------------------------------------------------------------
def initTable(self):
"""初始化表格"""
# 设置表格的列数
col = len(self.headerDict)
self.setColumnCount(col)
# 设置列表头
labels = [d['chinese'] for d in self.headerDict.values()]
self.setHorizontalHeaderLabels(labels)
# 关闭左边的垂直表头
self.verticalHeader().setVisible(False)
# 设为不可编辑
self.setEditTriggers(self.NoEditTriggers)
# 设为行交替颜色
self.setAlternatingRowColors(True)
# 设置允许排序
self.setSortingEnabled(self.sorting)
#----------------------------------------------------------------------
def registerEvent(self):
"""注册GUI更新相关的事件监听"""
self.signal.connect(self.updateEvent)
self.eventEngine.register(self.eventType, self.signal.emit)
#----------------------------------------------------------------------
def updateEvent(self, event):
"""收到事件更新"""
data = event.dict_['data']
self.updateData(data)
#----------------------------------------------------------------------
def updateData(self, data):
"""将数据更新到表格中"""
# 如果允许了排序功能,则插入数据前必须关闭,否则插入新的数据会变乱
if self.sorting:
self.setSortingEnabled(False)
# 如果设置了dataKey,则采用存量更新模式
if self.dataKey:
key = data.__getattribute__(self.dataKey)
# 如果键在数据字典中不存在,则先插入新的一行,并创建对应单元格
if key not in self.dataDict:
self.insertRow(0)
d = {}
for n, header in enumerate(self.headerList):
content = safeUnicode(data.__getattribute__(header))
cellType = self.headerDict[header]['cellType']
cell = cellType(content, self.mainEngine)
if self.font:
cell.setFont(self.font) # 如果设置了特殊字体,则进行单元格设置
if self.saveData: # 如果设置了保存数据对象,则进行对象保存
cell.data = data
self.setItem(0, n, cell)
d[header] = cell
self.dataDict[key] = d
# 否则如果已经存在,则直接更新相关单元格
else:
d = self.dataDict[key]
for header in self.headerList:
content = safeUnicode(data.__getattribute__(header))
cell = d[header]
cell.setContent(content)
if self.saveData: # 如果设置了保存数据对象,则进行对象保存
cell.data = data
# 否则采用增量更新模式
else:
self.insertRow(0)
for n, header in enumerate(self.headerList):
content = safeUnicode(data.__getattribute__(header))
cellType = self.headerDict[header]['cellType']
cell = cellType(content, self.mainEngine)
if self.font:
cell.setFont(self.font)
if self.saveData:
cell.data = data
self.setItem(0, n, cell)
# 调整列宽
self.resizeColumns()
# 重新打开排序
if self.sorting:
self.setSortingEnabled(True)
#----------------------------------------------------------------------
def resizeColumns(self):
"""调整各列的大小"""
self.horizontalHeader().resizeSections(QtWidgets.QHeaderView.ResizeToContents)
#----------------------------------------------------------------------
def setSorting(self, sorting):
"""设置是否允许根据表头排序"""
self.sorting = sorting
#----------------------------------------------------------------------
def saveToCsv(self):
"""保存表格内容到CSV文件"""
# 先隐藏右键菜单
self.menu.close()
# 获取想要保存的文件名
path = QtWidgets.QFileDialog.getSaveFileName(self, vtText.SAVE_DATA, '', 'CSV(*.csv)')
try:
#if not path.isEmpty():
if path:
with open(unicode(path), 'wb') as f:
writer = csv.writer(f)
                    # Write the header row
headers = [header.encode('gbk') for header in self.headerList]
writer.writerow(headers)
                    # Write each data row
for row in range(self.rowCount()):
rowdata = []
for column in range(self.columnCount()):
item = self.item(row, column)
if item is not None:
rowdata.append(
unicode(item.text()).encode('gbk'))
else:
rowdata.append('')
writer.writerow(rowdata)
except IOError:
pass
#----------------------------------------------------------------------
def initMenu(self):
"""初始化右键菜单"""
self.menu = QtWidgets.QMenu(self)
saveAction = QtWidgets.QAction(vtText.SAVE_DATA, self)
saveAction.triggered.connect(self.saveToCsv)
self.menu.addAction(saveAction)
#----------------------------------------------------------------------
def contextMenuEvent(self, event):
"""右键点击事件"""
self.menu.popup(QtGui.QCursor.pos())
########################################################################
class MarketMonitor(BasicMonitor):
"""市场监控组件"""
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, parent=None):
"""Constructor"""
super(MarketMonitor, self).__init__(mainEngine, eventEngine, parent)
        # Set up the ordered header dict
d = OrderedDict()
d['symbol'] = {'chinese':vtText.CONTRACT_SYMBOL, 'cellType':BasicCell}
d['vtSymbol'] = {'chinese':vtText.CONTRACT_NAME, 'cellType':NameCell}
d['lastPrice'] = {'chinese':vtText.LAST_PRICE, 'cellType':BasicCell}
d['preClosePrice'] = {'chinese':vtText.PRE_CLOSE_PRICE, 'cellType':BasicCell}
d['volume'] = {'chinese':vtText.VOLUME, 'cellType':BasicCell}
d['openInterest'] = {'chinese':vtText.OPEN_INTEREST, 'cellType':BasicCell}
d['openPrice'] = {'chinese':vtText.OPEN_PRICE, 'cellType':BasicCell}
d['highPrice'] = {'chinese':vtText.HIGH_PRICE, 'cellType':BasicCell}
d['lowPrice'] = {'chinese':vtText.LOW_PRICE, 'cellType':BasicCell}
d['bidPrice1'] = {'chinese':vtText.BID_PRICE_1, 'cellType':BidCell}
d['bidVolume1'] = {'chinese':vtText.BID_VOLUME_1, 'cellType':BidCell}
d['askPrice1'] = {'chinese':vtText.ASK_PRICE_1, 'cellType':AskCell}
d['askVolume1'] = {'chinese':vtText.ASK_VOLUME_1, 'cellType':AskCell}
d['time'] = {'chinese':vtText.TIME, 'cellType':BasicCell}
d['gatewayName'] = {'chinese':vtText.GATEWAY, 'cellType':BasicCell}
self.setHeaderDict(d)
        # Set the data key
        self.setDataKey('vtSymbol')
        # Set the event type to monitor
        self.setEventType(EVENT_TICK)
        # Set the font
        self.setFont(BASIC_FONT)
        # Allow sorting
        self.setSorting(True)
        # Initialize the table
        self.initTable()
        # Register the event listener
        self.registerEvent()
########################################################################
class LogMonitor(BasicMonitor):
"""日志监控"""
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, parent=None):
"""Constructor"""
super(LogMonitor, self).__init__(mainEngine, eventEngine, parent)
d = OrderedDict()
d['logTime'] = {'chinese':vtText.TIME, 'cellType':BasicCell}
d['logContent'] = {'chinese':vtText.CONTENT, 'cellType':BasicCell}
d['gatewayName'] = {'chinese':vtText.GATEWAY, 'cellType':BasicCell}
self.setHeaderDict(d)
self.setEventType(EVENT_LOG)
self.setFont(BASIC_FONT)
self.initTable()
self.registerEvent()
########################################################################
class ErrorMonitor(BasicMonitor):
"""错误监控"""
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, parent=None):
"""Constructor"""
super(ErrorMonitor, self).__init__(mainEngine, eventEngine, parent)
d = OrderedDict()
d['errorTime'] = {'chinese':vtText.TIME, 'cellType':BasicCell}
d['errorID'] = {'chinese':vtText.ERROR_CODE, 'cellType':BasicCell}
d['errorMsg'] = {'chinese':vtText.ERROR_MESSAGE, 'cellType':BasicCell}
d['gatewayName'] = {'chinese':vtText.GATEWAY, 'cellType':BasicCell}
self.setHeaderDict(d)
self.setEventType(EVENT_ERROR)
self.setFont(BASIC_FONT)
self.initTable()
self.registerEvent()
########################################################################
class TradeMonitor(BasicMonitor):
"""成交监控"""
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, parent=None):
"""Constructor"""
super(TradeMonitor, self).__init__(mainEngine, eventEngine, parent)
d = OrderedDict()
d['tradeID'] = {'chinese':vtText.TRADE_ID, 'cellType':NumCell}
d['orderID'] = {'chinese':vtText.ORDER_ID, 'cellType':NumCell}
d['symbol'] = {'chinese':vtText.CONTRACT_SYMBOL, 'cellType':BasicCell}
d['vtSymbol'] = {'chinese':vtText.CONTRACT_NAME, 'cellType':NameCell}
d['direction'] = {'chinese':vtText.DIRECTION, 'cellType':DirectionCell}
d['offset'] = {'chinese':vtText.OFFSET, 'cellType':BasicCell}
d['price'] = {'chinese':vtText.PRICE, 'cellType':BasicCell}
d['volume'] = {'chinese':vtText.VOLUME, 'cellType':BasicCell}
d['tradeTime'] = {'chinese':vtText.TRADE_TIME, 'cellType':BasicCell}
d['gatewayName'] = {'chinese':vtText.GATEWAY, 'cellType':BasicCell}
self.setHeaderDict(d)
self.setEventType(EVENT_TRADE)
self.setFont(BASIC_FONT)
self.setSorting(True)
self.initTable()
self.registerEvent()
########################################################################
class OrderMonitor(BasicMonitor):
"""委托监控"""
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, parent=None):
"""Constructor"""
super(OrderMonitor, self).__init__(mainEngine, eventEngine, parent)
self.mainEngine = mainEngine
d = OrderedDict()
d['orderID'] = {'chinese':vtText.ORDER_ID, 'cellType':NumCell}
d['symbol'] = {'chinese':vtText.CONTRACT_SYMBOL, 'cellType':BasicCell}
d['vtSymbol'] = {'chinese':vtText.CONTRACT_NAME, 'cellType':NameCell}
d['direction'] = {'chinese':vtText.DIRECTION, 'cellType':DirectionCell}
d['offset'] = {'chinese':vtText.OFFSET, 'cellType':BasicCell}
d['price'] = {'chinese':vtText.PRICE, 'cellType':BasicCell}
d['totalVolume'] = {'chinese':vtText.ORDER_VOLUME, 'cellType':BasicCell}
d['tradedVolume'] = {'chinese':vtText.TRADED_VOLUME, 'cellType':BasicCell}
d['status'] = {'chinese':vtText.ORDER_STATUS, 'cellType':BasicCell}
d['orderTime'] = {'chinese':vtText.ORDER_TIME, 'cellType':BasicCell}
d['cancelTime'] = {'chinese':vtText.CANCEL_TIME, 'cellType':BasicCell}
        #d['frontID'] = {'chinese':vtText.FRONT_ID, 'cellType':BasicCell}  # in vn.trader the ctpGateway order ID is strictly increasing, so frontID/sessionID can be omitted here
#d['sessionID'] = {'chinese':vtText.SESSION_ID, 'cellType':BasicCell}
d['gatewayName'] = {'chinese':vtText.GATEWAY, 'cellType':BasicCell}
self.setHeaderDict(d)
self.setDataKey('vtOrderID')
self.setEventType(EVENT_ORDER)
self.setFont(BASIC_FONT)
self.setSaveData(True)
self.setSorting(True)
self.initTable()
self.registerEvent()
self.connectSignal()
#----------------------------------------------------------------------
def connectSignal(self):
"""连接信号"""
# 双击单元格撤单
self.itemDoubleClicked.connect(self.cancelOrder)
#----------------------------------------------------------------------
def cancelOrder(self, cell):
"""根据单元格的数据撤单"""
order = cell.data
req = VtCancelOrderReq()
req.symbol = order.symbol
req.exchange = order.exchange
req.frontID = order.frontID
req.sessionID = order.sessionID
req.orderID = order.orderID
self.mainEngine.cancelOrder(req, order.gatewayName)
########################################################################
class PositionMonitor(BasicMonitor):
"""持仓监控"""
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, parent=None):
"""Constructor"""
super(PositionMonitor, self).__init__(mainEngine, eventEngine, parent)
d = OrderedDict()
d['symbol'] = {'chinese':vtText.CONTRACT_SYMBOL, 'cellType':BasicCell}
d['vtSymbol'] = {'chinese':vtText.CONTRACT_NAME, 'cellType':NameCell}
d['direction'] = {'chinese':vtText.DIRECTION, 'cellType':DirectionCell}
d['position'] = {'chinese':vtText.POSITION, 'cellType':BasicCell}
d['ydPosition'] = {'chinese':vtText.YD_POSITION, 'cellType':BasicCell}
d['frozen'] = {'chinese':vtText.FROZEN, 'cellType':BasicCell}
d['price'] = {'chinese':vtText.PRICE, 'cellType':BasicCell}
d['positionProfit'] = {'chinese':vtText.POSITION_PROFIT, 'cellType':PnlCell}
d['gatewayName'] = {'chinese':vtText.GATEWAY, 'cellType':BasicCell}
self.setHeaderDict(d)
self.setDataKey('vtPositionName')
self.setEventType(EVENT_POSITION)
self.setFont(BASIC_FONT)
self.setSaveData(True)
self.initTable()
self.registerEvent()
########################################################################
class AccountMonitor(BasicMonitor):
"""账户监控"""
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, parent=None):
"""Constructor"""
super(AccountMonitor, self).__init__(mainEngine, eventEngine, parent)
d = OrderedDict()
d['accountID'] = {'chinese':vtText.ACCOUNT_ID, 'cellType':BasicCell}
d['preBalance'] = {'chinese':vtText.PRE_BALANCE, 'cellType':BasicCell}
d['balance'] = {'chinese':vtText.BALANCE, 'cellType':BasicCell}
d['available'] = {'chinese':vtText.AVAILABLE, 'cellType':BasicCell}
d['commission'] = {'chinese':vtText.COMMISSION, 'cellType':BasicCell}
d['margin'] = {'chinese':vtText.MARGIN, 'cellType':BasicCell}
d['closeProfit'] = {'chinese':vtText.CLOSE_PROFIT, 'cellType':BasicCell}
d['positionProfit'] = {'chinese':vtText.POSITION_PROFIT, 'cellType':BasicCell}
d['gatewayName'] = {'chinese':vtText.GATEWAY, 'cellType':BasicCell}
self.setHeaderDict(d)
self.setDataKey('vtAccountID')
self.setEventType(EVENT_ACCOUNT)
self.setFont(BASIC_FONT)
self.initTable()
self.registerEvent()
########################################################################
class TradingWidget(QtWidgets.QFrame):
"""简单交易组件"""
signal = QtCore.Signal(type(Event()))
directionList = [DIRECTION_LONG,
DIRECTION_SHORT]
offsetList = [OFFSET_OPEN,
OFFSET_CLOSE,
OFFSET_CLOSEYESTERDAY,
OFFSET_CLOSETODAY]
priceTypeList = [PRICETYPE_LIMITPRICE,
PRICETYPE_MARKETPRICE,
PRICETYPE_FAK,
PRICETYPE_FOK]
exchangeList = [EXCHANGE_NONE,
EXCHANGE_CFFEX,
EXCHANGE_SHFE,
EXCHANGE_DCE,
EXCHANGE_CZCE,
EXCHANGE_SSE,
EXCHANGE_SZSE,
EXCHANGE_SGE,
EXCHANGE_HKEX,
EXCHANGE_HKFE,
EXCHANGE_SMART,
EXCHANGE_ICE,
EXCHANGE_CME,
EXCHANGE_NYMEX,
EXCHANGE_GLOBEX,
EXCHANGE_IDEALPRO]
currencyList = [CURRENCY_NONE,
CURRENCY_CNY,
CURRENCY_HKD,
CURRENCY_USD]
productClassList = [PRODUCT_NONE,
PRODUCT_EQUITY,
PRODUCT_FUTURES,
PRODUCT_OPTION,
PRODUCT_FOREX]
gatewayList = ['']
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, parent=None):
"""Constructor"""
super(TradingWidget, self).__init__(parent)
self.mainEngine = mainEngine
self.eventEngine = eventEngine
self.symbol = ''
        # Add the available trading gateways
self.gatewayList.extend(mainEngine.getAllGatewayNames())
self.initUi()
self.connectSignal()
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(vtText.TRADING)
self.setMaximumWidth(400)
self.setFrameShape(self.Box) # 设置边框
self.setLineWidth(1)
# 左边部分
labelSymbol = QtWidgets.QLabel(vtText.CONTRACT_SYMBOL)
labelName = QtWidgets.QLabel(vtText.CONTRACT_NAME)
labelDirection = QtWidgets.QLabel(vtText.DIRECTION)
labelOffset = QtWidgets.QLabel(vtText.OFFSET)
labelPrice = QtWidgets.QLabel(vtText.PRICE)
        self.checkFixed = QtWidgets.QCheckBox(u'')  # fixed-price checkbox
labelVolume = QtWidgets.QLabel(vtText.VOLUME)
labelPriceType = QtWidgets.QLabel(vtText.PRICE_TYPE)
labelExchange = QtWidgets.QLabel(vtText.EXCHANGE)
labelCurrency = QtWidgets.QLabel(vtText.CURRENCY)
labelProductClass = QtWidgets.QLabel(vtText.PRODUCT_CLASS)
labelGateway = QtWidgets.QLabel(vtText.GATEWAY)
self.lineSymbol = QtWidgets.QLineEdit()
self.lineName = QtWidgets.QLineEdit()
self.comboDirection = QtWidgets.QComboBox()
self.comboDirection.addItems(self.directionList)
self.comboOffset = QtWidgets.QComboBox()
self.comboOffset.addItems(self.offsetList)
self.spinPrice = QtWidgets.QDoubleSpinBox()
self.spinPrice.setDecimals(4)
self.spinPrice.setMinimum(0)
self.spinPrice.setMaximum(100000)
self.spinVolume = QtWidgets.QSpinBox()
self.spinVolume.setMinimum(0)
self.spinVolume.setMaximum(1000000)
self.comboPriceType = QtWidgets.QComboBox()
self.comboPriceType.addItems(self.priceTypeList)
self.comboExchange = QtWidgets.QComboBox()
self.comboExchange.addItems(self.exchangeList)
self.comboCurrency = QtWidgets.QComboBox()
self.comboCurrency.addItems(self.currencyList)
self.comboProductClass = QtWidgets.QComboBox()
self.comboProductClass.addItems(self.productClassList)
self.comboGateway = QtWidgets.QComboBox()
self.comboGateway.addItems(self.gatewayList)
gridleft = QtWidgets.QGridLayout()
gridleft.addWidget(labelSymbol, 0, 0)
gridleft.addWidget(labelName, 1, 0)
gridleft.addWidget(labelDirection, 2, 0)
gridleft.addWidget(labelOffset, 3, 0)
gridleft.addWidget(labelPrice, 4, 0)
gridleft.addWidget(labelVolume, 5, 0)
gridleft.addWidget(labelPriceType, 6, 0)
gridleft.addWidget(labelExchange, 7, 0)
gridleft.addWidget(labelCurrency, 8, 0)
gridleft.addWidget(labelProductClass, 9, 0)
gridleft.addWidget(labelGateway, 10, 0)
gridleft.addWidget(self.lineSymbol, 0, 1, 1, -1)
gridleft.addWidget(self.lineName, 1, 1, 1, -1)
gridleft.addWidget(self.comboDirection, 2, 1, 1, -1)
gridleft.addWidget(self.comboOffset, 3, 1, 1, -1)
gridleft.addWidget(self.checkFixed, 4, 1)
gridleft.addWidget(self.spinPrice, 4, 2)
gridleft.addWidget(self.spinVolume, 5, 1, 1, -1)
gridleft.addWidget(self.comboPriceType, 6, 1, 1, -1)
gridleft.addWidget(self.comboExchange, 7, 1, 1, -1)
gridleft.addWidget(self.comboCurrency, 8, 1, 1, -1)
gridleft.addWidget(self.comboProductClass, 9, 1, 1, -1)
gridleft.addWidget(self.comboGateway, 10, 1, 1, -1)
        # Right side
labelBid1 = QtWidgets.QLabel(vtText.BID_1)
labelBid2 = QtWidgets.QLabel(vtText.BID_2)
labelBid3 = QtWidgets.QLabel(vtText.BID_3)
labelBid4 = QtWidgets.QLabel(vtText.BID_4)
labelBid5 = QtWidgets.QLabel(vtText.BID_5)
labelAsk1 = QtWidgets.QLabel(vtText.ASK_1)
labelAsk2 = QtWidgets.QLabel(vtText.ASK_2)
labelAsk3 = QtWidgets.QLabel(vtText.ASK_3)
labelAsk4 = QtWidgets.QLabel(vtText.ASK_4)
labelAsk5 = QtWidgets.QLabel(vtText.ASK_5)
self.labelBidPrice1 = QtWidgets.QLabel()
self.labelBidPrice2 = QtWidgets.QLabel()
self.labelBidPrice3 = QtWidgets.QLabel()
self.labelBidPrice4 = QtWidgets.QLabel()
self.labelBidPrice5 = QtWidgets.QLabel()
self.labelBidVolume1 = QtWidgets.QLabel()
self.labelBidVolume2 = QtWidgets.QLabel()
self.labelBidVolume3 = QtWidgets.QLabel()
self.labelBidVolume4 = QtWidgets.QLabel()
self.labelBidVolume5 = QtWidgets.QLabel()
self.labelAskPrice1 = QtWidgets.QLabel()
self.labelAskPrice2 = QtWidgets.QLabel()
self.labelAskPrice3 = QtWidgets.QLabel()
self.labelAskPrice4 = QtWidgets.QLabel()
self.labelAskPrice5 = QtWidgets.QLabel()
self.labelAskVolume1 = QtWidgets.QLabel()
self.labelAskVolume2 = QtWidgets.QLabel()
self.labelAskVolume3 = QtWidgets.QLabel()
self.labelAskVolume4 = QtWidgets.QLabel()
self.labelAskVolume5 = QtWidgets.QLabel()
labelLast = QtWidgets.QLabel(vtText.LAST)
self.labelLastPrice = QtWidgets.QLabel()
self.labelReturn = QtWidgets.QLabel()
self.labelLastPrice.setMinimumWidth(60)
self.labelReturn.setMinimumWidth(60)
gridRight = QtWidgets.QGridLayout()
gridRight.addWidget(labelAsk5, 0, 0)
gridRight.addWidget(labelAsk4, 1, 0)
gridRight.addWidget(labelAsk3, 2, 0)
gridRight.addWidget(labelAsk2, 3, 0)
gridRight.addWidget(labelAsk1, 4, 0)
gridRight.addWidget(labelLast, 5, 0)
gridRight.addWidget(labelBid1, 6, 0)
gridRight.addWidget(labelBid2, 7, 0)
gridRight.addWidget(labelBid3, 8, 0)
gridRight.addWidget(labelBid4, 9, 0)
gridRight.addWidget(labelBid5, 10, 0)
gridRight.addWidget(self.labelAskPrice5, 0, 1)
gridRight.addWidget(self.labelAskPrice4, 1, 1)
gridRight.addWidget(self.labelAskPrice3, 2, 1)
gridRight.addWidget(self.labelAskPrice2, 3, 1)
gridRight.addWidget(self.labelAskPrice1, 4, 1)
gridRight.addWidget(self.labelLastPrice, 5, 1)
gridRight.addWidget(self.labelBidPrice1, 6, 1)
gridRight.addWidget(self.labelBidPrice2, 7, 1)
gridRight.addWidget(self.labelBidPrice3, 8, 1)
gridRight.addWidget(self.labelBidPrice4, 9, 1)
gridRight.addWidget(self.labelBidPrice5, 10, 1)
gridRight.addWidget(self.labelAskVolume5, 0, 2)
gridRight.addWidget(self.labelAskVolume4, 1, 2)
gridRight.addWidget(self.labelAskVolume3, 2, 2)
gridRight.addWidget(self.labelAskVolume2, 3, 2)
gridRight.addWidget(self.labelAskVolume1, 4, 2)
gridRight.addWidget(self.labelReturn, 5, 2)
gridRight.addWidget(self.labelBidVolume1, 6, 2)
gridRight.addWidget(self.labelBidVolume2, 7, 2)
gridRight.addWidget(self.labelBidVolume3, 8, 2)
gridRight.addWidget(self.labelBidVolume4, 9, 2)
gridRight.addWidget(self.labelBidVolume5, 10, 2)
        # Order buttons
buttonSendOrder = QtWidgets.QPushButton(vtText.SEND_ORDER)
buttonCancelAll = QtWidgets.QPushButton(vtText.CANCEL_ALL)
size = buttonSendOrder.sizeHint()
        buttonSendOrder.setMinimumHeight(size.height()*2)   # double the default button height
buttonCancelAll.setMinimumHeight(size.height()*2)
        # Assemble the layout
hbox = QtWidgets.QHBoxLayout()
hbox.addLayout(gridleft)
hbox.addLayout(gridRight)
vbox = QtWidgets.QVBoxLayout()
vbox.addLayout(hbox)
vbox.addWidget(buttonSendOrder)
vbox.addWidget(buttonCancelAll)
vbox.addStretch()
self.setLayout(vbox)
        # Wire up signal connections
buttonSendOrder.clicked.connect(self.sendOrder)
buttonCancelAll.clicked.connect(self.cancelAll)
self.lineSymbol.returnPressed.connect(self.updateSymbol)
#----------------------------------------------------------------------
def updateSymbol(self):
"""合约变化"""
# 读取组件数据
symbol = str(self.lineSymbol.text())
exchange = unicode(self.comboExchange.currentText())
currency = unicode(self.comboCurrency.currentText())
productClass = unicode(self.comboProductClass.currentText())
gatewayName = unicode(self.comboGateway.currentText())
        # Look up the contract
if exchange:
vtSymbol = '.'.join([symbol, exchange])
contract = self.mainEngine.getContract(vtSymbol)
else:
vtSymbol = symbol
contract = self.mainEngine.getContract(symbol)
if contract:
vtSymbol = contract.vtSymbol
gatewayName = contract.gatewayName
self.lineName.setText(contract.name)
            exchange = contract.exchange  # make sure the exchange code is set
        # Clear price and volume
        self.spinPrice.setValue(0)
        self.spinVolume.setValue(0)
        # Clear the market data display
self.labelBidPrice1.setText('')
self.labelBidPrice2.setText('')
self.labelBidPrice3.setText('')
self.labelBidPrice4.setText('')
self.labelBidPrice5.setText('')
self.labelBidVolume1.setText('')
self.labelBidVolume2.setText('')
self.labelBidVolume3.setText('')
self.labelBidVolume4.setText('')
self.labelBidVolume5.setText('')
self.labelAskPrice1.setText('')
self.labelAskPrice2.setText('')
self.labelAskPrice3.setText('')
self.labelAskPrice4.setText('')
self.labelAskPrice5.setText('')
self.labelAskVolume1.setText('')
self.labelAskVolume2.setText('')
self.labelAskVolume3.setText('')
self.labelAskVolume4.setText('')
self.labelAskVolume5.setText('')
self.labelLastPrice.setText('')
self.labelReturn.setText('')
        # Re-register the tick event listener for the new symbol
self.eventEngine.unregister(EVENT_TICK + self.symbol, self.signal.emit)
self.eventEngine.register(EVENT_TICK + vtSymbol, self.signal.emit)
        # Subscribe to the contract
req = VtSubscribeReq()
req.symbol = symbol
req.exchange = exchange
req.currency = currency
req.productClass = productClass
        # Follow the last price by default
self.checkFixed.setChecked(False)
self.mainEngine.subscribe(req, gatewayName)
        # Remember the symbol currently being traded
self.symbol = vtSymbol
#----------------------------------------------------------------------
def updateTick(self, event):
"""更新行情"""
tick = event.dict_['data']
if tick.vtSymbol == self.symbol:
if not self.checkFixed.isChecked():
self.spinPrice.setValue(tick.lastPrice)
self.labelBidPrice1.setText(str(tick.bidPrice1))
self.labelAskPrice1.setText(str(tick.askPrice1))
self.labelBidVolume1.setText(str(tick.bidVolume1))
self.labelAskVolume1.setText(str(tick.askVolume1))
if tick.bidPrice2:
self.labelBidPrice2.setText(str(tick.bidPrice2))
self.labelBidPrice3.setText(str(tick.bidPrice3))
self.labelBidPrice4.setText(str(tick.bidPrice4))
self.labelBidPrice5.setText(str(tick.bidPrice5))
self.labelAskPrice2.setText(str(tick.askPrice2))
self.labelAskPrice3.setText(str(tick.askPrice3))
self.labelAskPrice4.setText(str(tick.askPrice4))
self.labelAskPrice5.setText(str(tick.askPrice5))
self.labelBidVolume2.setText(str(tick.bidVolume2))
self.labelBidVolume3.setText(str(tick.bidVolume3))
self.labelBidVolume4.setText(str(tick.bidVolume4))
self.labelBidVolume5.setText(str(tick.bidVolume5))
self.labelAskVolume2.setText(str(tick.askVolume2))
self.labelAskVolume3.setText(str(tick.askVolume3))
self.labelAskVolume4.setText(str(tick.askVolume4))
self.labelAskVolume5.setText(str(tick.askVolume5))
self.labelLastPrice.setText(str(tick.lastPrice))
if tick.preClosePrice:
rt = (tick.lastPrice/tick.preClosePrice)-1
self.labelReturn.setText(('%.2f' %(rt*100))+'%')
else:
self.labelReturn.setText('')
#----------------------------------------------------------------------
def connectSignal(self):
"""连接Signal"""
self.signal.connect(self.updateTick)
#----------------------------------------------------------------------
def sendOrder(self):
"""发单"""
symbol = str(self.lineSymbol.text())
exchange = unicode(self.comboExchange.currentText())
currency = unicode(self.comboCurrency.currentText())
productClass = unicode(self.comboProductClass.currentText())
gatewayName = unicode(self.comboGateway.currentText())
        # Look up the contract
if exchange:
vtSymbol = '.'.join([symbol, exchange])
contract = self.mainEngine.getContract(vtSymbol)
else:
vtSymbol = symbol
contract = self.mainEngine.getContract(symbol)
if contract:
gatewayName = contract.gatewayName
            exchange = contract.exchange  # make sure the exchange code is set
req = VtOrderReq()
req.symbol = symbol
req.exchange = exchange
req.price = self.spinPrice.value()
req.volume = self.spinVolume.value()
req.direction = unicode(self.comboDirection.currentText())
req.priceType = unicode(self.comboPriceType.currentText())
req.offset = unicode(self.comboOffset.currentText())
req.currency = currency
req.productClass = productClass
self.mainEngine.sendOrder(req, gatewayName)
#----------------------------------------------------------------------
def cancelAll(self):
"""一键撤销所有委托"""
l = self.mainEngine.getAllWorkingOrders()
for order in l:
req = VtCancelOrderReq()
req.symbol = order.symbol
req.exchange = order.exchange
req.frontID = order.frontID
req.sessionID = order.sessionID
req.orderID = order.orderID
self.mainEngine.cancelOrder(req, order.gatewayName)
#----------------------------------------------------------------------
def closePosition(self, cell):
"""根据持仓信息自动填写交易组件"""
# 读取持仓数据,cell是一个表格中的单元格对象
pos = cell.data
symbol = pos.symbol
        # Update the symbol shown in the trading widget
self.lineSymbol.setText(symbol)
self.updateSymbol()
        # Auto-fill the remaining fields
self.comboPriceType.setCurrentIndex(self.priceTypeList.index(PRICETYPE_LIMITPRICE))
self.comboOffset.setCurrentIndex(self.offsetList.index(OFFSET_CLOSE))
self.spinVolume.setValue(pos.position)
if pos.direction == DIRECTION_LONG or pos.direction == DIRECTION_NET:
self.comboDirection.setCurrentIndex(self.directionList.index(DIRECTION_SHORT))
else:
self.comboDirection.setCurrentIndex(self.directionList.index(DIRECTION_LONG))
        # The price is left for the user to enter once the quote updates, to guard against accidental orders
########################################################################
class ContractMonitor(BasicMonitor):
"""合约查询"""
#----------------------------------------------------------------------
def __init__(self, mainEngine, parent=None):
"""Constructor"""
super(ContractMonitor, self).__init__(parent=parent)
self.mainEngine = mainEngine
d = OrderedDict()
d['symbol'] = {'chinese':vtText.CONTRACT_SYMBOL, 'cellType':BasicCell}
d['exchange'] = {'chinese':vtText.EXCHANGE, 'cellType':BasicCell}
d['vtSymbol'] = {'chinese':vtText.VT_SYMBOL, 'cellType':BasicCell}
d['name'] = {'chinese':vtText.CONTRACT_NAME, 'cellType':BasicCell}
d['productClass'] = {'chinese':vtText.PRODUCT_CLASS, 'cellType':BasicCell}
d['size'] = {'chinese':vtText.CONTRACT_SIZE, 'cellType':BasicCell}
d['priceTick'] = {'chinese':vtText.PRICE_TICK, 'cellType':BasicCell}
d['strikePrice'] = {'chinese':vtText.STRIKE_PRICE, 'cellType':BasicCell}
d['underlyingSymbol'] = {'chinese':vtText.UNDERLYING_SYMBOL, 'cellType':BasicCell}
d['optionType'] = {'chinese':vtText.OPTION_TYPE, 'cellType':BasicCell}
self.setHeaderDict(d)
        # Filter string applied when displaying contracts
self.filterContent = EMPTY_STRING
self.initUi()
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setMinimumSize(800, 800)
self.setFont(BASIC_FONT)
self.initTable()
self.addMenuAction()
#----------------------------------------------------------------------
def showAllContracts(self):
"""显示所有合约数据"""
l = self.mainEngine.getAllContracts()
d = {'.'.join([contract.exchange, contract.symbol]):contract for contract in l}
l2 = d.keys()
l2.sort(reverse=True)
self.setRowCount(len(l2))
row = 0
for key in l2:
            # Skip contracts whose code does not contain the filter string, if one is set
if self.filterContent and self.filterContent not in key:
continue
contract = d[key]
for n, header in enumerate(self.headerList):
content = safeUnicode(contract.__getattribute__(header))
cellType = self.headerDict[header]['cellType']
cell = cellType(content)
if self.font:
                    cell.setFont(self.font)  # apply the custom font if one is set
self.setItem(row, n, cell)
row = row + 1
#----------------------------------------------------------------------
def refresh(self):
"""刷新"""
self.menu.close() # 关闭菜单
self.clearContents()
self.setRowCount(0)
self.showAllContracts()
#----------------------------------------------------------------------
def addMenuAction(self):
"""增加右键菜单内容"""
refreshAction = QtWidgets.QAction(vtText.REFRESH, self)
refreshAction.triggered.connect(self.refresh)
self.menu.addAction(refreshAction)
#----------------------------------------------------------------------
def show(self):
"""显示"""
super(ContractMonitor, self).show()
self.refresh()
#----------------------------------------------------------------------
def setFilterContent(self, content):
"""设置过滤字符串"""
self.filterContent = content
########################################################################
class ContractManager(QtWidgets.QWidget):
"""合约管理组件"""
#----------------------------------------------------------------------
def __init__(self, mainEngine, parent=None):
"""Constructor"""
super(ContractManager, self).__init__(parent=parent)
self.mainEngine = mainEngine
self.initUi()
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(vtText.CONTRACT_SEARCH)
self.lineFilter = QtWidgets.QLineEdit()
self.buttonFilter = QtWidgets.QPushButton(vtText.SEARCH)
self.buttonFilter.clicked.connect(self.filterContract)
self.monitor = ContractMonitor(self.mainEngine)
self.monitor.refresh()
hbox = QtWidgets.QHBoxLayout()
hbox.addWidget(self.lineFilter)
hbox.addWidget(self.buttonFilter)
hbox.addStretch()
vbox = QtWidgets.QVBoxLayout()
vbox.addLayout(hbox)
vbox.addWidget(self.monitor)
self.setLayout(vbox)
#----------------------------------------------------------------------
def filterContract(self):
"""显示过滤后的合约"""
content = str(self.lineFilter.text())
self.monitor.setFilterContent(content)
self.monitor.refresh()
|
[
"sonne.lu@hotmail.com"
] |
sonne.lu@hotmail.com
|
4712dc2ec92c5f2b55803a968c7ec2b86e8c19e4
|
c4464ab60e0b465b5ae311990c4538f3c150f73b
|
/app/gamer.py
|
78cab3736cd626a6709a77222f4d0bef8ba5a219
|
[] |
no_license
|
XavierBelletrud/NovaConnect4
|
1d8ecdf1369020be02194256d3d4b8b8e3691674
|
61c94957d0b4f34a09c887c5749068d5109b1acb
|
refs/heads/master
| 2023-05-09T19:00:43.754934
| 2021-06-07T08:23:42
| 2021-06-07T08:23:42
| 374,589,131
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,808
|
py
|
import random
import numpy as np
from abc import ABC, abstractmethod
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Conv2D, MaxPool2D, Flatten
from keras.optimizers import Adam
from keras.backend import clear_session
class Gamer(ABC):
def __init__(self, board, value=1):
self.board = board
self.value = value
    @abstractmethod  ## abstract method: must be overridden in subclasses
def input_game(self):
pass
class HumanGamer(Gamer):
def input_game(self):
choice = ""
possible_choice = np.arange(0, self.board.get_nbcolumns())
while type(choice) != int or choice not in possible_choice:
try :
choice = int(input(f'Select a number between 1 and {self.board.get_nbcolumns()}: '))-1
if choice in possible_choice:
return choice
except ValueError:
print(f" choice must be integer between 1 to {self.board.get_nbcolumns()}")
class CPUGamerRandom(Gamer):
def input_game(self):
choice = np.random.randint(0, self.board.get_nbcolumns())
return choice
class CPUGamerRL(Gamer):
def __init__(self, board, value:int=1, flag_train:bool=False,is_train_model:bool=True, model_weigth_path:str='', mdl_with_conv=True):
Gamer.__init__(self, board, value)
## variable
self.__learning_rate = 0.001 # 0.005 0.001
        ##### For head-to-head play, fix epsilon to a small value so decisions stay greedy
self.__epsilon = 1.0
self.__epsilon_min = 0.01
        self.__epsilon_decay = 0.99985  ## careful: epsilon is decayed per action ... or per game ...
self.__gamma = 0.95
self.__tau = .125
self.mdl_with_conv = mdl_with_conv
        self.__pre_trained = False  # False: create convolution.h5 + pretrained.h5
self.__model_weigth_path = model_weigth_path
self.__flag_is_train = flag_train
self.__is_train_model = is_train_model
self.strategy_replay = 'trial' # 'random'
if self.mdl_with_conv:
self.state_shape = (self.board.get_nblines(), self.board.get_nbcolumns(), 1)
else:
self.state_shape = self.board.get_nblines()*self.board.get_nbcolumns()
## Condition to create a target model
cond_target = (self.__flag_is_train and self.__is_train_model)
if self.__pre_trained==False:
self.model = self.create_model()
if cond_target:
self.target_model = self.create_model()
else:
            filepath = self.__model_weigth_path
self.model = load_model(filepath, custom_objects=None, compile=True, options=None)
if cond_target:
self.target_model = load_model(filepath, custom_objects=None, compile=True, options=None)
def create_model(self):
model = Sequential()
## add layers
if self.mdl_with_conv:
model.add(Conv2D(filters=16, kernel_size=3, padding='same', input_shape=self.state_shape, activation='relu'))
model.add(MaxPool2D())
model.add(Conv2D(filters=32, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPool2D())
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(units=50, activation="relu"))
else:
model.add(Dense(units=50, input_dim=self.state_shape, activation="elu"))
model.add(Dense(units=50, activation="relu"))
model.add(Dense(units=50, activation="relu"))
model.add(Dense(units=50, activation="relu"))
model.add(Dense(units=50, activation="relu"))
model.add(Dense(units=50, activation="relu"))
model.add(Dense(units=50, activation="relu"))
model.add(Dense(units=self.board.get_nbcolumns(), activation="linear"))
## compile the model
model.compile(loss="mean_squared_error",
optimizer=Adam(lr=self.__learning_rate),
metrics=["mean_squared_error"])
model.summary()
return model
def input_game(self):
if self.mdl_with_conv==True:
current_state = np.reshape(self.board.grid, (1, self.board.get_nblines(), self.board.get_nbcolumns(), 1))
else:
current_state = self.board.grid.reshape(1,-1)
choice = np.argmax(self.model.predict(current_state)[0])
print("Qlearning model choice of column", choice+1)
return choice
def act(self, state):
self.__epsilon *= self.__epsilon_decay
self.__epsilon = max(self.__epsilon_min, self.__epsilon)
if np.random.random() < self.__epsilon:
return np.random.randint(0, self.board.get_nbcolumns())
print(state)
if self.mdl_with_conv:
state = np.reshape(state, (1, self.board.get_nblines(), self.board.get_nbcolumns(), 1))
else:
state = state.reshape(1,-1)
        prediction = np.argmax(self.model.predict(state)[0])  ## to be changed for when the opponent plays
return prediction
def replay_random(self, memory, batch_size:int=32):
if len(memory) < batch_size:
return
samples = random.sample(memory, batch_size)
for sample in samples:
state, action, reward, new_state, done = sample
if self.mdl_with_conv:
state = np.reshape(state, (1, self.board.get_nblines(), self.board.get_nbcolumns(), 1))
new_state = np.reshape(new_state, (1, self.board.get_nblines(), self.board.get_nbcolumns(), 1))
target = self.target_model.predict(state)
if done:
target[0][action] = reward
else:
Q_future = max(self.target_model.predict(new_state)[0])
target[0][action] = reward + Q_future * self.__gamma
            history = self.model.fit(state, target, epochs=1, verbose=0)
return history.history['mean_squared_error']
def replay(self, memory):
for sample in memory:
state, action, reward, new_state, done = sample
# print(state)
if self.mdl_with_conv:
state = np.reshape(state, (1, self.board.get_nblines(), self.board.get_nbcolumns(), 1))
new_state = np.reshape(new_state, (1, self.board.get_nblines(), self.board.get_nbcolumns(), 1))
target = self.target_model.predict(state)
if done:
target[0][action] = reward
else:
Q_future = max(self.target_model.predict(new_state)[0])
target[0][action] = reward + Q_future * self.__gamma
history = self.model.fit(state, target, epochs=1, verbose=0)
return history.history['mean_squared_error']
def target_train(self):
weights = self.model.get_weights()
target_weights = self.target_model.get_weights()
for i in range(len(target_weights)):
target_weights[i] = weights[i] * self.__tau + target_weights[i] * (1 - self.__tau)
self.target_model.set_weights(target_weights)
def adverse_train(self, G2):
adverse_weights = G2.model.get_weights()
weights = self.model.get_weights()
for i in range(len(adverse_weights)):
weights[i] = adverse_weights[i] * self.__tau + weights[i] * (1 - self.__tau)
self.model.set_weights(weights)
def save_model(self, path:str):
self.model.save(path)
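# Illustrative training-loop sketch (not part of the original file), showing
# how act()/replay()/target_train() are meant to fit together. `Board` and its
# reset()/play() methods are assumptions standing in for the real environment:
#
# board = Board()
# agent = CPUGamerRL(board, value=1, flag_train=True, is_train_model=True)
# for episode in range(n_episodes):
#     board.reset()
#     memory, done = [], False
#     while not done:
#         state = board.grid.copy()
#         action = agent.act(state)                        # epsilon-greedy choice
#         reward, done = board.play(action, agent.value)   # drop a piece, score it
#         memory.append((state, action, reward, board.grid.copy(), done))
#     agent.replay(memory)       # fit the online model on this episode
#     agent.target_train()       # soft-update the target network (tau)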
|
[
"slyx@MacBook-Pro-de-slyx.local"
] |
slyx@MacBook-Pro-de-slyx.local
|
0e3aefafc95a297c27d744d0561007dbb15e0381
|
c82f01f6618c9d533719db1eb182ba6680eada28
|
/2014-2015/wulff_workspace/workspace/build/vision_opencv/cv_bridge/catkin_generated/pkg.installspace.context.pc.py
|
aaaedb409b63f9aeda33746afba39d64c40c9908
|
[] |
no_license
|
hajungong007/landingpad
|
e561fec000e42a54842450b311fd77b453cfebbe
|
c33e3ace8504290a6c8bafb7d6ebc7db0fc92525
|
refs/heads/master
| 2020-07-26T20:30:55.116505
| 2016-04-29T04:48:01
| 2016-04-29T04:48:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,441
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/cracker/workspace/install/include;/usr/include/opencv;/usr/include".split(';') if "/home/cracker/workspace/install/include;/usr/include/opencv;/usr/include" != "" else []
PROJECT_CATKIN_DEPENDS = "rosconsole;sensor_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lcv_bridge;-l:/usr/lib/x86_64-linux-gnu/libopencv_videostab.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_video.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_superres.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_stitching.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_photo.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_ocl.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_objdetect.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_ml.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_legacy.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_imgproc.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_highgui.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_gpu.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_flann.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_features2d.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_core.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_contrib.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_calib3d.so.2.4.8".split(';') if "-lcv_bridge;-l:/usr/lib/x86_64-linux-gnu/libopencv_videostab.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_video.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_superres.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_stitching.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_photo.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_ocl.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_objdetect.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_ml.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_legacy.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_imgproc.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_highgui.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_gpu.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_flann.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_features2d.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_core.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_contrib.so.2.4.8;-l:/usr/lib/x86_64-linux-gnu/libopencv_calib3d.so.2.4.8" != "" else []
PROJECT_NAME = "cv_bridge"
PROJECT_SPACE_DIR = "/home/cracker/workspace/install"
PROJECT_VERSION = "1.11.5"
|
[
"larry.pyeatt@sdsmt.edu"
] |
larry.pyeatt@sdsmt.edu
|
cf76e7a0493796a4d6b4af823b3a05f6eee88ba2
|
668f11da3e2fce7fdcbaab39e5f446c5ef36637b
|
/setup.py
|
dfa22549d3a6b93cf86abe0a77eb5f744c81c0c8
|
[] |
no_license
|
bsamseth/EigenNN
|
cab429ad900521d0285130a636d3bb390159cd4e
|
a0aac1585a9e172766275b5b9d3d00a972941158
|
refs/heads/master
| 2020-04-16T19:27:37.021830
| 2019-01-15T14:03:36
| 2019-01-15T14:03:36
| 165,860,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,926
|
py
|
import os
import re
import sys
import platform
import subprocess
import unittest
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
PACKAGE_NAME = "EigenNN"
def load_test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('tests', pattern='test_*.py')
return test_suite
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
if __name__ == '__main__':
setup(
name=PACKAGE_NAME,
version='1.0.0',
author='Bendik Samseth',
author_email='b.samseth@gmail.com',
description='Neural Network for Quantum Variational Monte Carlo',
long_description='',
ext_modules=[CMakeExtension(PACKAGE_NAME)],
cmdclass=dict(build_ext=CMakeBuild),
zip_safe=False,
test_suite='setup.load_test_suite',
tests_require=['numpy', 'autograd'],
)
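# Illustrative usage (not part of the original file): with CMake >= 3.1 and a
# C++ toolchain on PATH, the extension builds via
#
#     pip install .                          # or: python setup.py build_ext --inplace
#     python setup.py test                   # runs tests found by load_test_suite()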
|
[
"b.samseth@gmail.com"
] |
b.samseth@gmail.com
|
0e38159c0248dd405ba7f688dd0d74309c7b49c2
|
b7c054cf5ef54a039253e412c526e3352c571793
|
/exam/exception/__init__.py
|
0a5ffdd4d1ae2de657048608aa42decb5e9b7e56
|
[] |
no_license
|
choisoonsin/python3
|
f587593edb19ea0e125541529e32afeeee54c9c3
|
d417e2495bad88206f7b1ddde3400d30b901b369
|
refs/heads/master
| 2023-09-01T13:55:11.570100
| 2023-07-26T08:32:59
| 2023-07-26T08:32:59
| 161,561,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30
|
py
|
f = open("C://test.txt", 'r')
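# The bare open() above raises IOError (FileNotFoundError on Python 3) when
# C:/test.txt is missing -- presumably the point of this exception demo.
# A minimal handling sketch (illustrative, not part of the original file):
#
# try:
#     f = open("C://test.txt", 'r')
# except IOError as e:
#     print("could not open file:", e)
# else:
#     f.close()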
|
[
"cyh0214@gmail.com"
] |
cyh0214@gmail.com
|
65440b1030870cfe31732548952ceda8f36b86fa
|
090af6997bac1dd6e27b4684be41d70ede0e8bb3
|
/Week 5/deploy.py
|
2f5384d214a7ca472966c438e9261617426dee2d
|
[] |
no_license
|
amshumannsingh/Data_Glacier1
|
79cf55822403505484eb3544830d72ffa47ebfbc
|
70c478950029e89913f7e173e43bb4c6f997939a
|
refs/heads/main
| 2023-07-24T10:19:12.733018
| 2021-09-02T15:21:54
| 2021-09-02T15:21:54
| 403,010,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 877
|
py
|
from flask import Flask, jsonify, request
import pickle
import pandas as pd
app = Flask(__name__)
@app.route('/', methods = ['GET', 'POST'])
def home():
if (request.method == 'GET'):
data = "hello world"
return jsonify({'data': data})
@app.route('/predict/')
def price_predict():
model = pickle.load(open('model.pickle','rb'))
income = request.args.get('income')
house_age = request.args.get('house_age')
rooms = request.args.get('rooms')
bedrooms = request.args.get('bedrooms')
population = request.args.get('population')
test_df=pd.DataFrame({'Income':[income], 'House Age':[house_age], 'Rooms':[rooms], 'Bedrooms':[bedrooms], 'Population':[population] })
pred_price = model.predict(test_df)
return jsonify({'House Price': str(pred_price)})
if __name__ == '__main__':
app.run(debug = True)
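# Illustrative usage (not part of the original file): with model.pickle in the
# working directory and the server running, a request such as (values made up)
#
#     curl 'http://127.0.0.1:5000/predict/?income=65000&house_age=6&rooms=7&bedrooms=4&population=30000'
#
# returns {"House Price": "..."}. Note the query-string values arrive as
# strings; they only work if the pickled model coerces them to numbers.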
|
[
"amshumannsingh@gmail.com"
] |
amshumannsingh@gmail.com
|
5c3a0b5ac9e4d361f4a28de00aacc7966a4d3eb8
|
95b613359c8976a17d2c10cfbdbf93fa500377b1
|
/Django_Demo/venv/bin/django-admin.py
|
7e5bba8bb3febd3834ba05385272652bfe3abc07
|
[] |
no_license
|
kaiyuean/Presentation_4
|
279b18e2e4c0729bb3e138a0c9cea7a452b73843
|
dfb796d70d8bc992b8a980f85aa0496c325a11b6
|
refs/heads/master
| 2021-01-10T09:37:20.179804
| 2015-12-14T03:02:10
| 2015-12-14T03:02:10
| 47,285,694
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
#!/Users/kaiyuean/Desktop/Object-Oriented/project/project_folder/venv/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"kaiyue.an@colorado.edu"
] |
kaiyue.an@colorado.edu
|
3cde92d4a724f9c48cca6451694fa1d40b659e5b
|
d787daff01449224dd6834ac38c5b6d5d576c906
|
/venv/bin/easy_install-3.6
|
55331cda476121e3907c095c601601c254765b89
|
[] |
no_license
|
forza111/A_Bytte_of_Python
|
cc51e4176cebbe363d08e4fdb061165ce18b5e5a
|
d0aabfc6c75057846b546653e472c1e3987796ff
|
refs/heads/master
| 2022-12-15T08:35:36.647326
| 2020-09-07T19:06:48
| 2020-09-07T19:06:48
| 293,608,087
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
6
|
#!/home/nikita/PycharmProjects/A_Byte_of_Python/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.6')()
)
|
[
"nikitos.ionkin@yandex.ru"
] |
nikitos.ionkin@yandex.ru
|
f1fad69576b6c0a8ba61f241efef3b08efe23954
|
fc736e3cbc0bd960f2c7c4c275f631a6f484cd76
|
/raw/backup/dssm_train.py
|
fcb3a3ff16aebee206d502039ec30973dcf86263
|
[] |
no_license
|
xubaochuan/mm_intern
|
b0779e164cc4f4472ee09d9790bfa431bc9fa2fa
|
bed961f4f5426a481016795464c375aad454ef14
|
refs/heads/master
| 2021-07-06T03:44:19.701952
| 2017-09-27T02:51:04
| 2017-09-27T02:51:04
| 103,612,057
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,599
|
py
|
import random
import numpy as np
import tensorflow as tf
import text_reformer
import sys
reload(sys)
sys.setdefaultencoding('utf8')
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('summaries_dir', '/tmp/dssm-400-120-relu', 'Summaries directory')
flags.DEFINE_integer('embedding_dim', 200+200+500, 'the dim of the fusion of multi word2vec model')
flags.DEFINE_float('learning_rate', 0.1, 'Initial learning rate')
flags.DEFINE_integer('max_epoch', 50000, 'max train steps')
flags.DEFINE_integer('epoch_steps', 20, 'epoch step')
flags.DEFINE_integer('sentence_length', 20, 'sentence_max_word')
NEG = 10
BS = 20
L1_N = 400
L2_N = 120
query_train_data = None
doc_train_data = None
query_test_data = None
doc_test_data = None
vocab_path = 'model/vocab.txt'
vocab = {}
def variable_summaries(var, name):
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean/'+name, mean)
with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev/' + name, stddev)
tf.summary.scalar('max/' + name, tf.reduce_max(var))
tf.summary.scalar('min/' + name, tf.reduce_min(var))
tf.summary.histogram(name, var)
def load_vocab():
global vocab_path, vocab
fr = open(vocab_path)
index = 0
for row in fr.readlines():
word = row.strip().decode('utf8')
if word == '':
continue
vocab[word] = index
index += 1
fr.close()
def read_onehot_file(filepath):
data_set = []
fr = open(filepath)
for row in fr.readlines():
temp = []
row = row.strip()
if row == '':
data_set.append(temp)
continue
else:
array = row.split(' ')
onehot_vec = []
for i in array:
onehot_vec.append(int(i))
temp.append(onehot_vec)
fr.close()
return data_set
def get_onehot_vec(filepath):
data_set = []
fr = open(filepath)
for row in fr.readlines():
temp = []
row = row.strip().decode('utf8')
if row == '':
continue
for word in row:
if word in vocab:
temp.append(vocab[word])
if len(temp) > FLAGS.sentence_length:
temp = temp[:FLAGS.sentence_length]
elif len(temp) < FLAGS.sentence_length:
temp = temp + [0]*(FLAGS.sentence_length-len(temp))
data_set.append(temp)
fr.close()
return data_set
def load_train_data(query_path, doc_path):
global query_train_data, doc_train_data
query_train_data = np.asarray(get_onehot_vec(query_path), dtype=np.float32)[:400,:]
doc_train_data = np.asarray(get_onehot_vec(doc_path), dtype=np.float32)[:400,:]
assert query_train_data.shape[0] == doc_train_data.shape[0]
def load_test_data(query_path, doc_path):
global query_test_data, doc_test_data
query_test_data = np.asarray(get_onehot_vec(query_path), dtype=np.float32)[400:,:]
doc_test_data = np.asarray(get_onehot_vec(doc_path), dtype=np.float32)[400:,:]
assert query_test_data.shape[0] == doc_test_data.shape[0]
def get_batch_data(step):
global query_train_data, doc_train_data
start = step * BS
end = step * BS + BS
return query_train_data[start:end, :], doc_train_data[start:end, :]
def load_w2v(path, expectDim):
fp = open(path, "r")
line = fp.readline().strip()
ss = line.split(" ")
total = int(ss[0])
dim = int(ss[1])
assert (dim == expectDim)
ws = []
for t in range(total):
line = fp.readline().strip()
ss = line.split(" ")
assert (len(ss) == dim)
vals = []
for i in range(0, dim):
fv = float(ss[i])
vals.append(fv)
ws.append(vals)
fp.close()
return np.asarray(ws, dtype=np.float32)
def conv2d(name, input, w, b):
return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(input, w, strides=[1, 1, 1, 1], padding='VALID'), b), name=name)
def max_pool(name, input, k):
return tf.nn.max_pool(input, ksize=[1, k, 1, 1], strides=[1, k, 1, 1], padding='VALID', name=name)
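# max-over-time pooling: swap the time axis to the last dimension, take the top-1
# value along it, then swap back (equivalent to a max over the whole sequence)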
def full_max_pool(name, input, perm):
conv1 = tf.transpose(input, perm=perm)
values = tf.nn.top_k(conv1, 1, name=name).values
conv2 = tf.transpose(values, perm=perm)
return conv2
def norm(name, input, lsize=4):
return tf.nn.local_response_normalization(input, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=name)
w2v = load_w2v('model/vec.txt', 900)
load_vocab()
print len(vocab), w2v.shape[0]
assert len(vocab) == w2v.shape[0]
load_train_data('train_data/train_query.txt','train_data/train_doc.txt')
load_test_data('train_data/train_query.txt','train_data/train_doc.txt')
with tf.name_scope('input'):
query_batch = tf.placeholder(tf.int32, shape=[BS, FLAGS.sentence_length], name='QueryBatch')
doc_batch = tf.placeholder(tf.int32, shape=[BS, FLAGS.sentence_length], name='DocBatch')
with tf.name_scope('w2v'):
words = tf.Variable(w2v, dtype=tf.float32, name='words')
query_words = tf.nn.embedding_lookup(words, query_batch)
doc_words = tf.nn.embedding_lookup(words, doc_batch)
query_words_out = tf.expand_dims(query_words, -1)
doc_words_out = tf.expand_dims(doc_words, -1)
with tf.name_scope('convolution_layer'):
#conv kernel = 2
wc1 = tf.Variable(tf.random_normal([2, FLAGS.embedding_dim, 1, 64]), 'wc1')
bc1 = tf.Variable(tf.random_normal([64]), 'bc1')
query_conv1 = conv2d('conv1', query_words_out, wc1, bc1)
query_pool1 = full_max_pool('pool1', query_conv1, [0, 3, 2, 1])
doc_conv1 = conv2d('conv1', doc_words_out, wc1, bc1)
doc_pool1 = full_max_pool('pool1', doc_conv1, [0, 3, 2,1])
#conv kernel = 3
wc2 = tf.Variable(tf.random_normal([3, FLAGS.embedding_dim, 1, 64]), 'wc2')
bc2 = tf.Variable(tf.random_normal([64]), 'bc2')
query_conv2 = conv2d('conv2', query_words_out, wc2, bc2)
query_pool2 = full_max_pool('pool2', query_conv2, [0, 3, 2, 1])
doc_conv2 = conv2d('conv2', doc_words_out, wc2, bc2)
doc_pool2 = full_max_pool('pool2', doc_conv2, [0, 3, 2, 1])
#conv kernel = 4
wc3 = tf.Variable(tf.random_normal([4, FLAGS.embedding_dim, 1, 64]), 'wc3')
bc3 = tf.Variable(tf.random_normal([64]), 'bc3')
query_conv3 = conv2d('conv3', query_words_out, wc3, bc3)
query_pool3 = full_max_pool('pool3', query_conv3, [0, 3, 2, 1])
doc_conv3 = conv2d('conv3', query_words_out, wc3, bc3)
doc_pool3 = full_max_pool('pool3', doc_conv3, [0, 3, 2, 1])
query_pool_merge = tf.concat([query_pool1, query_pool2, query_pool3], 3)
query_pool_norm = tf.reshape(norm('conv_norm', query_pool_merge), [BS, 64*3])
doc_pool_merge = tf.concat([doc_pool1, doc_pool2, doc_pool3], 3)
doc_pool_norm = tf.reshape(norm('conv_norm', doc_pool_merge), [BS, 64*3])
with tf.name_scope('dense_layer_1'):
l1_par_range = np.sqrt(6.0 / (64*3 + L1_N))
wd1 = tf.Variable(tf.random_uniform([64*3, L1_N], -l1_par_range, l1_par_range))
bd1 = tf.Variable(tf.random_uniform([L1_N], -l1_par_range, l1_par_range))
query_l1 = tf.matmul(query_pool_norm, wd1) + bd1
doc_l1 = tf.matmul(doc_pool_norm, wd1) + bd1
query_l1_out = tf.nn.l2_normalize(tf.nn.relu(query_l1), 1)
doc_l1_out = tf.nn.l2_normalize(tf.nn.relu(doc_l1), 1)
with tf.name_scope('dense_layer_2'):
l2_par_range = np.sqrt(6.0 / (L1_N + L2_N))
wd2 = tf.Variable(tf.random_uniform([L1_N, L2_N], -l2_par_range, l2_par_range))
bd2 = tf.Variable(tf.random_uniform([L2_N], -l2_par_range, l2_par_range))
query_l2 = tf.matmul(query_l1_out, wd2) + bd2
doc_l2 = tf.matmul(doc_l1_out, wd2) + bd2
query_l2_out = tf.nn.l2_normalize(tf.nn.relu(query_l2), 1)
doc_l2_out = tf.nn.l2_normalize(tf.nn.relu(doc_l2), 1)
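# The block below builds in-batch negatives: the doc matrix is rotated by a random
# row offset NEG times and stacked, so each query is scored against its true doc
# (the first BS rows) plus NEG shuffled docs drawn from the same batch.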
with tf.name_scope('negative_sampling'):
temp = tf.tile(doc_l2_out, [1,1])
doc_y = tf.tile(doc_l2_out, [1,1])
for i in range(NEG):
rand = int((random.random() + i) * BS / NEG)
doc_y = tf.concat([doc_y, tf.slice(temp, [rand, 0], [BS - rand, -1]), tf.slice(temp, [0, 0], [rand, -1])], 0)
query_x = tf.tile(query_l2_out, [NEG + 1, 1])
with tf.name_scope('hidden_layer'):
hl1_par_range = np.sqrt(6.0 / (120*2 + 300))
wh1 = tf.Variable(tf.random_uniform([120*2, 300], -hl1_par_range, hl1_par_range), 'wh1')
bh1 = tf.Variable(tf.random_uniform([300], -hl1_par_range, hl1_par_range), 'bh1')
hl = tf.matmul(tf.concat([query_x, doc_y], 1), wh1) + bh1
hl_out = tf.nn.l2_normalize(tf.nn.relu(hl), 1)
with tf.name_scope('mlp_out'):
out_par_range = np.sqrt(6.0 / (300 + 1))
wo1 = tf.Variable(tf.random_uniform([300,1], -out_par_range, out_par_range), 'wo1')
bo1 = tf.Variable(tf.random_uniform([1], -out_par_range, out_par_range), 'bo1')
out_raw = tf.matmul(hl_out, wo1) + bo1
out = tf.transpose(tf.reshape(tf.transpose(out_raw), [NEG + 1, BS])) * 20
out_norm = tf.nn.l2_normalize(out, 1)
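# after the reshape, each row of out/out_norm holds one query's scores:
# column 0 is the true doc, columns 1..NEG are the sampled negatives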
with tf.name_scope('loss'):
np_y = np.zeros([BS, NEG+1])
np_y[:,0] += 1
    y_ = tf.constant(np_y, dtype=tf.float32)  # labels must match the logits' float dtype for softmax_cross_entropy_with_logits
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=out_norm, labels=y_))
pred = tf.equal(tf.argmax(out_norm, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(pred, tf.float32))
# prob = tf.nn.softmax(out_norm)
# hit_prob = tf.slice(prob, [0,0], [-1,1])
# loss = -tf.reduce_sum(tf.log(hit_prob)) / BS
with tf.name_scope('train'):
train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(loss)
merged = tf.summary.merge_all()
config = tf.ConfigProto()
config.gpu_options.allow_growth = False
saver = tf.train.Saver()
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
    # debug check (enable to inspect shapes; exiting here would skip training):
    # print(sess.run(tf.shape(query_x)))
    # exit()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train', sess.graph)
max_loss = float('INF')
for epoch in range(FLAGS.max_epoch):
for step in range(FLAGS.epoch_steps):
query_batch_data, doc_batch_data = get_batch_data(step)
acc = sess.run(accuracy, feed_dict={query_batch: query_batch_data, doc_batch: doc_batch_data})
sess.run(train_step, feed_dict={query_batch: query_batch_data, doc_batch: doc_batch_data})
ls = sess.run(loss, feed_dict={query_batch: query_batch_data, doc_batch: doc_batch_data})
print('Epoch %d, Step %d, loss: %f accuracy: %f' % (epoch+1, step+1, ls, acc))
# if ls < max_loss:
# saver_path = saver.save(sess, "model/dssm.ckpt")
|
[
"xu.baochuan@immomo.com"
] |
xu.baochuan@immomo.com
|
f2dbbabc6a8cac5070e8a12ab74f1a72f711378a
|
d440e9a427c3c594cd43fcdbeedcd36b6cb38b9d
|
/random_gen.py
|
15b2e0e435e68d955c733ac35ec7e812dc526425
|
[] |
no_license
|
omariba/Python_Stff
|
7e7d2c4ca7dab0ac50ca75660fd7db9386f64c52
|
3e55fb1b7cb45f00fed2bdf682fc8fadaac47075
|
refs/heads/master
| 2021-05-24T01:36:48.068838
| 2020-06-21T09:43:05
| 2020-06-21T09:43:05
| 83,025,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,023
|
py
|
import random
import os
import time
#generate once
#name = ""
#li = []
#while name != "00":
# name = raw_input("Items: ")
# if name == "00":
# pass
# else:
# li.append(name)
#li = ['Gitonga','Mandela','Kevin','Kimani','Cetric','Robert']
#def gen():
# random.shuffle(li)
# for i in li:
# print i
#gen()
#randomise from a file
def gen():
try:
import docx
except:
print "Please type the command 'pip install docx' to continue"
file_name = raw_input("Enter the file name to randomise words from: ")
if file_name.endswith("docx"):
acc = open(file_name,"r")
file_obj = acc.xreadlines()
open_docx = docx.opendocx(file_obj)
data = docx.getdocumenttext(open_docx)
random.shuffle(data)
for i in data:
if i == "\n":
pass
else:
print i
time.sleep(0.5)
else:
acc = open(file_name,"r")
li = acc.readlines()
acc.close()
random.shuffle(li)
for i in li:
if i == "\n":
pass
else:
print i
time.sleep(0.5)
gen()
#path = os.getcwd()
#file_in = path + file_name
|
[
"omariba5@gmail.com"
] |
omariba5@gmail.com
|
72bbb4fa95a412316afbdf8e94c4b6727eb7525a
|
59f5fe9b183a576bd31fb8db667d9e6fb3dd4b85
|
/v12_restricted_params/analyzers/analyze_hm_prevalence_by_density_bin.py
|
a146316825b58b001ca359c4a192e13b87c36fae
|
[] |
no_license
|
JoRussell-IDM/malaria-hm
|
639c4c958622581bd7dc24702e833805c3a18b63
|
0833ef444b74abe291c150773e8f3707532878ed
|
refs/heads/master
| 2022-11-24T22:12:06.711560
| 2020-07-27T20:04:23
| 2020-07-27T20:04:23
| 197,273,629
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,882
|
py
|
import os, re
import sqlite3
import pandas as pd
import numpy as np
import seaborn as sns
#Plotting
import matplotlib.pyplot as plt
from scipy.special import gammaln
from simtools.Analysis.BaseAnalyzers import BaseAnalyzer
from simtools.Analysis.AnalyzeManager import AnalyzeManager
from calibtool.analyzers.Helpers import season_channel_age_density_csv_to_pandas
from add_inputEIR_framework.reference_data.Garki_population_summary import *
from calibtool.LL_calculators import dirichlet_multinomial_pandas
def get_reference_data(self):
dir_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'reference data'))
dfFileName = os.path.join(dir_path, 'Garki_df.csv')
df = pd.read_csv(dfFileName)
ref_df = df[df['Village'] == self.metadata['village']]
density_bins = self.metadata['density_bins']
density_bin_edges = self.metadata['density_bin_edges']
uL_per_field = 0.5 / 200.0 # from Garki PDF - page 111 - 0.5 uL per 200 views
pfprBins = 1 - np.exp(-np.asarray(density_bins) * uL_per_field)
all_surveys_ref_df = pd.DataFrame(columns=['month','density_bin', 'count'])
df = ref_df
df['asexual_parasites'] = [x for x in df['Parasitemia']]
df['density_bin'] = pd.cut(df['Parasitemia'], bins=pfprBins, labels=density_bin_edges)
df['gametocytes'] = [x for x in df['Gametocytemia']]
survey_summary_df = pd.DataFrame(columns=['month', 'density_bin', 'count'])
for index, density_subset in df.groupby(['density_bin', 'Seasons']):
subset_df = pd.DataFrame({
'month': [index[1]],
'density_bin': [index[0]],
'count': [max(density_subset.shape[0], 0)]
})
survey_summary_df = pd.concat([survey_summary_df, subset_df])
all_surveys_ref_df = pd.concat([all_surveys_ref_df, survey_summary_df])
all_surveys_ref_df = all_surveys_ref_df[all_surveys_ref_df['month'].isin(['DH2', 'W2', 'DC2'])]
return all_surveys_ref_df
class Survey_Prevalence_by_Density_Analyzer(BaseAnalyzer):
def __init__(self,dates,years,verbose_plotting):
super().__init__(filenames=['output/MalariaSurveyJSONAnalyzer_Day_%d_0.json' %x for x in dates])
self.years = years
self.dates = dates
self.verbose_plotting = verbose_plotting
self.metadata = {
'density_bins': [0, 50, 200, 500, np.inf], # (, 0] (0, 50] ... (50000, ]
'density_bin_edges':['-1', '0', '50', '200', '500'],
'age_bins': [1, 4, 8, 18, 28, 43, np.inf], # (, 5] (5, 15] (15, ],
'age_bin_labels':['<1', '1-4', '4-8', '8-18', '18-28', '28-43', '>43'],
'survey_days':[365 * (self.years - 1) + x for x in np.arange(0, 365, 30)],
'seasons': ['DC2', 'DH2', 'W2'],
'seasons_by_month': {
'Apr': 'DH2',
'June/Aug': 'W2',
'Dec/Jan': 'DC2'
},
'village': 'Sugungum'
}
def select_simulation_data(self, data, simulation):
months = [
'DC2',
'Feb',
'Mar',
'Apr',
'DH2',
'Jun',
'Jul',
'Aug',
'W2',
'Oct',
'Nov',
'Dec'
]
density_bins = self.metadata['density_bins']
density_bin_edges = self.metadata['density_bin_edges']
survey_days = self.metadata['survey_days']
#loop through survey_report days
all_surveys_df = pd.DataFrame(columns=['survey','age_bin','density_bin','count'])
for day in range(len(survey_days)):
survey_data = data[self.filenames[day]]
numeric_day = int(self.filenames[day][-10:-7])
#loop through age_bins and then calculate the analyzer metrics across individuals
        #individuals' true ages will be their initial birthday plus time until report!
df = pd.DataFrame([x for x in survey_data['patient_array']])
df['asexual_parasites'] = [x[0] for x in df['asexual_parasites']]
df['density_bin'] = pd.cut(df['asexual_parasites'],bins = density_bins,labels = density_bin_edges)
df['gametocytes'] = [x[0] for x in df['gametocytes']]
df['asexual_positive'] = [bool(x>100) for x in df['asexual_parasites']]
survey_summary_df = pd.DataFrame(columns=['survey','month','density_bin', 'count'])
for index, density_subset in df.groupby(['density_bin']):
subset_df = pd.DataFrame({
'survey':[survey_days[day]],
'month': [months[day % 12]],
'density_bin':[index],
'count': [max(density_subset.shape[0],0)]
})
survey_summary_df = pd.concat([survey_summary_df,subset_df])
all_surveys_df = pd.concat([all_surveys_df,survey_summary_df])
return {'sim_df': all_surveys_df,
'years': self.years,
'parasitemia_bins': density_bins,
'survey_days':survey_days
}
def finalize(self, all_data):
# first extract the age bins from the reference data structure
cmap = [
'#3498DB',
'#1ABC9C',
'#16A085',
'#27AE60',
'#2ECC71',
'#F1C40F',
'#F39C12',
'#E67E22',
'#D35400',
'#C0392B',
'#8E44AD',
'#2980B9']
months = [
'Jan',
'Feb',
'Mar',
'Apr',
'May',
'Jun',
'Jul',
'Aug',
'Sep',
'Oct',
'Nov',
'Dec'
]
ref_data = get_reference_data(self)
#columns that I want are survey/month, age_bin, density_bin, count
#want to plot the reference data on the same axes as the sim binned data
#want to calc a distance metric between sim and ref (dirch multi?)
results = pd.DataFrame()
for sim, data in all_data.items():
sim_id = sim.id
sim.tags.get("__sample_index__")
sample = sim.tags.get("__sample_index__")
counts_by_density_bin = data['sim_df']
counts_by_density_bin.reset_index(drop=True, inplace=True)
counts_by_density_bin = counts_by_density_bin[counts_by_density_bin.month.isin(self.metadata['seasons'])]
#remove the first January from sim data, as data are reflected from the subsequent Jan in DC2
counts_by_density_bin.drop(
counts_by_density_bin[counts_by_density_bin['survey'] == 730].index, inplace=True)
counts_by_density_bin.drop(columns='survey', inplace=True)
df2 = pd.merge(ref_data, counts_by_density_bin, on=['density_bin', 'month'])
df2['ref_bin_pop'] = df2.groupby(['month']).transform(lambda x: x.sum())['count_x']
df2['sim_bin_pop'] = df2.groupby(['month']).transform(lambda x: x.sum())['count_y']
df2['ref'] = df2['count_x']/df2['ref_bin_pop']
df2['sim'] = df2['count_y'] / df2['sim_bin_pop']
df2['diff'] = df2['sim']-df2['ref']
norm = np.linalg.norm(df2['diff'],ord = 1)
sub_results = pd.DataFrame({'sample': sample,
'sim_id': sim_id,
'value': [norm]})
results = pd.concat([results, sub_results])
results.to_csv(os.path.join('..', 'iter0', 'analyzer_results.csv'))
######
if __name__ == '__main__':
years = 3
survey_days = [365 * (years - 1) + x for x in np.arange(0, 365, 30)]
am = AnalyzeManager('527ff53a-1d33-ea11-a2c3-c4346bcb1551',analyzers=Survey_Prevalence_by_Density_Analyzer(dates = survey_days,years=years,verbose_plotting=True))
am.analyze()
|
[
"jorussell@idmod.org"
] |
jorussell@idmod.org
|
db2dbb711547fe8932e6b1384e8f0ddf20b7b6e2
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2022_10_02_preview/operations/_managed_cluster_snapshots_operations.py
|
780040b2d4d8d43f5d366c8651b8fa732b2c35c4
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080
| 2023-06-08T02:53:04
| 2023-06-08T02:53:04
| 222,384,897
| 1
| 0
|
MIT
| 2023-09-08T08:38:48
| 2019-11-18T07:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 37,905
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-10-02-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedclustersnapshots"
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_by_resource_group_request(resource_group_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-10-02-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-10-02-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"resourceName": _SERIALIZER.url(
"resource_name",
resource_name,
"str",
max_length=63,
min_length=1,
pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-10-02-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"resourceName": _SERIALIZER.url(
"resource_name",
resource_name,
"str",
max_length=63,
min_length=1,
pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_tags_request(
resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-10-02-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"resourceName": _SERIALIZER.url(
"resource_name",
resource_name,
"str",
max_length=63,
min_length=1,
pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-10-02-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"resourceName": _SERIALIZER.url(
"resource_name",
resource_name,
"str",
max_length=63,
min_length=1,
pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
class ManagedClusterSnapshotsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2022_10_02_preview.ContainerServiceClient`'s
:attr:`managed_cluster_snapshots` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.ManagedClusterSnapshot"]:
"""Gets a list of managed cluster snapshots in the specified subscription.
Gets a list of managed cluster snapshots in the specified subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterSnapshot or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2022_10_02_preview.models.ManagedClusterSnapshot]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-10-02-preview"))
cls: ClsType[_models.ManagedClusterSnapshotListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ManagedClusterSnapshotListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedclustersnapshots"
}
@distributed_trace
def list_by_resource_group(
self, resource_group_name: str, **kwargs: Any
) -> Iterable["_models.ManagedClusterSnapshot"]:
"""Lists managed cluster snapshots in the specified subscription and resource group.
Lists managed cluster snapshots in the specified subscription and resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterSnapshot or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2022_10_02_preview.models.ManagedClusterSnapshot]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-10-02-preview"))
cls: ClsType[_models.ManagedClusterSnapshotListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ManagedClusterSnapshotListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots"
}
@distributed_trace
def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> _models.ManagedClusterSnapshot:
"""Gets a managed cluster snapshot.
Gets a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterSnapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_10_02_preview.models.ManagedClusterSnapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-10-02-preview"))
cls: ClsType[_models.ManagedClusterSnapshot] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ManagedClusterSnapshot", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}"
}
@overload
def create_or_update(
self,
resource_group_name: str,
resource_name: str,
parameters: _models.ManagedClusterSnapshot,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ManagedClusterSnapshot:
"""Creates or updates a managed cluster snapshot.
Creates or updates a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: The managed cluster snapshot to create or update. Required.
:type parameters:
~azure.mgmt.containerservice.v2022_10_02_preview.models.ManagedClusterSnapshot
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterSnapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_10_02_preview.models.ManagedClusterSnapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def create_or_update(
self,
resource_group_name: str,
resource_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ManagedClusterSnapshot:
"""Creates or updates a managed cluster snapshot.
Creates or updates a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: The managed cluster snapshot to create or update. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterSnapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_10_02_preview.models.ManagedClusterSnapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def create_or_update(
self,
resource_group_name: str,
resource_name: str,
parameters: Union[_models.ManagedClusterSnapshot, IO],
**kwargs: Any
) -> _models.ManagedClusterSnapshot:
"""Creates or updates a managed cluster snapshot.
Creates or updates a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: The managed cluster snapshot to create or update. Is either a
ManagedClusterSnapshot type or a IO type. Required.
:type parameters:
~azure.mgmt.containerservice.v2022_10_02_preview.models.ManagedClusterSnapshot or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterSnapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_10_02_preview.models.ManagedClusterSnapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-10-02-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ManagedClusterSnapshot] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "ManagedClusterSnapshot")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("ManagedClusterSnapshot", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("ManagedClusterSnapshot", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}"
}
@overload
def update_tags(
self,
resource_group_name: str,
resource_name: str,
parameters: _models.TagsObject,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ManagedClusterSnapshot:
"""Updates tags on a managed cluster snapshot.
Updates tags on a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: Parameters supplied to the Update managed cluster snapshot Tags operation.
Required.
:type parameters: ~azure.mgmt.containerservice.v2022_10_02_preview.models.TagsObject
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterSnapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_10_02_preview.models.ManagedClusterSnapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def update_tags(
self,
resource_group_name: str,
resource_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ManagedClusterSnapshot:
"""Updates tags on a managed cluster snapshot.
Updates tags on a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: Parameters supplied to the Update managed cluster snapshot Tags operation.
Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterSnapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_10_02_preview.models.ManagedClusterSnapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def update_tags(
self, resource_group_name: str, resource_name: str, parameters: Union[_models.TagsObject, IO], **kwargs: Any
) -> _models.ManagedClusterSnapshot:
"""Updates tags on a managed cluster snapshot.
Updates tags on a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: Parameters supplied to the Update managed cluster snapshot Tags operation.
Is either a TagsObject type or a IO type. Required.
:type parameters: ~azure.mgmt.containerservice.v2022_10_02_preview.models.TagsObject or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterSnapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_10_02_preview.models.ManagedClusterSnapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-10-02-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ManagedClusterSnapshot] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "TagsObject")
request = build_update_tags_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update_tags.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ManagedClusterSnapshot", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}"
}
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> None:
"""Deletes a managed cluster snapshot.
Deletes a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-10-02-preview"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}"
}
|
[
"noreply@github.com"
] |
openapi-env-test.noreply@github.com
|
cd767082d5f04442dad3e1070b7c41a12bbccef7
|
ae129b9f01d571bc4b31e7130a297af1ae1f5059
|
/Problem1_to_Problem23_March19/day_1.py
|
3e41b86176b386715a3f2259d3c4d0baac02774c
|
[] |
no_license
|
NightFury13/Daily_Coding_Problem
|
7ca720e937f00a78e0f3697205dc03f32ed73e88
|
2df97f0ef3cb3ffd3aa679b732e94c027ce0f96d
|
refs/heads/master
| 2020-04-27T05:40:39.804629
| 2019-07-02T08:47:39
| 2019-07-02T08:47:39
| 174,087,025
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 774
|
py
|
"""
Given a list of numbers and a number k, return whether any two numbers from the list add up to k.
For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.
"""
# Note: both approaches below already handle negative numbers as-is;
# shifting all elements by the minimum is only needed for counting/bitset-style variants
# Brute Force
def sum_true_bf(ele_list, k):
for i in range(len(ele_list)):
for j in range(i+1, len(ele_list)):
if ele_list[i]+ele_list[j] == k:
return True
return False
# Single Pass
def sum_true_f(ele_list, k):
diff_list = []
for i in range(len(ele_list)):
diff = k - ele_list[i]
if diff in diff_list:
return True
diff_list.append(ele_list[i])
return False
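# A minimal self-check with hypothetical inputs (matching the example in the docstring):
if __name__ == '__main__':
    nums = [10, 15, 3, 7]
    assert sum_true_bf(nums, 17) and sum_true_f(nums, 17)
    assert not sum_true_bf(nums, 100) and not sum_true_f(nums, 100)
    print("all checks passed")
# Note: storing seen elements in a set instead of the diff_list list would make
# the membership test O(1), keeping sum_true_f linear overall.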
|
[
"develop13mohit@gmail.com"
] |
develop13mohit@gmail.com
|
4cfc520b257272b8130223ccec432db4bd8eb04e
|
1c0a677b4be25e6be1ecd25c5115d49942c8e2d6
|
/3_ create_laplacian_tiles.py
|
518a5a68144f4243876d021fca4d4b245471f7ef
|
[] |
no_license
|
daghb/MSFS2020_CGLTools
|
d0248a0d8cd686140fcebea985804767ed988863
|
9e131f67f14d1c9243c35e4d454dd8b71158a81e
|
refs/heads/main
| 2023-02-03T13:10:12.633550
| 2020-12-13T21:29:23
| 2020-12-13T21:29:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,223
|
py
|
import multiprocessing as mp
import numpy as np
import cv2
import os
import misc
import struct
import bingtile
from matplotlib import pyplot as plt
import math
def loadToNPArray(qkey):
beginningcoords = bingtile.QuadKeyToTileXY(qkey+'0')
xoffset = -1
yoffset = -1
loadingarr = np.zeros((1024, 1024), np.int16)
found = False
while yoffset < 3:
while xoffset < 3:
subqkey = bingtile.TileXYToQuadKey(
beginningcoords[0]+xoffset, beginningcoords[1]+yoffset, beginningcoords[2])
if os.path.isfile("Tile/"+str(len(subqkey))+"/dem_"+subqkey+".rw"):
infile = open("Tile/"+str(len(subqkey)) +
"/dem_"+subqkey+".rw", 'rb')
tilearr = infile.read()
infile.close()
nparr = np.frombuffer(tilearr, np.int16).reshape((257, 257))
x_offset = (xoffset+1)*256
y_offset = (yoffset+1)*256
loadingarr[y_offset:y_offset+nparr.shape[0]-1,
x_offset:x_offset+nparr.shape[1]-1] = nparr[0:256, 0:256]
found = True
xoffset += 1
xoffset = -1
yoffset += 1
return found, loadingarr
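# loadToNPArray stitches a 1024x1024 canvas from the 4x4 neighbourhood of 256-px
# sub-tiles around the given quadkey, so the blur/downsample below sees context
# across tile borders instead of hard edges.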
def createLevelTileAndSubDeltas(qkey):
found, nparrupper = loadToNPArray(qkey)
if found:
down = np.zeros((512, 512), np.int16)
# down = cv2.pyrDown(nparrupper)
blur = cv2.GaussianBlur(nparrupper, (7, 7), 32)
itrr = 0
itrc = 0
while itrc < 512:
while itrr < 512:
down[itrc, itrr] = blur[itrc*2, itrr*2]
itrr += 1
itrr = 0
itrc += 1
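        # (equivalent vectorized form: down = blur[::2, ::2])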
downpadded = np.zeros((257, 257), np.int16)
downpadded[0:257, 0:257] = down[128:128+257, 128:128+257]
saveTile("Tile", qkey, downpadded)
        up = cv2.pyrUp(down)  # upsample back to 1024x1024
        delta = np.subtract(nparrupper, up)  # full-resolution residual (the Laplacian layer)
delta0 = np.zeros((257, 257), np.int16)
        delta0[0:257, 0:257] = delta[256:256+257, 256:256+257]
        delta1 = np.zeros((257, 257), np.int16)
        delta1[0:257, 0:257] = delta[256:256+257, 512:512+257]
delta2 = np.zeros((257, 257), np.int16)
delta2[0:257, 0:257] = delta[512:512+257, 256:256+257]
delta3 = np.zeros((257, 257), np.int16)
delta3[0:257, 0:257] = delta[512:512+257, 512:512+257]
saveDelta("Delta", qkey+str(0), delta0, 1/4, 0)
saveDelta("Delta", qkey+str(1), delta1, 1/4, 0)
saveDelta("Delta", qkey+str(2), delta2, 1/4, 0)
saveDelta("Delta", qkey+str(3), delta3, 1/4, 0)
def saveTile(type, qkey, data):
filename = type+"\\"+str(len(qkey))+"\\"+"dem_"+qkey+".rw"
os.makedirs(os.path.dirname(filename), exist_ok=True)
outfile = open(filename, 'wb')
outfile.write(data)
outfile.close()
def saveDelta(type, qkey, data, multi, offsetm):
offset = (math.floor(offsetm/multi)).to_bytes(2, 'little', signed=True)
heightscale = bytearray(struct.pack("f", multi))
header = bytes(heightscale)+offset+(16).to_bytes(1, 'little')
filename = type+"\\"+str(len(qkey))+"\\"+"dem_"+qkey+".rw"
os.makedirs(os.path.dirname(filename), exist_ok=True)
outfile = open(filename, 'wb')
outfile.write(header)
outfile.write(data)
outfile.close()
def to8bit(qkey):
infile = open("Tile/"+str(len(qkey))+"/dem_"+qkey+".rw", 'rb')
basetile0 = infile.read()
infile.close()
nparr = np.frombuffer(basetile0, np.int16).reshape((257, 257))
saveDelta("Tile", qkey, nparr, 1/4, -20)
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
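# e.g. list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]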
def createLevelTileAndSubDeltasChunk(idx, chk):
for qkey in chk:
createLevelTileAndSubDeltas(qkey)
return 'OK'
def collect_result(result):
return
qkeybase = '102231'
qkeyx,qkeyy,qkeylvl=bingtile.QuadKeyToTileXY(qkeybase)
qkeystoprocess=[]
tilesx=1
tilesy=1
padleft=0
padtop=0
padright=0
padbottom=0
idx=(-1)*padleft
idy=(-1)*padtop
while idy<tilesy+padbottom:
while idx<tilesx+padright:
qkeystoprocess.append(bingtile.TileXYToQuadKey(qkeyx+idx,qkeyy+idy,qkeylvl))
idx+=1
    idx=(-1)*padleft
idy+=1
minlevel = 6
maxlevel = 6
level = maxlevel
if __name__ == '__main__':
while level >= minlevel:
for qk in qkeystoprocess:
subqkeys = []
qkey = qk
while len(qkey) < level:
qkey = qkey+'0'
while qkey.startswith(qk):
print(qkey)
subqkeys.append(qkey)
qkey = misc.QuadKeyIncrement(qkey)
parts = int(len(subqkeys)/32)
if parts == 0:
parts = 1
subchunks = chunks(subqkeys, parts)
pool = mp.Pool(int(mp.cpu_count()))
for idx, chunk in enumerate(subchunks):
pool.apply_async(createLevelTileAndSubDeltasChunk, args=(
idx, chunk), callback=collect_result)
pool.close()
pool.join()
level -= 1
for qk in qkeystoprocess:
to8bit(qk)
|
[
"teemu.koskinen11@gmail.com"
] |
teemu.koskinen11@gmail.com
|
68a632e6d578d57dccf5339542c7e2ff1ba25375
|
c9b1aee50b27fb6e761e044d913b6a988f748b07
|
/parajo_api/crawler/migrations/0029_auto_20201019_1241.py
|
b3ff845eee1211e57507299ee9c22e0e54152718
|
[] |
no_license
|
namhyonwoo/crwaler_python
|
d7d27034482c8410031ce67724e9a8e280db8d98
|
0f4f7bae96ec86320bb784204c863a832f0a10bb
|
refs/heads/master
| 2023-03-03T06:41:34.885000
| 2021-02-11T02:29:40
| 2021-02-11T02:29:40
| 337,906,979
| 0
| 0
| null | 2021-02-11T02:29:41
| 2021-02-11T02:04:28
|
Python
|
UTF-8
|
Python
| false
| false
| 396
|
py
|
# Generated by Django 3.0.4 on 2020-10-19 12:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crawler', '0028_auto_20201019_1219'),
]
operations = [
migrations.AlterField(
model_name='carinfo',
name='regdate',
field=models.DateTimeField(auto_now_add=True),
),
]
|
[
"cuya123@hotmail.com"
] |
cuya123@hotmail.com
|
00c6e73ae703d2a2d28f6d366926df631eb78844
|
89af3b0225044511936ef25ef90c4e9b152e29bf
|
/LoginScreen.py
|
6dc881ed115c113d3ad319aec26edbeb840388eb
|
[] |
no_license
|
zaheenrahman/EnterpriseDatabase
|
bd3b861b225d8e3f90a5fc6d97b52202c80ccc2c
|
16d8c33a4edd4911bbb09bcfb932d4d41181e214
|
refs/heads/master
| 2020-04-02T15:02:36.391606
| 2018-10-24T18:35:14
| 2018-10-24T18:35:14
| 154,549,718
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,054
|
py
|
import sqlite3
import sys
import re
import os
running = True
conn = sqlite3.connect('a3.db')
c = conn.cursor()
while running:
os.system('clear')
print("Welcome to Ride Services")
print("0 - Exit ")
print("1 - Login ")
print("2 - Register \n")
while True:
inp = input("Enter desired integer to begin \n")
if inp.isdigit():
if 0 <= int(inp) <= 2:
break
print("Invalid input, please try again. \n")
# If inp is 0, exit:
if(int(inp) == 0):
conn.commit()
sys.exit()
# If inp is 1, login:
if(int(inp) == 1):
while True:
meme = input("Please enter your email: ")
if re.match("^([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})$", meme):
c.execute("""SELECT * FROM members WHERE email=?;""", (meme,))
useremail = c.fetchone()
print(useremail)
if useremail is None:
print("Invalid email")
break
count = 3
while count != 0:
pwd = input("Please enter your password: ")
c.execute("""SELECT * FROM members WHERE email=? and pwd=?;""", (meme,pwd))
data = c.fetchone()
if data is not None:
break
count = count - 1
print("Invalid password please try again "+str(count)+" tries left.")
running = False
break
if(meme.isdigit()):
if(int(meme) == 0):
break
print("Invalid email, please try again. 0 to Main Menu\n")
    if data is None:
        # no successful login (invalid email or too many wrong passwords)
        continue
    key = data[0]
    print(data)
c.execute("""SELECT * from inbox where email = ? and seen = "N" order by msgTimestamp;""", (key,))
messagetable = c.fetchall()
print("Welcome "+data[1]+"\nHere are your unseen messages: ")
for x in range(len(messagetable)):
print("Message "+str(x+1))
print(messagetable[x][3])
|
[
"zaheen@ualberta.ca"
] |
zaheen@ualberta.ca
|
2832e0c72e4f9af8d41fd853db00b82738826edd
|
95e9ec4b3b0d86063da53a0e62e138cf794cce3a
|
/python/20190427/demo02.py
|
c12c4e18e989e562f820361bdbf9f7e5309aa93b
|
[] |
no_license
|
wjl626nice/1902
|
c3d350d91925a01628c9402cbceb32ebf812e43c
|
5a1a6dd59cdd903563389fa7c73a283e8657d731
|
refs/heads/master
| 2023-01-05T23:51:47.667675
| 2019-08-19T06:42:09
| 2019-08-19T06:42:09
| 180,686,044
| 4
| 1
| null | 2023-01-04T07:35:24
| 2019-04-11T00:46:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,813
|
py
|
# Generator expression
y = (pow(a, b) for a in range(0, 6) for b in range(1, 3))
print(y)  # prints a <generator object ...>; the values are produced lazily
# Generating the Fibonacci sequence with a regular function
def f(n):
    # x is the value before last, y is the previous value, z is the current value
    x, y, z = 0, 1, 1
    L = []
    while len(L) < n:
        # the previous value becomes the one before last,
        # and the current value becomes the previous one
        x, y = y, z
        # the new current value equals the new previous value + the new value before last
        z = x + y
        # append the new value-before-last to the list
        L.append(x)
    return L
print(f(15))
# Generator function using yield
def f():
    # x is the value before last, y is the previous value, z is the current value
    x, y, z = 0, 1, 1
    while True:
        # the previous value becomes the one before last,
        # and the current value becomes the previous one
        x, y = y, z
        yield x
        # the new current value equals the new previous value + the new value before last
        z = x + y
fff = f()
print(fff)
# print(next(fff))
# iterate over the generator with a loop
# for i in fff:
#     print('%d\n' % i)
# A generator with a finite number of items:
def f(n):
x, y, z = 0, 1, 1
L = []
while len(L) < n:
x, y = y, z
yield x
z = x + y
L.append(x)
fff = f(15)
# iterate over the generator with a loop
for i in fff:
print(i)
# With a finite generator, calling next() past the last item raises StopIteration,
# whereas iterating with a for loop handles the end without raising.
mmm = f(3)
print(next(mmm))
print(next(mmm))
print(next(mmm))
# print(next(mmm))
print("-----------内存比较------------")
import sys
# 无论15个还是150000个,其实内存使用无变化
fff = f(10000)
print(sys.getsizeof(fff))
ggg = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
print(sys.getsizeof(ggg))
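# A generator object stores only its current iteration state, so sys.getsizeof()
# reports a small constant size; the hard-coded list above grows with its length.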
|
[
"18537160262@qq.com"
] |
18537160262@qq.com
|
9040e40cc5434ee777a737e88216932abf411ca9
|
3845b4147d50e60d0c9caf8bf513ef37296f15f8
|
/imobi/apps/imoveis/models.py
|
0e3037985743590729d73cc51675a1dbf838a732
|
[] |
no_license
|
caiokeidi/imobi
|
e0480f22c0e9f0920119d4552541415ed87adad7
|
09f84d9ae49d67f547c9aea11db1ff1d6b412718
|
refs/heads/main
| 2023-03-06T14:38:42.363923
| 2021-02-18T00:23:30
| 2021-02-18T00:23:30
| 333,255,327
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,429
|
py
|
from django.db import models
from django.contrib.auth.models import User
class Imoveis(models.Model):
corretor = models.ForeignKey(User, on_delete=models.CASCADE, default=1)
cliente = models.CharField(max_length=50)
cidade = models.CharField(max_length=50)
bairro = models.CharField(max_length=50)
rua = models.CharField(max_length=50)
numero = models.CharField(max_length=10)
complemento = models.CharField(max_length=50, blank=True, null=True)
tipo_negocio = models.CharField(max_length=20)
tipo_imovel = models.CharField(max_length=25)
valor_aluguel = models.FloatField(blank=True, null=True)
valor_venda = models.FloatField(blank=True, null=True)
valor_iptu = models.FloatField(blank=True, null=True)
valor_condominio = models.FloatField(blank=True, null=True)
area = models.IntegerField()
quartos = models.IntegerField()
suites = models.IntegerField()
banheiros = models.IntegerField()
vagas = models.IntegerField()
andar = models.IntegerField(blank=True, null=True)
metro_proximo = models.BooleanField(default=False)
mobiliado = models.BooleanField(default=False)
descricao = models.TextField(max_length=1500)
publicado = models.BooleanField()
class imagens(models.Model):
imoveis = models.ForeignKey(Imoveis, on_delete=models.CASCADE)
foto = models.ImageField(upload_to='fotos/%d/%m/%Y/', blank=True)
|
[
"caiokeidi@gmail.com"
] |
caiokeidi@gmail.com
|
bb00304ff79b59c42cd647344afc669f7c319ea0
|
ace463d620bddab8a547d9f944786025766307e4
|
/zeets/migrations/0007_zeet_users_like.py
|
89a83b9b19685f385f48422e7abc45785cdf23b8
|
[] |
no_license
|
raul-jr3/zitter
|
2cfba13fd15c9e9dcd53106e665dae6c51a9612c
|
da0b7c0a32f112a320b6df1360a4274a0ba10bb6
|
refs/heads/master
| 2020-12-02T19:27:26.133636
| 2017-07-11T10:22:45
| 2017-07-11T10:22:45
| 96,343,305
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-11 07:09
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('zeets', '0006_auto_20170707_0749'),
]
operations = [
migrations.AddField(
model_name='zeet',
name='users_like',
field=models.ManyToManyField(blank=True, related_name='zeets_liked', to=settings.AUTH_USER_MODEL),
),
]
|
[
"rahul.srivatsav1995@gmail.com"
] |
rahul.srivatsav1995@gmail.com
|
fd640754b23fe8942350bbf848a5afa02c5a4a50
|
2a2ef2d0b9b0a10c016a2f3ecb0b40a1a87d50b2
|
/Look n Say/look_n_say.py
|
479b67417877c9e7c0dc439baececc8e3095f5f6
|
[] |
no_license
|
Delpire/daily-programmer
|
0f8b000d431eb2847a7884a74f039be08a5e1ca7
|
573d49191045f1c9c99c61383b020e4914930839
|
refs/heads/master
| 2020-04-06T04:55:05.442997
| 2014-09-16T03:37:45
| 2014-09-16T03:37:45
| 22,363,605
| 0
| 0
| null | 2015-09-10T21:53:35
| 2014-07-29T01:27:20
|
Python
|
UTF-8
|
Python
| false
| false
| 791
|
py
|
import sys
import re
seed = ""
try:
N = int(sys.argv[1])
if len(sys.argv) > 2:
seed = str(sys.argv[2])
except:
print("usage: look_n_say.py N [--seed]")
print("\npositional arguments:\n N\t number of iterations.")
print("\noptional arguments:\n --seed\t starting seed.")
sys.exit()
if seed == "":
seed = "1"
word = seed
for _ in range(N):
print(word)
length = len(word)
current_amount = 0
current_number = word[0]
new_word = ""
for number in word:
if number == current_number:
current_amount += 1
else:
new_word += str(current_amount)
new_word += current_number
current_number = number
current_amount = 1
new_word += str(current_amount)
new_word += current_number
word = new_word
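# For reference, the counting step above can also be written with itertools.groupby
# (an alternative sketch, not part of the original solution):
# import itertools
# def next_word(w):
#     return "".join("%d%s" % (len(list(g)), k) for k, g in itertools.groupby(w))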
|
[
"chrisdelpire@gmail.com"
] |
chrisdelpire@gmail.com
|
f74d2e50f0b9251508a3ba77f471021c10df8b75
|
03fa19025a75815a55618bffad4021080bdefcdd
|
/tello_drone.py
|
3b22621991331ff113de12d336a706875521b9d4
|
[] |
no_license
|
Iscaraca/Tello_Workshop
|
90fd8667023a6ea960c0a4546427887419932f56
|
046470919dcb2a432f24ab7ef01336bdf4a502fe
|
refs/heads/main
| 2023-01-21T07:00:44.549121
| 2020-11-27T00:57:59
| 2020-11-27T00:57:59
| 314,851,198
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,133
|
py
|
import socket
import threading
import cv2 as cv
class Tello:
"""
Handles connection to the DJI Tello drone
"""
def __init__(self, local_ip, local_port, is_dummy=False, tello_ip='192.168.10.1', tello_port=8889):
"""
Initializes connection with Tello and sends both command and streamon instructions
in order to start it and begin receiving video feed.
"""
        self.background_frame_read = None
        self.cap = None  # VideoCapture is created lazily by BackgroundFrameRead / get_video_capture()
        self.response = None
        self.abort_flag = False
        self.is_dummy = is_dummy
if not is_dummy:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.tello_address = (tello_ip, tello_port)
self.local_address = (local_ip, local_port)
self.send_command('command')
# self.socket.sendto(b'command', self.tello_address)
print('[INFO] Sent Tello: command')
self.send_command('streamon')
# self.socket.sendto(b'streamon', self.tello_address)
print('[INFO] Sent Tello: streamon')
self.send_command('takeoff')
# self.socket.sendto(b'takeoff', self.tello_address)
print('[INFO] Sent Tello: takeoff')
self.move_up(160)
# thread for receiving cmd ack
self.receive_thread = threading.Thread(target=self._receive_thread)
self.receive_thread.daemon = True
self.receive_thread.start()
def __del__(self):
"""
Stops communication with Tello
"""
if not self.is_dummy:
self.socket.close()
def _receive_thread(self):
"""
Listen to responses from the Tello.
Runs as a thread, sets self.response to whatever the Tello last returned.
"""
while True:
try:
self.response, ip = self.socket.recvfrom(3000)
except socket.error as exc:
print (f"Caught exception socket.error: {exc}")
def send_command(self, command):
"""
Send a command to the Tello and wait for a response.
:param command: Command to send.
:return (str): Response from Tello.
"""
self.abort_flag = False
timer = threading.Timer(0.5, self.set_abort_flag)
self.socket.sendto(command.encode('utf-8'), self.tello_address)
timer.start()
while self.response is None:
if self.abort_flag is True:
break
timer.cancel()
if self.response is None:
response = 'none_response'
else:
response = self.response.decode('utf-8')
self.response = None
return response
def send_command_without_response(self, command):
"""
Sends a command without expecting a response. Useful when sending a lot of commands.
"""
if not self.is_dummy:
self.socket.sendto(command.encode('utf-8'), self.tello_address)
def set_abort_flag(self):
"""
Sets self.abort_flag to True.
        Used by the timer in Tello.send_command() to indicate that a response
        timeout has occurred.
"""
self.abort_flag = True
def move_up(self, dist):
"""
        Sends up command to Tello without expecting a response.
        :param dist: Distance in centimeters in the range 20 - 500.
"""
self.send_command_without_response(f'up {dist}')
def move_down(self, dist):
"""
        Sends down command to Tello without expecting a response.
        :param dist: Distance in centimeters in the range 20 - 500.
"""
self.send_command_without_response(f'down {dist}')
def move_right(self, dist):
"""
        Sends right command to Tello without expecting a response.
        :param dist: Distance in centimeters in the range 20 - 500.
"""
self.send_command_without_response(f'right {dist}')
def move_left(self, dist):
"""
        Sends left command to Tello without expecting a response.
        :param dist: Distance in centimeters in the range 20 - 500.
"""
self.send_command_without_response(f'left {dist}')
def move_forward(self, dist):
"""
Sends forward command to Tello without expecting a return.
:param dist: Distance in centimeters in the range 20 - 500.
"""
self.send_command_without_response(f'forward {dist}')
def move_backward(self, dist):
"""
Sends backward command to Tello without expecting a return.
:param dist: Distance in centimeters in the range 20 - 500.
"""
self.send_command_without_response(f'back {dist}')
def rotate_cw(self, deg):
"""
        Sends cw command to Tello to rotate clockwise, without expecting a response.
        :param deg: Degrees between 0 and 360.
"""
self.send_command_without_response(f'cw {deg}')
def rotate_ccw(self, deg):
"""
        Sends ccw command to Tello to rotate counter-clockwise, without expecting a response.
        :param deg: Degrees between 0 and 360.
"""
self.send_command_without_response(f'ccw {deg}')
def get_udp_video_address(self):
"""
Gets the constructed udp video address for the drone
:return (str): The constructed udp video address
"""
return f'udp://{self.tello_address[0]}:11111'
def get_frame_read(self):
"""
Get the BackgroundFrameRead object from the camera drone. Then, you just need to call
backgroundFrameRead.frame to get the actual frame received by the drone.
:return (BackgroundFrameRead): A BackgroundFrameRead with the video data.
"""
if self.background_frame_read is None:
if self.is_dummy:
self.background_frame_read = BackgroundFrameRead(self, 0).start()
else:
self.background_frame_read = BackgroundFrameRead(self, self.get_udp_video_address()).start()
return self.background_frame_read
def get_video_capture(self):
"""
Get the VideoCapture object from the camera drone
:return (VideoCapture): The VideoCapture object from the video feed from the drone.
"""
if self.cap is None:
if self.is_dummy:
self.cap = cv.VideoCapture(0)
else:
self.cap = cv.VideoCapture(self.get_udp_video_address())
if not self.cap.isOpened():
if self.is_dummy:
self.cap.open(0)
else:
self.cap.open(self.get_udp_video_address())
return self.cap
def end(self):
"""
Call this method when you want to end the tello object
"""
# print(self.send_command('battery?'))
if not self.is_dummy:
self.send_command('land')
if self.background_frame_read is not None:
self.background_frame_read.stop()
# It appears that the VideoCapture destructor releases the capture, hence when
# attempting to release it manually, a segmentation error occurs.
# if self.cap is not None:
# self.cap.release()
class BackgroundFrameRead:
"""
This class read frames from a VideoCapture in background. Then, just call backgroundFrameRead.frame to get the
actual one.
"""
def __init__(self, tello, address):
"""
Initializes the Background Frame Read class with a VideoCapture of the specified
address and the first frame read.
:param tello: An instance of the Tello class
:param address: The UDP address through which the video will be streaming
"""
tello.cap = cv.VideoCapture(address)
self.cap = tello.cap
if not self.cap.isOpened():
self.cap.open(address)
self.grabbed, self.frame = self.cap.read()
self.stopped = False
def start(self):
"""
Starts the background frame read thread.
        :return (BackgroundFrameRead): The current BackgroundFrameRead
"""
threading.Thread(target=self.update_frame, args=()).start()
return self
def update_frame(self):
"""
Sets the current frame to the next frame read from the source.
"""
while not self.stopped:
if not self.grabbed or not self.cap.isOpened():
self.stop()
else:
(self.grabbed, self.frame) = self.cap.read()
def stop(self):
"""
Stops the frame reading.
"""
self.stopped = True
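# --- Illustrative usage (not part of the original module; the IP and port below
# --- are placeholders, and a real Tello must be reachable on its Wi-Fi network):
# if __name__ == '__main__':
#     drone = Tello('0.0.0.0', 9000)
#     frame_read = drone.get_frame_read()
#     drone.rotate_cw(90)
#     cv.imshow('tello', frame_read.frame)
#     cv.waitKey(0)
#     drone.end()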
|
[
"noreply@github.com"
] |
Iscaraca.noreply@github.com
|
d4edb863c7e252634120243537e4f30cf8a810a3
|
08615c64a62fc364a802bb92314cf49080ddbcee
|
/django学习/5.3/test5/test5/urls.py
|
5764ca7d2367edae908e79907eeb63a3f4d95cf1
|
[] |
no_license
|
xiangys0134/python_study
|
afc4591fca1db6ebddf83f0604e35ed2ef614728
|
6ec627af7923b9fd94d244c561297ccbff90c1e9
|
refs/heads/master
| 2023-02-24T01:24:45.734510
| 2022-10-29T02:11:20
| 2022-10-29T02:11:20
| 143,358,792
| 2
| 0
| null | 2023-02-08T03:07:26
| 2018-08-03T00:43:46
|
Python
|
UTF-8
|
Python
| false
| false
| 807
|
py
|
"""test5 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,re_path,include
urlpatterns = [
path('admin/', admin.site.urls),
re_path('^', include('booktest.urls')),
]
|
[
"you@example.com"
] |
you@example.com
|
9f128b640f5e7e4f27ef05a3b485831b2a0a90fa
|
5fcdfdd75ba66cee5601740ddc8f18740da93b2f
|
/blogs/models.py
|
bd684fe27559c5582032509cb47717da0404758d
|
[] |
no_license
|
pioneerwxf/markpro
|
90a28782f7ca66f793d80fc3d0a7969741446c19
|
76a879674f0cf118661fdf3b9622ccd0210f4681
|
refs/heads/master
| 2021-01-10T01:00:50.411973
| 2012-07-14T03:07:21
| 2012-07-14T03:07:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
from django.db import models
from brands.models import Brand
class Blog(models.Model):
type = models.CharField(max_length = 50)
url = models.URLField()
brand = models.ForeignKey(Brand, related_name="blogs")
|
[
"wangxianfeng.me@gmail.com"
] |
wangxianfeng.me@gmail.com
|
e35cc9abc13e861286e4f0ba696aa7a0b9075f80
|
8600ea155f279e5a8dfe5a1926038511f6b6a7ea
|
/account_date_check/__init__.py
|
d134542ed13c49ec8b2c6bdf6d51c316a2c308f2
|
[] |
no_license
|
MarkNorgate/addons-EAD
|
c2fff89ab16fce3ba19fbe433ee5863705a6f4e5
|
840f28642b5d328e4b86839c413e5164622295a5
|
refs/heads/master
| 2020-04-23T22:11:00.164438
| 2015-07-22T12:24:53
| 2015-07-22T12:24:53
| 39,501,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,218
|
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#----------------------------------------------------------
# Init Sales
#----------------------------------------------------------
import account_date_check
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"mark.norgate@affinity-digital.com"
] |
mark.norgate@affinity-digital.com
|
19039e3383073a51120c30ccb9ea6ccd87e2ed02
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/7soAnzpZToSxztnDr_8.py
|
8da9146cad5d4051a555f2260572b45848bad196
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
"""
The **shift left** operation is similar to **multiplication by powers of
two**. This can also be achieved with repetitive addition, thus, the process
can be done **recursively**.
Sample calculation using the shift left operator `<<`:
10 << 3 = 10 * 2^3 = 10 * 8 = 80
-32 << 2 = -32 * 2^2 = -32 * 4 = -128
5 << 2 = 5 * 2^2 = 5 * 4 = 20
Create a **recursive** function that mimics the shift left operator and
returns the result from the two given integers.
### Examples
shift_left(5, 2) ➞ 20
shift_left(10, 3) ➞ 80
shift_left(-32, 2) ➞ -128
shift_left(-6, 5) ➞ -192
shift_left(12, 4) ➞ 192
shift_left(46, 6) ➞ 2944
### Notes
* There will be no negative values for the second parameter `y`.
* You're expected to solve this challenge using a **recursive approach**.
* You can read on more topics about recursion (see **Resources** tab) if you aren't familiar with it yet or haven't fully understood the concept behind it before taking up this challenge.
"""
def shift_left(x, y):
if y == 0:
return x
return shift_left(2 * x, y - 1)
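# Quick sanity check (illustrative, not part of the original solution):
# shift_left should agree with Python's built-in << operator for y >= 0.
# assert all(shift_left(x, y) == x << y for x, y in [(5, 2), (10, 3), (-32, 2), (46, 6)])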
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
eefc1b93a13d6621a047c11086793e12600144b1
|
cbbff22cf94f7a38926213a0c703fb45444ab292
|
/TSP/TSP_lookup.py
|
45ee5978d49897c07fa7cbd0f086217452fde22f
|
[] |
no_license
|
netninja7/ducking-octo-hipster
|
5999fdcee48e036983e79aebc5d1341cfee406f8
|
c28f6e8aae39677550cf33cbd4b6fe465f35f088
|
refs/heads/master
| 2016-09-10T08:23:33.776826
| 2013-10-03T22:46:14
| 2013-10-03T22:46:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
def get_xy (city) :
if city == 1 :
return [0,0]
elif city == 2 :
return [0,1]
elif city == 3 :
return [0,2]
elif city == 4 :
return [0,3]
elif city == 5 :
return [0,4]
elif city == 6 :
return [1,4]
elif city == 7 :
return [1,3]
elif city == 8 :
return [1,2]
elif city == 9 :
return [1,1]
elif city == 10 :
return [1,0]
def get1_xy (city) :
if city == 1 :
return [0,0]
elif city == 2 :
return [5,7]
elif city == 3 :
return [6,10]
elif city == 4 :
return [7,8]
elif city == 5 :
return [9,1]
elif city == 6 :
return [3,3]
elif city == 7 :
return [3,8]
elif city == 8 :
return [9,2]
elif city == 9 :
return [10,10]
elif city == 10 :
return [1,0]
|
[
"netninja1000101@gmail.com"
] |
netninja1000101@gmail.com
|
7a8121096a6b913873e161a5f8bc001e89956272
|
0527aea33ec57704edbc2e1ec9943f009590a924
|
/myvenv/bin/gunicorn
|
de922a350c7222771209cba25cca67f6d2b5b992
|
[] |
no_license
|
hyeonJeongByeon/Wordcount_Deploy_Heroku
|
1d17d5b8085cbe1c6db70d981c0fc33007e73c41
|
9434d04e88279df46ef162ec1acd4e0c184390ee
|
refs/heads/master
| 2023-04-28T20:22:12.400867
| 2019-07-31T01:51:19
| 2019-07-31T01:51:19
| 199,744,658
| 0
| 0
| null | 2023-04-21T20:34:38
| 2019-07-30T23:57:23
|
Python
|
UTF-8
|
Python
| false
| false
| 279
|
#!/Users/byeonhyeonjeong/Downloads/wordcountsession-master/myvenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"jungjung1218@gmail.com"
] |
jungjung1218@gmail.com
|
|
462cff217a50798d3667a263c07b6f97f12a4ff2
|
dacb257a90310eba03f3128221120a7d54b894ba
|
/pysmartnode/components/switches/remote433mhz.py
|
9aa9b0303e31540327de81626b17cfe2feccf6e5
|
[
"MIT"
] |
permissive
|
SiChiTong/pysmartnode
|
92351efa02e52aa84185a53896957c453b12540a
|
a0998ad6582a28fe5a0529fb15dd4f61e254d25f
|
refs/heads/master
| 2023-01-05T10:00:14.907988
| 2020-09-01T10:07:45
| 2020-09-01T10:07:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,123
|
py
|
# Author: Kevin Köck
# Copyright Kevin Köck 2020 Released under the MIT license
# Created on 2020-03-31
"""
example config:
{
package: .switches.433mhz
component: Switch433Mhz
constructor_args: {
pin: 21 # pin number or object
file: "filename" # filename where the captured sequences are stored. Has to be uploaded manually!
name_on: "on_a" # name of the sequence for turning the device on
name_off: "off_a" # name of the sequence for turning the device off
# reps: 5 # optional, amount of times a frame is being sent
}
}
Control 433Mhz devices (e.g. power sockets) with a cheap 433Mhz transmitter.
Uses the excellent library from Peter Hinch: https://github.com/peterhinch/micropython_remote
For this to work you need to have sequences captured and stored on the device.
How to do that is described in his repository.
Note: This component only works on the devices supported by Peter Hinch's library!
(esp32, pyboards but not esp8266).
Be careful with "reps", the amount of repetitions, as this currently uses a lot of RAM.
NOTE: additional constructor arguments are available from base classes, check COMPONENTS.md!
"""
__updated__ = "2020-04-03"
__version__ = "0.2"
from pysmartnode import config
from pysmartnode.utils.component.switch import ComponentSwitch
from pysmartnode.libraries.micropython_remote.tx import TX
from pysmartnode.components.machine.pin import Pin
import json
import uasyncio as asyncio
import machine
####################
COMPONENT_NAME = "433MhzRemote"
####################
_mqtt = config.getMQTT()
_unit_index = -1
_tx: TX = None
_remotes = {}
_lock = asyncio.Lock()
class Switch433Mhz(ComponentSwitch):
def __init__(self, pin, file: str, name_on: str, name_off: str, reps: int = 5, **kwargs):
global _unit_index
_unit_index += 1
global _tx
if file not in _remotes and _tx is None:
pin = Pin(pin, machine.Pin.OUT)
_tx = TX(pin, file, reps)
_remotes[file] = _tx._data
elif file not in _remotes:
with open(file, 'r') as f:
rem = json.load(f)
# exceptions are forwarded to the caller
_remotes[file] = rem
if name_on not in _remotes[file]:
raise AttributeError("name_on {!r} not in file {!s}".format(name_on, file))
if name_off not in _remotes[file]:
raise AttributeError("name_off {!r} not in file {!s}".format(name_off, file))
super().__init__(COMPONENT_NAME, __version__, _unit_index, wait_for_lock=True,
initial_state=None, **kwargs)
# Unknown initial state. Should be sorted by retained state topic
self._reps = reps
self._file = file
        # transmission length in ms: the stored timings are in µs; add a 10% safety margin
        self._len_on = int(sum(_remotes[self._file][name_on]) * 1.1 / 1000)
        self._len_off = int(sum(_remotes[self._file][name_off]) * 1.1 / 1000)
self._name_on = name_on
self._name_off = name_off
# one lock for all switches, overrides lock created by the base class
self._lock = _lock
#####################
# Change these methods according to your device.
#####################
async def _on(self):
"""Turn device on."""
_tx._data = _remotes[self._file]
reps = _tx._reps
_tx._reps = self._reps
_tx(self._name_on)
await asyncio.sleep_ms(self._len_on * self._reps)
_tx._reps = reps
# wait until transmission is done so lock only gets released afterwards because
# only one transmission can occur at a time.
return True
async def _off(self):
"""Turn device off. """
_tx._data = _remotes[self._file]
reps = _tx._reps
_tx._reps = self._reps
_tx(self._name_off)
await asyncio.sleep_ms(self._len_off * self._reps)
_tx._reps = reps
# wait until transmission is done so lock only gets released afterwards because
# only one transmission can occur at a time.
return True
#####################
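# Illustrative instantiation matching the example config in the module docstring
# (pin, file name and sequence names are placeholders, not tested settings):
# switch = Switch433Mhz(pin=21, file="remote.json", name_on="on_a", name_off="off_a")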
|
[
"kevinkk525@users.noreply.github.com"
] |
kevinkk525@users.noreply.github.com
|
592753e6010898e998d131b59d51fb3be41cb4d4
|
65a645430d1dec1506338e21be7625dd3af9f18e
|
/main.py
|
ae4e3b3b43fbc5f04330ccefea901a3796fd461d
|
[] |
no_license
|
Obamzuro/cassandra_lab1
|
8e1d74b5e39c01d97791923fc480d7e384180514
|
95c3c66ef2e25a07128d38bb13fd07fba4e7cbab
|
refs/heads/master
| 2020-11-27T21:43:04.764774
| 2019-12-23T07:05:07
| 2019-12-23T07:05:07
| 229,612,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,536
|
py
|
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
from cassandra import ConsistencyLevel
cluster = Cluster(['127.0.0.1'], port=9042)
session = cluster.connect('obamzuro', wait_for_all_pools=True)
session.execute('USE obamzuro')
query = SimpleStatement("""INSERT INTO obamzuro.student JSON '{"student_id": 0,"completed_labs_counter": 2, "student_info": {"name": "Ivan", "surname": "Ivanov", "course": 4}, "student_subject_skill": {"A": {"subject_id": [2], "date_of_creating": "13.12.2008"}, "C": {"subject_id": [3], "date_of_creating": "13.12.2008"}, "E": {"subject_id": [1],"date_of_creating": "13.12.2008"}}}';""", consistency_level=ConsistencyLevel.ONE)
query.consistency_level = ConsistencyLevel.ONE
session.execute(query)
query = SimpleStatement("""INSERT INTO obamzuro.student JSON '{"student_id": 1,"completed_labs_counter": 2, "student_info": {"name": "Oleg", "surname": "Nebamzurov", "course": 4}, "student_subject_skill": {"A": {"subject_id": [2], "date_of_creating": "3.12.2008"}, "C": {"subject_id": [1], "date_of_creating": "3.12.2008"}, "E": {"subject_id": [3],"date_of_creating": "3.12.2008"}}}';""", consistency_level=ConsistencyLevel.ONE)
query.consistency_level = ConsistencyLevel.ONE
session.execute(query)
query = SimpleStatement("""INSERT INTO obamzuro.student JSON '{"student_id": 2,"completed_labs_counter": 1, "student_info": {"name": "Andrey", "surname": "Kosolapov", "course": 4}, "student_subject_skill": {"A": {"subject_id": [3], "date_of_creating": "13.2.2008"}, "C": {"subject_id": [1], "date_of_creating": "3.12.2008"}, "E": {"subject_id": [2],"date_of_creating": "13.12.2008"}}}';""", consistency_level=ConsistencyLevel.ONE)
query.consistency_level = ConsistencyLevel.ONE
session.execute(query)
rows = session.execute("SELECT * FROM student;")
for row in rows:
print(row)
query = SimpleStatement("""INSERT INTO obamzuro.subject_lab (subject_id, lab_id, lab_number, subject_name) VALUES(
%s, %s, %s, %s
);""", consistency_level=ConsistencyLevel.ONE)
query.consistency_level = ConsistencyLevel.ONE
session.execute(query, (1, 1, 1, 'Math'))
query.consistency_level = ConsistencyLevel.ONE
session.execute(query, (1, 2, 2, 'Math'))
query.consistency_level = ConsistencyLevel.ONE
session.execute(query, (1, 3, 3, 'Math'))
query.consistency_level = ConsistencyLevel.ONE
session.execute(query, (2, 4, 1, 'Physics'))
query.consistency_level = ConsistencyLevel.ONE
session.execute(query, (2, 5, 2, 'Physics'))
query.consistency_level = ConsistencyLevel.ONE
session.execute(query, (2, 6, 3, 'Physics'))
rows = session.execute("SELECT * FROM subject_lab;")
for row in rows:
print(row)
query = SimpleStatement("""
INSERT INTO obamzuro.lab_result(
lab_id,
student_id,
is_passed)
VALUES (
%s, %s, %s
);""", consistency_level=ConsistencyLevel.ONE)
query.consistency_level = ConsistencyLevel.ONE
session.execute(query, (1, 0, True))
query.consistency_level = ConsistencyLevel.ONE
session.execute(query, (2, 0, False))
query.consistency_level = ConsistencyLevel.ONE
session.execute(query, (3, 0, True))
rows = session.execute("SELECT * FROM lab_result;")
for row in rows:
print(row)
query = SimpleStatement("""UPDATE obamzuro.lab_result set is_passed=true where student_id=0 and lab_id=2;""", consistency_level=ConsistencyLevel.ONE)
query.consistency_level = ConsistencyLevel.ONE
session.execute(query)
rows = session.execute("""select * from obamzuro.lab_result;""")
for row in rows:
print(row)
query = SimpleStatement("""UPDATE obamzuro.subject_lab set subject_name='Phys' where subject_id=2;""", consistency_level=ConsistencyLevel.ONE)
query.consistency_level = ConsistencyLevel.ONE
session.execute(query)
rows = session.execute("""select * from obamzuro.subject_lab;""")
for row in rows:
print(row)
query = SimpleStatement("""UPDATE obamzuro.student set student_info={"name": 'Vanya', "surname":'Ivanov', "course": 4} where student_id=0;""", consistency_level=ConsistencyLevel.ONE)
query.consistency_level = ConsistencyLevel.ONE
session.execute(query)
rows = session.execute("select * from obamzuro.student;")
for row in rows:
print(row)
query = SimpleStatement("""select student_id, lab_id, is_passed
from obamzuro.lab_result;""", consistency_level=ConsistencyLevel.ONE)
query.consistency_level = ConsistencyLevel.ONE
session.execute(query)
query = SimpleStatement("""select subject_name, lab_id
from obamzuro.subject_lab;""", consistency_level=ConsistencyLevel.ONE)
query.consistency_level = ConsistencyLevel.ONE
session.execute(query)
query = SimpleStatement("""delete subject_name from obamzuro.subject_lab where subject_id=2;""", consistency_level=ConsistencyLevel.ONE)
query.consistency_level = ConsistencyLevel.ONE
session.execute(query)
rows = session.execute("select * from obamzuro.subject_lab;")
for row in rows:
print(row)
query = SimpleStatement("""delete student_info from obamzuro.student where student_id=0;""", consistency_level=ConsistencyLevel.ONE)
query.consistency_level = ConsistencyLevel.ONE
session.execute(query)
rows = session.execute("select * from obamzuro.student;")
for row in rows:
print(row)
query = SimpleStatement("""delete completed_labs_counter from obamzuro.student where student_id=0;""", consistency_level=ConsistencyLevel.ONE)
query.consistency_level = ConsistencyLevel.ONE
session.execute(query)
rows = session.execute("select * from obamzuro.student;")
for row in rows:
print(row)
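# Tidy shutdown (a small illustrative addition; the original script relies on
# process exit to close the cluster connection):
cluster.shutdown()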
|
[
"noreply@github.com"
] |
Obamzuro.noreply@github.com
|
689f2cd033a78cfe62d22d97c033a050baf5e09b
|
7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3
|
/2017/halfmarathon/final/A/a.py
|
d022308db43a50e6c81737f162cf302c8e1cc84c
|
[] |
no_license
|
roiti46/Contest
|
c0c35478cd80f675965d10b1a371e44084f9b6ee
|
c4b850d76796c5388d2e0d2234f90dc8acfaadfa
|
refs/heads/master
| 2021-01-17T13:23:30.551754
| 2017-12-10T13:06:42
| 2017-12-10T13:06:42
| 27,001,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,588
|
py
|
# -*- coding: utf-8 -*-
import sys,copy,math,heapq,itertools as it,fractions,re,bisect,collections as coll, random
N = 8
LOOP = 1000
MIN_D = 1
MAX_D = 50
MIN_T = 1
MAX_T = 50
def flush(s):
print s
sys.stdout.flush()
def fill(i):
flush("fill %d" % (i + 1))
def move(i, j):
flush("move %d %d" % (i + 1, j + 1))
def change(i):
flush("change %d" % (i + 1))
def pazz():
flush("pass")
def sell(x_list):
n = len(x_list)
s = "sell %d" % n
for x in x_list:
s += " %d" % (x + 1)
flush(s)
def do():
def dfs(i, s, used):
res = []
if s == D:
return used
if i >= N:
return []
if A[i] > 0 and s + A[i] <= D:
x_list = dfs(i + 1, s + A[i], used + [i])
if len(x_list) > len(res):
res = x_list
x_list = dfs(i + 1, s, used)
if len(x_list) > len(res):
res = x_list
return res
D, T = map(int, raw_input().split())
C = map(int, raw_input().split())
A = map(int, raw_input().split())
x_list = dfs(0, 0, [])
if x_list:
sell(x_list)
return
mn, idx = 99, 0
for i, a in enumerate(A):
if a == 0 and C[i] < mn:
mn = C[i]
idx = i
if mn < 99:
fill(idx)
return
if sum(C) < 50:
mn = min(C)
for i, c in enumerate(C):
if c == mn:
change(i)
return
pazz()
return
def main():
for loop in xrange(LOOP):
do()
if __name__ == "__main__":
main()
|
[
"roiti46@gmail.com"
] |
roiti46@gmail.com
|
09dadd77d06fc29c5cb436f64dfc953aac760332
|
fc5e0be02478b3aed1a7cc70181024e194df259f
|
/studentpro/student/migrations/0006_auto_20170926_0558.py
|
a3af317f5706f8071428a09a4bec9f019843a393
|
[] |
no_license
|
Trupti4/my-first-blog
|
93a0bb6c8cb5c5f417fdf48f1fc9cd2bcf63db11
|
60f923f57511c0a6a5a97c1025db60987b18b9d6
|
refs/heads/master
| 2021-08-15T02:40:49.947565
| 2017-11-17T05:49:27
| 2017-11-17T05:49:27
| 111,067,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-26 05:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('student', '0005_class_division'),
]
operations = [
migrations.AlterField(
model_name='studenttable',
name='division',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='student.Division'),
),
migrations.AlterField(
model_name='studenttable',
name='student_class',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='student.Class'),
),
]
|
[
"truptibjagtap95@gmail.com"
] |
truptibjagtap95@gmail.com
|
44ee678a2b4d31c2bcc76b9488ba66b930e43aa7
|
0c656371d4d38b435afb7e870c719fe8bed63764
|
/vehicles/models.py
|
bbde423644f83ef32dd331b95d2c926c3acce45c
|
[] |
no_license
|
enias-oliveira/parking-lot
|
cfbe5ca91dcb949e60f04cd26137260f5a9dba36
|
a54f5ca7b7d78bfc1a9b3c389729da14899d4048
|
refs/heads/master
| 2023-05-07T08:48:59.288672
| 2021-05-18T19:52:46
| 2021-05-18T19:52:46
| 369,363,475
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
from django.db import models
from levels.models import Space
class Vehicle(models.Model):
license_plate = models.CharField(max_length=255)
vehicle_type = models.CharField(
choices=Space.VarietyChoices.choices,
max_length=1,
)
arrived_at = models.DateTimeField(auto_now_add=True)
paid_at = models.DateTimeField(null=True)
amount_paid = models.FloatField(null=True)
space = models.OneToOneField(
Space,
on_delete=models.CASCADE,
)
|
[
"eniasoliveira27@gmail.com"
] |
eniasoliveira27@gmail.com
|
4bafa5098d0401cf955571e176bcaca041310536
|
9f71e50de671c0ab6f649b02d1b42b75d8f594d3
|
/lab05/lab_2.py
|
b18a4736437f75d26dce4195ab2ecd8230eaeee9
|
[] |
no_license
|
kodingkoning/cs344
|
a00a2327902d5e8cdbfed05bc71b4b06cb2f8ef3
|
d54a4237153a32675918daab528ad493afd4d99e
|
refs/heads/master
| 2020-04-20T03:03:08.484414
| 2019-05-13T18:56:30
| 2019-05-13T18:56:30
| 168,587,212
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,570
|
py
|
"""
CS 344: Lab 05
Elizabeth Koning, February 2019
Exercise 5.2
"""
from probability import BayesNet, enumeration_ask, elimination_ask, gibbs_ask
# Utility variables
T, F = True, False
cancer = BayesNet([
('Cancer', '', 0.01),
('Test1', 'Cancer', {T: 0.9, F: 0.2}),
('Test2', 'Cancer', {T: 0.9, F: 0.2}),
])
# a. P(Cancer | positive results on both tests)
print("a. " + enumeration_ask('Cancer', dict(Test1=T, Test2=T), cancer).show_approx())
"""
By hand:
P(C | t1 and t2) = alpha * P(Cancer, t1, t2)
                 = alpha * P(Cancer) * P(t1 | Cancer) * P(t2 | Cancer)
                 = alpha * <0.9*0.9*0.01, 0.2*0.2*0.99>
                 = alpha * <0.0081, 0.0396>
                 = (1/0.0477) * <0.0081, 0.0396>
                 = <0.1698, 0.8302>
"""
# b. P(Cancer | a positive result on test 1, but a negative result on test 2)
print("b. " + enumeration_ask('Cancer', dict(Test1=T, Test2=F), cancer).show_approx())
"""
By hand:
P(C | t1 and not t2) = alpha * P(Cancer, t1, not t2)
                     = alpha * P(Cancer) * P(t1 | Cancer) * P(not t2 | Cancer)
                     = alpha * <0.01*0.9*0.1, 0.99*0.2*0.8>
                     = alpha * <0.0009, 0.1584>
                     = (1/0.1593) * <0.0009, 0.1584>
                     = <0.006, 0.994>
"""
"""
These results make sense and they show that each test has a lot of influence on the outcome. Because the test has a fairly high positive rate for people with cancer (0.9) and a much lower positive rate for people without cancer (0.2), one failed test is a good indicator that the person does not have cancer. The prior also matters: because only 1% of those tested have cancer, it carries a heavy weight in the outcome.
"""
|
[
"koningelizabeth@gmail.com"
] |
koningelizabeth@gmail.com
|
a375a95d7c91dcc149c50ecaf7a3eb816d4db3e8
|
cc8a7e3129e403bef8f6e28edc5602b64798c807
|
/ui/student/__init__.py
|
61b6891d4d0497a02e4cd6cf2f7958e2a70d76fb
|
[] |
no_license
|
tameTNT/lucahuelle-wjecgce-compsci-unit5
|
0e86a87479ba8bb238e48012865c2099ee9f3de7
|
e12e54b3b6d47ab68e8b13660d4d2480edb7494a
|
refs/heads/master
| 2023-05-12T11:08:44.504621
| 2021-06-04T12:40:41
| 2021-06-04T12:40:41
| 283,845,001
| 1
| 0
| null | 2021-05-16T12:52:22
| 2020-07-30T18:09:40
|
Python
|
UTF-8
|
Python
| false
| false
| 9,606
|
py
|
import logging
import tkinter as tk
import tkinter.ttk as ttk
import ui
import ui.landing
import ui.student.enrolment
import ui.student.section_info
from data_tables import data_handling, SECTION_NAME_MAPPING
class StudentAwardDashboard(ui.GenericPage):
page_name = 'STUDENT_USERNAME - Award Dashboard'
def __init__(self, pager_frame: ui.PagedMainFrame):
super().__init__(pager_frame=pager_frame)
self.logout_button = ttk.Button(self, text='Logout', command=self.logout)
self.logout_button.pack(padx=self.padx, pady=self.pady)
# all variable fields start with a null value before being updated
# with student info when update_attributes is called
self.welcome_text_var = tk.StringVar()
self.welcome_text = ttk.Label(self, textvariable=self.welcome_text_var, justify='center',
font=ui.HEADING_FONT)
self.welcome_text.pack(padx=self.padx, pady=self.pady)
self.current_level_var = tk.StringVar()
self.current_level = ttk.Label(self, textvariable=self.current_level_var,
justify='center', font=ui.BOLD_CAPTION_FONT)
self.current_level.pack(padx=self.padx, pady=self.pady)
# button only shown if student has not yet registered/fully enrolled
# this button and following frame are not packed until the frame is shown to the user
self.complete_enrolment_button = ttk.Button(self, text='Complete Enrolment',
command=self.enrol_fully)
# frame packed into window later depending on registration status
self.fully_enrolled_info_frame = ttk.Frame(self)
        # === frame contents below are only shown once the student is fully enrolled ===
# == frame containing info on each section of award ==
self.section_info_frame = ttk.Labelframe(self.fully_enrolled_info_frame, text='Section progress')
self.section_info_frame.pack(padx=self.padx, pady=self.pady)
# Builds up GUI by section/column
for col, section_type in enumerate(SECTION_NAME_MAPPING.keys()):
# = Section title row =
title_var_name = f'{section_type}_title_var'
self.__setattr__(title_var_name, tk.StringVar()) # e.g. self.vol_title_var
title_label_name = f'{section_type}_title_label'
label_obj = ttk.Label(self.section_info_frame,
textvariable=self.__getattribute__(title_var_name),
justify='center')
self.__setattr__(title_label_name, label_obj) # e.g. self.vol_title_label
self.__getattribute__(title_label_name).grid(row=0, column=col,
padx=self.padx, pady=self.pady)
# = Section status row =
status_var_name = f'{section_type}_status_var'
self.__setattr__(status_var_name, tk.StringVar()) # e.g. self.vol_status_var
status_label_name = f'{section_type}_status_label'
label_obj = ttk.Label(self.section_info_frame,
textvariable=self.__getattribute__(status_var_name),
justify='center', font=ui.ITALIC_CAPTION_FONT)
self.__setattr__(status_label_name, label_obj) # e.g. self.vol_status_label
self.__getattribute__(status_label_name).grid(row=2, column=col,
padx=self.padx, pady=self.pady)
# = Section edit row =
button_var_name = f'{section_type}_edit_button'
# For explanation of why 'lambda x=section_type:...' is used
# instead of just 'lambda:...' see:
# https://stackoverflow.com/questions/10452770/python-lambdas-binding-to-local-values
button_obj = ttk.Button(self.section_info_frame, text='Edit',
command=lambda x=section_type: self.edit_section(x))
self.__setattr__(button_var_name, button_obj) # e.g. self.vol_edit_button
self.__getattribute__(button_var_name).grid(row=3, column=col,
padx=self.padx, pady=self.pady)
self.title_separator = ttk.Separator(self.section_info_frame, orient='horizontal')
self.title_separator.grid(row=1, columnspan=3, sticky='we', padx=self.padx, pady=self.pady)
# == end of self.section_info_frame ==
# == expedition info frame contents ==
self.expedition_frame = ttk.Labelframe(self.fully_enrolled_info_frame, text='Expedition')
self.expedition_frame.pack(padx=self.padx, pady=self.pady)
# todo: expedition info frame in Student overview page
self.temp_expedition_label = ttk.Label(self.expedition_frame, text='Not Implemented')
self.temp_expedition_label.grid(row=0, column=0, padx=self.padx, pady=self.pady)
# == end of self.expedition_frame contents ==
# == calendar frame contents ==
self.calendar_frame = ttk.Labelframe(self.fully_enrolled_info_frame, text='Calendar')
self.calendar_frame.pack(padx=self.padx, pady=self.pady)
# todo: calendar info frame in Student overview page
self.temp_expedition_label = ttk.Label(self.calendar_frame, text='Not Implemented')
self.temp_expedition_label.grid(row=0, column=0, padx=self.padx, pady=self.pady)
# == end of self.calendar_frame contents ==
# === end of self.fully_enrolled_info_frame contents ===
self.student = None # stores all student information for the window - updated below
self.student_username = ''
db = self.pager_frame.master_root.db
# noinspection PyTypeChecker
self.section_table: data_handling.SectionTable = db.get_table_by_name('SectionTable')
# noinspection PyTypeChecker
self.resource_table: data_handling.ResourceTable = db.get_table_by_name('ResourceTable')
def update_attributes(self, student: data_handling.Student, username: str) -> None:
# updates attributes with submitted parameters
self.student = student
self.student_username = username
self.page_name = f'{self.student_username} - Award Dashboard'
# === updates tkinter StringVar with new information received ===
if self.student.fullname: # registration complete
self.complete_enrolment_button.pack_forget()
if self.student.is_approved: # teacher has approved enrolment
self.welcome_text_var.set(f'Welcome, {self.student.fullname}!')
self.fully_enrolled_info_frame.pack(padx=self.padx, pady=self.pady)
else: # pending teacher approval
self.welcome_text_var.set(f'Welcome!\n Your teacher has not yet approved '
f'your enrolment, {username}.')
self.fully_enrolled_info_frame.pack_forget()
else: # if the student's details aren't complete, they have yet to register
self.welcome_text_var.set('Welcome!\n'
f'You have not yet completed your enrolment, {username}.')
self.complete_enrolment_button.pack(padx=self.padx, pady=self.pady)
self.fully_enrolled_info_frame.pack_forget()
self.current_level_var.set(f'Current level: {self.student.award_level.capitalize()}')
# Goes through each section one by one and updates the GUI's labels
for section_type, long_name in SECTION_NAME_MAPPING.items():
# fetches the tk.StringVar attributes to update with new info
title_var = self.__getattribute__(f'{section_type}_title_var')
status_var = self.__getattribute__(f'{section_type}_status_var')
# self.student.vol_info_id, self.student.skill_info_id, self.student.phys_info_id
section_obj = self.student.get_section_obj(section_type, self.section_table)
if section_obj:
                # if get_section_obj() isn't None, the record exists and the section has been started
section_length = int(section_obj.activity_timescale) // 30
title_var.set(f'{long_name}\n({section_length} months)')
status_var.set(section_obj.get_activity_status(self.resource_table))
else:
title_var.set(long_name)
status_var.set('Not started')
logging.info(f'Username "{self.student_username}" entered the student dashboard. '
f'They have {"already" if self.student.fullname else "not yet"} '
f'completed their enrolment.')
def logout(self):
"""
Logs the student out of the page - returns them to Welcome page
"""
logging.info(f'Username "{self.student_username}" '
f'successfully logged out of student application')
self.pager_frame.change_to_page(ui.landing.Welcome)
def enrol_fully(self):
self.pager_frame.change_to_page(
destination_page=ui.student.enrolment.Enrolment,
student=self.student,
username=self.student_username,
)
def edit_section(self, section_type_short):
self.pager_frame.change_to_page(
destination_page=ui.student.section_info.SectionInfo,
student=self.student,
username=self.student_username,
section_type_short=section_type_short,
)
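# (The self.__setattr__ / self.__getattribute__ calls above build widget attributes
# such as self.vol_title_var dynamically from the SECTION_NAME_MAPPING keys, so a
# new section type needs only a new mapping entry rather than new GUI code.)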
|
[
"30503695+tameTNT@users.noreply.github.com"
] |
30503695+tameTNT@users.noreply.github.com
|
f824ea27891db1c68d9afda49cef3f0acb44e804
|
daa0e9d968884202e2d4cc6769a8a1fe060e5e84
|
/Python/bannergraber.py
|
4c4a0be5f39510d4c578baec62f1fb9f2e2404bd
|
[] |
no_license
|
Ankit1dubey/Hacktoberfest-Accepted
|
03ecab59d10f934b8b5ed6f2d65c9a64692e2cff
|
cd2d8a55b7c62d8d56165a74e717ab4733bcdae7
|
refs/heads/main
| 2023-08-22T07:24:14.105087
| 2021-10-30T16:42:51
| 2021-10-30T16:42:51
| 422,935,434
| 2
| 0
| null | 2021-10-30T16:36:39
| 2021-10-30T16:36:39
| null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
import socket
def banner(ip, port):
    s = socket.socket()
    s.settimeout(5)  # set the timeout before connecting so connect() can also time out
    s.connect((ip, int(port)))
    print(s.recv(1024))
    s.close()
def main():
ip = input("Please enter the IP: ")
port = str(input("Please enter the port: "))
banner(ip, port)
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
Ankit1dubey.noreply@github.com
|
ce600169865d3e60fa5828726f44e90608919b67
|
55cf0aa3e9795dd769a7cce18d1816fa960f9eb6
|
/blog/migrations/0001_initial.py
|
0b7bbe4ec64b92ec8a4582604dab8b1b366ebb11
|
[] |
no_license
|
apascolo1/my-first-blog
|
2cd15642c91cc024bedd9d858ce32c36ee4f1d42
|
d1da1510ee6840d670cefa00579eea34130b3615
|
refs/heads/master
| 2021-01-02T22:48:58.179116
| 2017-08-05T05:56:30
| 2017-08-05T05:56:30
| 99,399,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-05 03:58
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"apascolo@hotmail.com"
] |
apascolo@hotmail.com
|
999ce221a1abf46e5d54a633af2e8f9a66c8a6a6
|
b382a88e68988e88725f183a197692592f29665a
|
/codepython/StatistiqueY/statistical_analyse.py
|
a2165cc63ca2cadc14195bca4145723c9a24490b
|
[
"BSD-3-Clause"
] |
permissive
|
gdeside/LEPL1506_Projet4
|
0e97c4ad66a9cfdfba005170e6ed56337624229a
|
02b9833fedbadd79bf8fdcd7026d2ee8b7925cc2
|
refs/heads/master
| 2023-04-22T15:34:22.596361
| 2021-05-08T18:57:17
| 2021-05-08T18:57:17
| 354,288,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,248
|
py
|
from os import path
import numpy as np
import statsmodels.stats.weightstats as sm2
from scipy import signal
from scipy import stats as sc
import coda_tools as coda
import processing_tools as tool
# %% Variance comparison
markers_id = [6, 5, 8, 7]
ntrials = [2, 3, 4]  # NB: change the file names accordingly
positions = ['UR', 'SP', 'UD']
names = ['LH', 'GD', 'PDs', 'MH']
positionsdico = {
    "SP": "Supine",
    "UD": "UpsideDown",
    "UR": "UpRight"
}
for name in names:
file1 = open("statsdeltay_%s" % name, "w")
for p in positions:
file1.write("######################## Position {} ###############################################\n".format(positionsdico[p]))
for n in ntrials:
file1.write("Stats DeltaY pour %s en position %s et les essais %d et %d\n"%(name,p,n,n+1))
file_path1 = "../../data/Groupe_1_codas/%s_%s_coda000%d.txt" % (name, p, n)
if not path.exists(file_path1):
continue
coda_df1 = coda.import_data(file_path1)
pos1 = coda.manipulandum_center(coda_df1, markers_id)
pos1 = pos1 / 1000
vel1 = tool.derive(pos1, 200, axis=1)
# posxbis1 = pos1[0][~np.isnan(pos1[0])]
pk = signal.find_peaks(vel1[0], prominence=1, width=(100, 1000))
ipk = pk[0] # index
cycle_starts = ipk[:-1]
cycle_ends = ipk[1:] - 1
ecart1 = []
for k in range(len(cycle_starts)):
ecart1.append(abs(np.nanmax(pos1[1][cycle_starts[k]:cycle_ends[k]]) - np.nanmin(
pos1[1][cycle_starts[k]:cycle_ends[k]])))
            # second file
file_path2 = "../../data/Groupe_1_codas/%s_%s_coda000%d.txt" % (name, p, n + 1)
if not path.exists(file_path2):
continue
coda_df2 = coda.import_data(file_path2)
pos2 = coda.manipulandum_center(coda_df2, markers_id)
pos2 = pos2 / 1000
vel2 = tool.derive(pos2, 200, axis=1)
# posxbis2 = pos2[0][~np.isnan(pos2[0])]
pk = signal.find_peaks(vel2[0], prominence=1, width=(100, 1000))
ipk = pk[0] # index
cycle_starts = ipk[:-1]
cycle_ends = ipk[1:] - 1
ecart2 = []
for k in range(len(cycle_starts)):
ecart2.append(abs(np.nanmax(pos2[1][cycle_starts[k]:cycle_ends[k]]) - np.nanmin(
pos2[1][cycle_starts[k]:cycle_ends[k]])))
Txbis, pvalbis = sc.bartlett(ecart1, ecart2)
file1.write("p_value pour la variance %f \n" % pvalbis)
file1.write("les deux variances sont %f et %f\n" %(np.nanstd(ecart1),np.nanstd(ecart2)))
#print(sc.ttest_ind(ecart1, ecart2, equal_var=True))
X1 = sm2.DescrStatsW(ecart1)
X2 = sm2.DescrStatsW(ecart2)
Ttest = sm2.CompareMeans(X1, X2)
# Ttest.summary() gives us the p-value, the statistics and the critical values
file1.write(Ttest.summary(usevar='pooled').as_text()+"\n")
file1.write("les deux moyennes sont: %f et %f\n"%(np.nanmean(ecart1),np.nanmean(ecart2)))
file1.write('\n')
file1.write('\n')
file1.close()
|
[
"guillaume.deside28@gmail.com"
] |
guillaume.deside28@gmail.com
|
1920681762c2334cb1bce7fbbef9a9b4912ac7ca
|
b7a79f6187ddd8d6f0c7f20dbf28a375b3d5a83b
|
/tools/format-conversion/output-from-py-to-graph.py
|
e047173d86caebb00ea15ddfc696bd079b778cc6
|
[
"ISC",
"CC-BY-4.0"
] |
permissive
|
gracesc7/irc-disentanglement
|
542ddee388cfb31b2107251e1a4e52ed76f94cd8
|
a94d2992bce7648bc79881ed95b34eaac9d62383
|
refs/heads/master
| 2020-08-25T00:00:16.892953
| 2019-09-25T20:14:54
| 2019-09-25T20:14:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
#!/usr/bin/env python3
from __future__ import print_function
import argparse
import sys
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert the output from a run of the python system into content that can be evaluated.')
args = parser.parse_args()
for line in sys.stdin:
if line.startswith("data/"):
line = line.split('/')[-1].strip()
parts = line.split()
print("{}:{}".format(parts[0], ' '.join(parts[1:])))
|
[
"jkk@berkeley.edu"
] |
jkk@berkeley.edu
|
6f07ea6ec7b1081eb1cd3ffb088fd7b9cc491c95
|
fe42bdcb32937a477f7cebb184771f05a9f6a7d9
|
/tiny.py
|
189f6f1e9d07e269ce7bc0d0e116559399f3170d
|
[] |
no_license
|
ozamodaz/tinypng
|
81ca1ab22f0d65aa45bbee6df917cdaad46876e7
|
fb9ac925924d51330172047cd077c4255505276f
|
refs/heads/master
| 2020-12-02T22:36:36.253055
| 2017-07-21T13:12:25
| 2017-07-21T13:12:25
| 96,156,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,345
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tinify
import os, sys
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
max_workers = 4
tinify.key = 'API_KEY'
in_path = os.path.dirname(__file__)
out_path = os.path.join(in_path, 'compressed')
log_file = os.path.join(in_path, 'error.log')
counter = 0
def log(log_msg):
timestamp = datetime.now().strftime('%d.%m.%Y %H:%M')
string = '%s %s \n' % (timestamp, log_msg)
with open(log_file, mode='a') as f:
f.write(string)
def get_queue():
def image_files(path):
allowed_formats = ['jpg', 'jpeg', 'png']
for fname in os.listdir(path):
if os.path.isfile(os.path.join(path, fname)):
extension = fname.split('.')[-1].lower()
if extension in allowed_formats:
yield fname
if os.path.exists(out_path):
done = set(fname for fname in image_files(out_path))
else:
os.makedirs(out_path)
done = set()
in_files = set(fname for fname in image_files(in_path))
uncompressed = in_files - done
return uncompressed
def compress(fname):
"""
    Compressing again, you damn jackal
"""
try:
source = tinify.from_file(os.path.join(in_path, fname))
resized = source.resize(
method="scale",
width=1600,
)
resized.to_file(os.path.join(out_path, fname))
        global counter
        counter += 1  # note: += is not atomic; with several workers a threading.Lock would be safer
sys.stdout.write("Progress: %s of %s \r" % (counter, total))
sys.stdout.flush()
except tinify.errors.ConnectionError as e:
log('A network connection error occurred.')
log('Error message: %s' % e)
log('Retry...')
        compress(fname)  # note: retries recursively with no depth limit
except tinify.AccountError as e:
log('Account Error. Verify your API key and account limit.')
log('Compression count: %s' % tinify.compression_count)
log('Error message: %s' % e)
sys.exit(0)
except Exception as e:
log('Error: %s (%s)' % (e, fname))
pass
if __name__ == "__main__":
uncompressed = get_queue()
total = len(uncompressed)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
for fname in sorted(uncompressed, reverse=True):
worker = executor.submit(compress, fname)
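# (Note: the Future returned by executor.submit() is never inspected, so any
# exception that escapes compress() is stored silently on the Future instead of
# being raised here.)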
|
[
"horen@colocall.net"
] |
horen@colocall.net
|
d0441941e779a89f285dae8308b27bcbcc5c567b
|
15c2d706e2e4e4826f3f3c8cb43646e8fbc0de30
|
/testboringthings.py
|
1f42792192194547db3d9b896a6083d81b33e1fb
|
[
"MIT"
] |
permissive
|
Widdershin/PyGM
|
9bcc8d40c796f74ea0aeb0f7df94cd6948cc4a87
|
3cb898c29bcb20589f2f8ed3f93036c1bd49cf2d
|
refs/heads/master
| 2021-01-01T15:29:52.553756
| 2013-07-02T11:16:01
| 2013-07-02T11:16:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
import core
from core import maths  # imported this way so the bare name `maths` used below resolves
class Player(core.PyGMObj):
def __init__(self, x, y, sprite=None):
super(Player, self).__init__(x, y, sprite)
class Nick(Player):
"""docstring for Nick"""
def __init__(self, x, y, sprite=None):
super(Nick, self).__init__(x, y, sprite)
self.x = x
self.y = y
self.sprite = sprite
a = Player(5, 5)
b = Player(10, 5)
c = Nick(1, 3)
print 'Nick: ' + str(Nick.instances())
print 'Player: ' + str(Player.instances())
a1 = maths.Vector2(5, 5)
a2 = maths.Vector2(10, 25)
print a1
print a2
print a1 + a2
print a1 - a2
print a1 * 5
print a1 * a2
print a1 / 3
print a1 / a2
|
[
"ncwjohnstone@gmail.com"
] |
ncwjohnstone@gmail.com
|
c2e0f9527762406a4d6c13564971bac0124e2b62
|
4c513075de5af0f450268cef78d81a2a8e7a7929
|
/django/guofubao/cmdb/urls.py
|
eb486ed852bba35be1fd9a62d997df627995b038
|
[] |
no_license
|
zaishuiyixia911/python2017
|
436a38aa82efaf74b706eaeb6ec9e76eed7d1f1c
|
9ef6864d2a42da4bb68ab3df0a0d686817bb34fb
|
refs/heads/master
| 2021-01-19T21:50:37.420642
| 2017-09-22T19:24:16
| 2017-09-22T19:24:16
| 88,710,787
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
#!/usr/bin/python
#_*_coding:utf8_*_
"""cmdb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from assets import rest_urls,urls as asset_urls
from cmdb import views
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^api/', include(rest_urls)),
    url(r'^asset/', include(asset_urls)),
    url(r'^$', views.index, name="dashboard"),
    url(r'^login/$', views.acc_login, name='login'),
]
|
[
"894197954@qq.com"
] |
894197954@qq.com
|
51ee7c36e58334f7b2f630af3aea0ba3fa79108d
|
e5ff36a263b22a9759175531b4fb0ff319d2ef4e
|
/fib-sphere.py
|
5e05fc950617f6f3bdd95af3c7565b193bc3bfbb
|
[] |
no_license
|
caseprince/rhino-python-scripts
|
95d74b2ee02130fd8c5e36c378831c87817da342
|
2778cda65aa7169b29b5f546805ed2538ecbb5b2
|
refs/heads/master
| 2021-04-29T04:02:50.691805
| 2019-03-18T20:31:20
| 2019-03-18T20:31:20
| 78,032,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,271
|
py
|
import rhinoscriptsyntax as rs
import Rhino.Geometry as rg
import math, random
def fibonacci_sphere(samples=1, randomize=True, radius=2.5):
rnd = 1.
if randomize:
rnd = random.random() * samples
points = []
offset = 2./samples
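    # math.pi * (3 - sqrt(5)) is the golden angle (~2.39996 rad); advancing phi
    # by it each step spreads successive points almost uniformly over the sphere.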
    increment = math.pi * (3. - math.sqrt(5.))
    for i in range(samples):
        z = (((i * offset) - 1) + (offset / 2))
r = math.sqrt(1 - pow(z,2))
phi = ((i + rnd) % samples) * increment
x = math.cos(phi) * r * radius
y = math.sin(phi) * r * radius
z *= -radius
points.append([x,y,z])
return points
def addPts(pt1, pt2):
return [pt1[0]+pt2[0], pt1[1]+pt2[1], pt1[2]+pt2[2]]
def box2pt(p1, p2):
pt0 = p1
pt1 = rs.coerce3dpoint([p2[0], p1[1], p1[2]])
pt2 = rs.coerce3dpoint([p2[0], p2[1], p1[2]])
pt3 = rs.coerce3dpoint([p1[0], p2[1], p1[2]])
pt4 = rs.coerce3dpoint([p1[0], p1[1], p2[2]])
pt5 = rs.coerce3dpoint([p2[0], p1[1], p2[2]])
pt6 = p2
pt7 = rs.coerce3dpoint([p1[0], p2[1], p2[2]])
return rs.AddBox([pt0, pt1, pt2, pt3, pt4, pt5, pt6, pt7])
def reset():
arr1 = rs.AllObjects()
if arr1: rs.DeleteObjects(arr1)
rs.Command("ClearAllMeshes")
rs.Command("ClearUndo")
reset()
# 10"
# radius_mm = 127
# petals = 4098
# 8.5"
radius_mm = 108
sq_mm_per_petal = 50.25 # 50.25 square mm per petal
s = 35 #30.48 #0.48 * radius_mm
# 12" w/ larger petals
radius_mm = 152
sq_mm_per_petal = 160
s = 60
surfaceArea = 4 * math.pi * radius_mm**2
petals = int(surfaceArea / sq_mm_per_petal)
points = fibonacci_sphere(petals, False, radius_mm)
cone = rs.AddCone([0,0,0], .2 * s, .2 * s, True)
cone2 = rs.AddCone([0,-.05 * s,0], .21 * s, .19 * s, True)
petal = rs.BooleanDifference([cone], [cone2])[0]
rs.RotateObject(petal, [0,0,0], -13, rg.Vector3d.XAxis)
clipBoxZ = .43 * s
clipBox = box2pt([.3 * s, .3 * s, (.3 * s) + clipBoxZ], [-.3 * s, -.3 * s, (-.3 * s) + clipBoxZ])
rs.RotateObject(clipBox, [0,0,0], 5, rg.Vector3d.XAxis)
petal = rs.BooleanDifference([petal], [clipBox])[0]
for pt in points:
vector = rs.VectorCreate(pt, [0,0,0])
newPetal = rs.CopyObject(petal, vector)
rs.OrientObject(newPetal, [pt, addPts(pt,[0,0,1]), addPts(pt,[0,1,0])], [pt, [0,0,0], addPts(pt,[0,0,1])])
|
[
"caseprince@gmail.com"
] |
caseprince@gmail.com
|
f77a395cdd517abbc3e9116f21cedf8191c7153f
|
afc8fb99b86de4a639ef7bd50835c6dfef22d66a
|
/lg_common/test/offline/test_helpers.py
|
7d81ecd093ed6ac55bb81cc3c0b7998837ed0b94
|
[
"Apache-2.0"
] |
permissive
|
carlosvquezada/lg_ros_nodes
|
e5d373a59fa990aac5d0a97a3ee705e7aec141b4
|
7560e99272d06ef5c80a5444131dad72c078a718
|
refs/heads/master
| 2020-04-08T13:49:19.622258
| 2018-11-24T00:21:30
| 2018-11-24T00:21:30
| 159,408,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,742
|
py
|
"""
Test module for all helper functions from the helpers.py module
"""
import os
import pytest
import rospy
import rospkg
import rostopic
from interactivespaces_msgs.msg import GenericMessage
from lg_common.helpers import extract_first_asset_from_director_message
from lg_common.helpers import load_director_message
from lg_common.helpers import unpack_activity_sources
DIRECTOR_MESSAGE_ACTIVITY_CONFIG_NOT_PRESENT = """
{
"description": "bogus",
"duration": 0,
"name": "test whatever",
"resource_uri": "bogus",
"slug": "test message",
"windows": [
{
"activity": "video",
"assets": [
"whatever"
],
"height": 1080,
"presentation_viewport": "center",
"width": 1920,
"x_coord": 0,
"y_coord": 0
}
]
}"""
DIRECTOR_MESSAGE_ACTIVITY_CONFIG_EMPTY = """
{
"description": "bogus",
"duration": 0,
"name": "test whatever",
"resource_uri": "bogus",
"slug": "test message",
"windows": [
{
"activity": "video",
"assets": [
"whatever"
],
"height": 1080,
"presentation_viewport": "center",
"width": 1920,
"x_coord": 0,
"y_coord": 0,
"activity_config": {}
}
]
}"""
DIRECTOR_MESSAGE_ACTIVITY_CONFIG_LOOP = """
{
"description": "bogus",
"duration": 0,
"name": "test whatever",
"resource_uri": "bogus",
"slug": "test message",
"windows": [
{
"activity": "video",
"assets": [
"whatever"
],
"height": 1080,
"presentation_viewport": "center",
"width": 1920,
"x_coord": 0,
"y_coord": 0,
"activity_config": {
"onFinish": "loop"
}
}
]
}"""
DIRECTOR_MESSAGE_ACTIVITY_CONFIG_CLOSE = """
{
"description": "bogus",
"duration": 0,
"name": "test whatever",
"resource_uri": "bogus",
"slug": "test message",
"windows": [
{
"activity": "video",
"assets": [
"whatever"
],
"height": 1080,
"presentation_viewport": "center",
"width": 1920,
"x_coord": 0,
"y_coord": 0,
"activity_config": {
"onFinish": "close"
}
}
]
}"""
DIRECTOR_MESSAGE_ACTIVITY_CONFIG_NOTHING = """
{
"description": "bogus",
"duration": 0,
"name": "test whatever",
"resource_uri": "bogus",
"slug": "test message",
"windows": [
{
"activity": "video",
"assets": [
"whatever"
],
"height": 1080,
"presentation_viewport": "center",
"width": 1920,
"x_coord": 0,
"y_coord": 0,
"activity_config": {
"onFinish": "nothing"
}
}
]
}"""
class TestHelpers(object):
@classmethod
def setup_class(cls):
pass
@classmethod
def teardown_class(cls):
pass
def setup_method(self, method):
self.msg = GenericMessage()
self.msg.type = "json"
def teardown_method(self, _):
pass
def test_load_director_message_wrong_json(self):
self.msg.message = "wrong json"
pytest.raises(ValueError, load_director_message, self.msg)
def test_load_director(self):
self.msg.message = DIRECTOR_MESSAGE_ACTIVITY_CONFIG_NOTHING
d = load_director_message(self.msg)
assert isinstance(d, dict)
assert d["windows"][0]["activity_config"]["onFinish"] == "nothing"
def test_extract_first_asset_from_director_message_return_empty_list(self):
self.msg.message = DIRECTOR_MESSAGE_ACTIVITY_CONFIG_NOTHING
r = extract_first_asset_from_director_message(self.msg, "something", "center")
# get empty list since activity type does not match
assert r == []
r = extract_first_asset_from_director_message(self.msg, "video", "somewhereelse")
# get empty list since viewport does not match
assert r == []
r = extract_first_asset_from_director_message(self.msg, "something", "somewhereelse")
assert r == []
def test_extract_first_asset_from_director_message_general(self):
self.msg.message = DIRECTOR_MESSAGE_ACTIVITY_CONFIG_NOTHING
#extract_first_asset_from_director_message(message, activity_type, viewport)
r = extract_first_asset_from_director_message(self.msg, "video", "center")
assert r[0]["x_coord"] == 0
assert r[0]["y_coord"] == 0
assert r[0]["height"] == 1080
assert r[0]["width"] == 1920
def test_extract_first_asset_from_director_message_activity_config_options(self):
# no activity_config attribute present
self.msg.message = DIRECTOR_MESSAGE_ACTIVITY_CONFIG_NOT_PRESENT
r = extract_first_asset_from_director_message(self.msg, "video", "center")
assert not hasattr(r[0], "on_finish")
# activity_config present but empty
self.msg.message = DIRECTOR_MESSAGE_ACTIVITY_CONFIG_EMPTY
r = extract_first_asset_from_director_message(self.msg, "video", "center")
assert not hasattr(r[0], "on_finish")
# activity_config present, onFinish close, loop, nothing
for m, on_finish in ((DIRECTOR_MESSAGE_ACTIVITY_CONFIG_LOOP, "loop"),
(DIRECTOR_MESSAGE_ACTIVITY_CONFIG_CLOSE, "close"),
(DIRECTOR_MESSAGE_ACTIVITY_CONFIG_NOTHING, "nothing")):
self.msg.message = m
r = extract_first_asset_from_director_message(self.msg, "video", "center")
assert r[0]["on_finish"] == on_finish
def test_unpack_activity_sources(self):
source_string = "/touchscreen/touch:interactivespaces_msgs/GenericMessage:activity"
result = [{"topic": "/touchscreen/touch",
"message_type": "interactivespaces_msgs/GenericMessage",
"strategy": "activity",
"slot": None,
"value_min": None,
"value_max": None,
"value": None}]
assert result == unpack_activity_sources(source_string)
source_string = "/proximity_sensor/distance:sensor_msgs/Range-range:value-0,2.5"
result = [{"topic": "/proximity_sensor/distance",
"message_type": "sensor_msgs/Range",
"strategy": "value",
"slot": "range",
"value_min": "0",
"value_max": "2.5",
"value": None}]
assert result == unpack_activity_sources(source_string)
source_string = "/proximity_sensor/distance:sensor_msgs/Range-range:average"
result = [{"topic": "/proximity_sensor/distance",
"message_type": "sensor_msgs/Range",
"strategy": "average",
"slot": "range",
"value_min": None,
"value_max": None,
"value": None}]
assert result == unpack_activity_sources(source_string)
source_string = ("/proximity_sensor/distance:sensor_msgs/Range-range:value-0,2.5;"
"/touchscreen/touch:interactivespaces_msgs/GenericMessage:delta")
result = [{"topic": "/proximity_sensor/distance",
"message_type": "sensor_msgs/Range",
"strategy": "value",
"slot": "range",
"value_min": "0",
"value_max": "2.5",
"value": None},
{"topic": "/touchscreen/touch",
"message_type": "interactivespaces_msgs/GenericMessage",
"slot": None,
"strategy": "delta",
"value_min": None,
"value_max": None,
"value": None}]
assert result == unpack_activity_sources(source_string)
source_string = ("/earth/query/search:std_msgs/String-data:default;"
"/lg_replay/touchscreen:interactivespaces_msgs/GenericMessage-message:count;"
"/spacenav/twist:geometry_msgs/Twist-angular:count_nonzero;"
"/proximity/distance:sensor_msgs/Range-range:average")
result = [{"topic": "/earth/query/search",
"message_type": "std_msgs/String",
"strategy": "default",
"slot": "data",
"value_min": None,
"value_max": None,
"value": None},
{"topic": "/lg_replay/touchscreen",
"message_type": "interactivespaces_msgs/GenericMessage",
"slot": "message",
"strategy": "count",
"value_min": None,
"value_max": None,
"value": None},
{"topic": "/spacenav/twist",
"message_type": "geometry_msgs/Twist",
"slot": "angular",
"strategy": "count_nonzero",
"value_min": None,
"value_max": None,
"value": None},
{"topic": "/proximity/distance",
"message_type": "sensor_msgs/Range",
"slot": "range",
"strategy": "average",
"value_min": None,
"value_max": None,
"value": None}]
assert result == unpack_activity_sources(source_string)
source_string = ("/appctl/mode:appctl/Mode-mode:value-tactile;"
"/director/scene:interactivespaces_msgs/GenericMessage-message.slug:value-online_scene")
result = [{"topic": "/appctl/mode",
"message_type": "appctl/Mode",
"slot": "mode",
"strategy": "value",
"value_min": None,
"value_max": None,
"value": "tactile"},
{"topic": "/director/scene",
"message_type": "interactivespaces_msgs/GenericMessage",
"slot": "message.slug",
"strategy": "value",
"value_min": None,
"value_max": None,
"value": "online_scene"}]
assert result == unpack_activity_sources(source_string)
if __name__ == "__main__":
test_pkg = "lg_common"
test_name = "test_helpers"
test_dir = os.path.join(rospkg.get_test_results_dir(env=None), test_pkg)
pytest_result_path = os.path.join(test_dir, "rosunit-%s.xml" % test_name)
# run only itself
test_path = os.path.abspath(os.path.abspath(__file__))
# output is unfortunately handled / controlled by above layer of rostest (-s has no effect)
pytest.main("%s -s -v --junit-xml=%s" % (test_path, pytest_result_path))
|
[
"zdenek@endpoint.com"
] |
zdenek@endpoint.com
|
a762e6fd60f036c8ce198f59b5a134d32805574f
|
87bc21c7ec2aaaa86ae1cb76efc4f182b6f86a33
|
/blog/migrations/0001_initial.py
|
0fbf71e4892bce8d6f4e80fa2ecd3fc197022759
|
[] |
no_license
|
malcbwilson/my-first-blog
|
195c080c667a88e97fcb35e02f7f53b07f920e67
|
6030d53206ddc9a36c5d0a6d435df7ac4fc1871a
|
refs/heads/master
| 2021-04-06T19:28:52.026610
| 2018-03-15T08:48:51
| 2018-03-15T08:48:51
| 125,335,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-14 13:08
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"verysillymalcolm@btinternet.com"
] |
verysillymalcolm@btinternet.com
|
db3d75e52ba527d65b71fa69000fb4bf8b0d4d19
|
12412a9d3f3434d9a0f9d6db099a6cb75a35a77f
|
/bgx/validator-bgx/sawtooth_validator/server/network_handlers.py
|
7719d562eb6795e2a966b6ec493a48ad999f9ef5
|
[
"Zlib",
"MIT",
"Apache-2.0"
] |
permissive
|
DGT-Network/DGT-Mississauga
|
3cf6d79618d2b076ac75b771910cabc5bdd6431a
|
fb0a558f1304696cdf4278b8009001212ee17cb9
|
refs/heads/master
| 2023-03-05T08:58:09.141087
| 2022-05-10T15:48:04
| 2022-05-10T15:48:04
| 241,790,384
| 0
| 0
|
Apache-2.0
| 2023-03-02T17:42:45
| 2020-02-20T04:10:38
|
Python
|
UTF-8
|
Python
| false
| false
| 12,397
|
py
|
# Copyright 2016, 2017 NTRLab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
from sawtooth_validator.protobuf import validator_pb2
from sawtooth_validator.journal.completer import CompleterGossipHandler
from sawtooth_validator.journal.completer import CompleterGossipBlockResponseHandler
from sawtooth_validator.journal.completer import CompleterGossipBatchResponseHandler
from sawtooth_validator.gossip import structure_verifier
from sawtooth_validator.journal.responder import BlockResponderHandler
from sawtooth_validator.journal.responder import ResponderBlockResponseHandler
from sawtooth_validator.journal.responder import BatchByBatchIdResponderHandler
from sawtooth_validator.journal.responder import ResponderBatchResponseHandler
from sawtooth_validator.journal.responder import BatchByTransactionIdResponderHandler
from sawtooth_validator.gossip import signature_verifier
from sawtooth_validator.gossip.permission_verifier import NetworkPermissionHandler
from sawtooth_validator.gossip.permission_verifier import NetworkConsensusPermissionHandler
from sawtooth_validator.gossip.gossip_handlers import GossipBroadcastHandler
from sawtooth_validator.gossip.gossip_handlers import GossipMessageDuplicateHandler
from sawtooth_validator.gossip.gossip_handlers import GossipBlockResponseHandler
from sawtooth_validator.gossip.gossip_handlers import GossipBatchResponseHandler
from sawtooth_validator.gossip.gossip_handlers import PeerRegisterHandler
from sawtooth_validator.gossip.gossip_handlers import PeerUnregisterHandler
from sawtooth_validator.gossip.gossip_handlers import GetPeersRequestHandler
from sawtooth_validator.gossip.gossip_handlers import GetPeersResponseHandler
from sawtooth_validator.gossip.gossip_handlers import GossipConsensusMessageHandler
from sawtooth_validator.networking.handlers import PingHandler
from sawtooth_validator.networking.handlers import ConnectHandler
from sawtooth_validator.networking.handlers import DisconnectHandler
from sawtooth_validator.networking.handlers import AuthorizationTrustRequestHandler
from sawtooth_validator.networking.handlers import AuthorizationChallengeRequestHandler
from sawtooth_validator.networking.handlers import AuthorizationChallengeSubmitHandler
from sawtooth_validator.networking.handlers import AuthorizationViolationHandler
LOGGER = logging.getLogger(__name__)
def add(
dispatcher,
interconnect,
gossip,
completer,
responder,
thread_pool,
sig_pool,
has_block,
has_batch,
permission_verifier,
block_publisher,
consensus_notifier
):
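    # Note on ordering: handlers registered for the same message type run in
    # registration order, and a handler can stop the chain (e.g. a failed
    # permission or signature check drops the message), which is why those
    # checks are registered ahead of the completer/broadcast handlers below.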
# -- Basic Networking -- #
dispatcher.add_handler(
validator_pb2.Message.PING_REQUEST,
PingHandler(network=interconnect),
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.NETWORK_CONNECT,
ConnectHandler(network=interconnect),
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.NETWORK_DISCONNECT,
DisconnectHandler(network=interconnect),
thread_pool)
# -- Authorization -- #
dispatcher.add_handler(
validator_pb2.Message.AUTHORIZATION_VIOLATION,
AuthorizationViolationHandler(
network=interconnect,
gossip=gossip),
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.AUTHORIZATION_TRUST_REQUEST,
AuthorizationTrustRequestHandler(
network=interconnect,
permission_verifier=permission_verifier,
gossip=gossip),
thread_pool)
challenge_request_handler = AuthorizationChallengeRequestHandler(
network=interconnect)
dispatcher.add_handler(
validator_pb2.Message.AUTHORIZATION_CHALLENGE_REQUEST,
challenge_request_handler,
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.AUTHORIZATION_CHALLENGE_SUBMIT,
AuthorizationChallengeSubmitHandler(
network=interconnect,
permission_verifier=permission_verifier,
gossip=gossip,
cache=challenge_request_handler.get_challenge_payload_cache()),
thread_pool)
# -- Gossip -- #
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST,
NetworkPermissionHandler(
network=interconnect,
permission_verifier=permission_verifier,
gossip=gossip
),
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST,
GetPeersRequestHandler(gossip=gossip),
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE,
NetworkPermissionHandler(
network=interconnect,
permission_verifier=permission_verifier,
gossip=gossip
),
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE,
GetPeersResponseHandler(gossip=gossip),
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_REGISTER,
NetworkPermissionHandler(
network=interconnect,
permission_verifier=permission_verifier,
gossip=gossip
),
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_REGISTER,
PeerRegisterHandler(gossip=gossip),
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_UNREGISTER,
PeerUnregisterHandler(gossip=gossip),
thread_pool)
    # GOSSIP_MESSAGE 1) Check if this is a block and if we already have it
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_MESSAGE,
GossipMessageDuplicateHandler(completer, has_block, has_batch),
thread_pool)
    # GOSSIP_MESSAGE 2) Verify Network Permissions
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_MESSAGE,
NetworkPermissionHandler(
network=interconnect,
permission_verifier=permission_verifier,
gossip=gossip
),
thread_pool)
    # GOSSIP_MESSAGE 3) Verifies signature
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_MESSAGE,
signature_verifier.GossipMessageSignatureVerifier(),
sig_pool)
    # GOSSIP_MESSAGE 4) Verifies batch structure
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_MESSAGE,
structure_verifier.GossipHandlerStructureVerifier(),
thread_pool)
    # GOSSIP_MESSAGE 5) Verifies that the node is allowed to publish a
    # block
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_MESSAGE,
NetworkConsensusPermissionHandler(
network=interconnect,
permission_verifier=permission_verifier,
gossip=gossip
),
thread_pool)
    # GOSSIP_MESSAGE 6) Determines if we should broadcast the
# message to our peers. It is important that this occur prior
# to the sending of the message to the completer, as this step
# relies on whether the gossip message has previously been
# seen by the validator to determine whether or not forwarding
# should occur
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_MESSAGE,
GossipBroadcastHandler(
gossip=gossip,
completer=completer),
thread_pool)
    # GOSSIP_MESSAGE 7) Send message to completer
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_MESSAGE,
CompleterGossipHandler(completer),
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
NetworkPermissionHandler(
network=interconnect,
permission_verifier=permission_verifier,
gossip=gossip
),
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
BlockResponderHandler(responder, gossip),
thread_pool)
# GOSSIP_BLOCK_RESPONSE 1) Check for duplicate responses
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
GossipBlockResponseHandler(completer, responder, has_block),
thread_pool)
    # GOSSIP_BLOCK_RESPONSE 2) Verify Network Permissions
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
NetworkPermissionHandler(
network=interconnect,
permission_verifier=permission_verifier,
gossip=gossip
),
thread_pool)
# GOSSIP_BLOCK_RESPONSE 3) Verifies signature
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
signature_verifier.GossipBlockResponseSignatureVerifier(),
sig_pool)
# GOSSIP_BLOCK_RESPONSE 4) Check batch structure
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
structure_verifier.GossipBlockResponseStructureVerifier(),
thread_pool)
# GOSSIP_BLOCK_RESPONSE 5) Send message to completer
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
CompleterGossipBlockResponseHandler(
completer),
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
ResponderBlockResponseHandler(responder, gossip),
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
NetworkPermissionHandler(
network=interconnect,
permission_verifier=permission_verifier,
gossip=gossip
),
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
BatchByBatchIdResponderHandler(responder, gossip),
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
NetworkPermissionHandler(
network=interconnect,
permission_verifier=permission_verifier,
gossip=gossip
),
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
BatchByTransactionIdResponderHandler(responder, gossip),
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
NetworkPermissionHandler(
network=interconnect,
permission_verifier=permission_verifier,
gossip=gossip
),
thread_pool)
# GOSSIP_BATCH_RESPONSE 1) Check for duplicate responses
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
GossipBatchResponseHandler(completer, responder, has_batch),
thread_pool)
# GOSSIP_BATCH_RESPONSE 2) Verifies signature
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
signature_verifier.GossipBatchResponseSignatureVerifier(),
sig_pool)
# GOSSIP_BATCH_RESPONSE 3) Check batch structure
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
structure_verifier.GossipBatchResponseStructureVerifier(),
thread_pool)
# GOSSIP_BATCH_RESPONSE 4) Send message to completer
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
CompleterGossipBatchResponseHandler(
completer),
thread_pool)
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
ResponderBatchResponseHandler(responder, gossip),
thread_pool)
# GOSSIP_CONSENSUS_MESSAGE
dispatcher.add_handler(
validator_pb2.Message.GOSSIP_CONSENSUS_MESSAGE,
GossipConsensusMessageHandler(consensus_notifier),
thread_pool)
|
[
"sparsov@sinergo.ru"
] |
sparsov@sinergo.ru
|
e2b674451ac2cc1a67893c942d74e7393622b16e
|
1e6d47e228afe6343a577e71cb18f05b74ffcc18
|
/app.py
|
55024ab1e4bb53ac4d26a8f1cc4a04076ba3bdd4
|
[] |
no_license
|
MuhweziDeo/maintainaceTrackerApi
|
cf14af6651067b312a550c8eefa2ea32c9f970d5
|
5261d9f3f723e66d55079219c0915ddb4c0d6e6d
|
refs/heads/master
| 2020-03-23T19:28:06.481753
| 2018-07-24T15:25:37
| 2018-07-24T15:25:37
| 141,979,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,573
|
py
|
from flask import Flask, request,jsonify,render_template,flash,session
from werkzeug.security import check_password_hash,generate_password_hash
from flask_sqlalchemy import SQLAlchemy
from functools import wraps
# import jwt
import uuid
import datetime
app = Flask(__name__)
app.config['SECRET_KEY'] = "thisisecreys"
db = SQLAlchemy(app)
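# NOTE: no SQLALCHEMY_DATABASE_URI is configured; Flask-SQLAlchemy 2.x falls
# back to an in-memory "sqlite://" database (with a warning), so the
# db.create_all() at the bottom does not persist anything between runs.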
#user interfaces
@app.route('/')
def index():
return "Hello World"
#user routes
@app.route('/signup')
def signup():
return render_template('signup.html')
@app.route('/login')
def login():
return render_template('login.html')
@app.route('/make_request')
def make_request():
return render_template('make_request.html')
@app.route('/user_requests')
def user_requests():
return render_template('user_requests.html')
#admin routes
@app.route('/requests')
def requests():
return render_template('requests.html')
@app.route('/resolve')
def resolve():
return render_template('resolve.html')
# api endpoints
# USERS
@app.route('/api/v1/users/requests', methods=['GET'])
def get_all_requests():
# get all requests of logged in users
return ''
@app.route('/api/v1/users/requests/<request_id>', methods=['GET'])
def get_one_request(request_id):
# get a single request
return ''
@app.route('/api/v1/users/request', methods=['POST'])
def create_request():
# create a request
data=request.get_json()
return ''
@app.route('/api/v1/users/requests/<request_id>', methods=['PUT'])
def modify_request(request_id):
# modify a request
return ''
if __name__=='__main__':
db.create_all()
app.run(debug=True)
|
[
"aggrey256@gmail.com"
] |
aggrey256@gmail.com
|
1426fe5efd18a006fb26d6b93e7099cb59991d46
|
e10a6d844a286db26ef56469e31dc8488a8c6f0e
|
/rrlfd/data_utils/compress.py
|
bd4473ba68f6110f8afbc835fef1bfa587f81a86
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
Jimmy-INL/google-research
|
54ad5551f97977f01297abddbfc8a99a7900b791
|
5573d9c5822f4e866b6692769963ae819cb3f10d
|
refs/heads/master
| 2023-04-07T19:43:54.483068
| 2023-03-24T16:27:28
| 2023-03-24T16:32:17
| 282,682,170
| 1
| 0
|
Apache-2.0
| 2020-07-26T15:50:32
| 2020-07-26T15:50:31
| null |
UTF-8
|
Python
| false
| false
| 1,024
|
py
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PNG compression utilities."""
import io
import numpy as np
from PIL import Image
def compress_image(img_obs):
pil_img = Image.fromarray(img_obs)
img_buf = io.BytesIO()
pil_img.save(img_buf, format='PNG')
img_bytes = img_buf.getvalue()
return img_bytes
def decompress_image(img_bytes):
img_buf = io.BytesIO(img_bytes)
pil_img = Image.open(img_buf)
img_obs = np.array(pil_img)
return img_obs
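# Example round trip (sketch; assumes a uint8 HxWx3 RGB array). PNG is
# lossless, so the decompressed array equals the original exactly:
#   img = np.zeros((4, 4, 3), dtype=np.uint8)
#   assert (decompress_image(compress_image(img)) == img).all()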
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
26d28491c05fe3675ba6b8ef2b5237750238c9b4
|
ba58ab7abe5e790a43ad013d39f8471912bab519
|
/【03】分布式多进程/【2】处理对象/Master.py
|
37ad67ea0f668f554b294100271855f40134b8d3
|
[] |
no_license
|
simpleweiwei/Python.ParallelFramework
|
6015621883cd7268d190058e3c3a7b0caa497fc6
|
25b3ea09225fdd826451024285303f86213b9f56
|
refs/heads/master
| 2021-01-12T16:15:13.602830
| 2016-11-29T05:22:12
| 2016-11-29T05:22:12
| 71,961,055
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,620
|
py
|
from queue import Queue
from multiprocessing.managers import BaseManager
from Job import Job
class Master:
def __init__(self):
        # Queue of jobs dispatched to workers
self.dispatched_job_queue = Queue()
        # Queue of finished jobs
self.finished_job_queue = Queue()
def get_dispatched_job_queue(self):
return self.dispatched_job_queue
def get_finished_job_queue(self):
return self.finished_job_queue
def start(self):
        # Register the dispatched-job and finished-job queues on the network
BaseManager.register('get_dispatched_job_queue', callable=self.get_dispatched_job_queue)
BaseManager.register('get_finished_job_queue', callable=self.get_finished_job_queue)
        # Listen on the port and start the service
manager = BaseManager(address=('0.0.0.0', 8888), authkey=b'jobs')
manager.start()
        # Use the methods registered above to obtain the queues
dispatched_jobs = manager.get_dispatched_job_queue()
finished_jobs = manager.get_finished_job_queue()
        # Dispatch 10 jobs at a time; once all 10 have finished, dispatch the next 10
job_id = 0
while True:
for i in range(0, 10):
job_id += 1
job = Job(job_id)
print('Dispatch job: %s' % job.job_id)
dispatched_jobs.put(job)
while not dispatched_jobs.empty():
                job = finished_jobs.get(timeout=60)
print('Finished Job: %s' % job.job_id)
manager.shutdown()
if __name__ == "__main__":
master = Master()
master.start()
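# For reference, a minimal matching worker would look like this (sketch; the
# address is hypothetical and must point at the master's host):
#
#   from multiprocessing.managers import BaseManager
#   BaseManager.register('get_dispatched_job_queue')
#   BaseManager.register('get_finished_job_queue')
#   manager = BaseManager(address=('127.0.0.1', 8888), authkey=b'jobs')
#   manager.connect()
#   dispatched_jobs = manager.get_dispatched_job_queue()
#   finished_jobs = manager.get_finished_job_queue()
#   while True:
#       job = dispatched_jobs.get(timeout=60)
#       # ... process the job ...
#       finished_jobs.put(job)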
|
[
"coderweiwei@outlook.com"
] |
coderweiwei@outlook.com
|
13a92e52791f944fe5b8eb4651c491e58442eb57
|
a9c01bacd3d54f66a34f0f51bf0883cf067199f9
|
/karaoke_views.py
|
ab9693f6b9339a60a4c797aee170451055ab0421
|
[] |
no_license
|
alxpck/django_basics
|
75108dbe1586a53b118776cfbd4d8b9058c19ad4
|
602b748e351a5c12c26fbe032a1d71224734f5dd
|
refs/heads/master
| 2021-01-15T09:23:47.292722
| 2015-07-31T17:31:19
| 2015-07-31T17:31:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
from django.shortcuts import render, get_object_or_404
from .models import Song, Performer
def song_list(request):
songs = Song.objects.all()
return render(request, 'songs/song_list.html', {'songs':songs})
def song_detail(request, pk):
song = Song.objects.get(pk=pk)
return render(request, 'songs/song_detail.html', {'song':song})
def performer_detail(request, pk):
performer = get_object_or_404(Performer, pk=pk)
#performer = Performer.objects.get(pk=pk)
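    # get_object_or_404 returns an HTTP 404 response when no row matches,
    # unlike the commented-out bare .get() above, which would raise
    # Performer.DoesNotExist (an unhandled 500 in production).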
return render(request, 'songs/performer_detail.html', {'performer':performer})
|
[
"alex@alexmilesyounger.com"
] |
alex@alexmilesyounger.com
|
68a8ff0f5d7cc5447729ba87c02dd93eb7b8ea0f
|
3a9f2b3d79cf214704829427ee280f4b49dca70a
|
/saigon/rat/u/snmp/zd/verify_negative_input_v2.py
|
63a9f15ffe39a1c233ba5dbb722ba25e5740a8e8
|
[] |
no_license
|
jichunwei/MyGitHub-1
|
ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791
|
f826fc89a030c6c4e08052d2d43af0b1b4b410e3
|
refs/heads/master
| 2021-01-21T10:19:22.900905
| 2016-08-20T03:34:52
| 2016-08-20T03:34:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,836
|
py
|
'''
@copyright: Ruckus Wireless, Inc. - 2011
@since: 2011.04.10
@author: cherry.cheng@ruckuswireless.com (developed)
@summary:
    Verify negative input for read-write nodes which support SNMP v2: system information [basic and snmp], ZD AP.
    Command samples:
tea.py u.snmp.zd.verify_negative_input_v2
tea.py u.snmp.zd.verify_negative_input_v2 ip_addr='192.168.0.10' username='admin' password='admin' shell_key='!v54!'
tea.py u.snmp.zd.verify_negative_input_v2 ip_addr='192.168.0.10'
tea.py u.snmp.zd.verify_negative_input_v2 ip_addr='192.168.0.10' version=2
    # You can also set related agent config and the read/write user account when executing snmp get commands.
tea.py u.snmp.zd.verify_negative_input_v2 ip_addr='192.168.0.10' version=2 ro_community='public' rw_community='private' timeout=30 retries=3
'''
import logging
from RuckusAutoTest.components import create_zd_cli_by_ip_addr,clean_up_rat_env
from RuckusAutoTest.components.lib.zdcli.configure_snmp import config_snmp_agent
from RuckusAutoTest.components.lib.snmp import snmphelper as helper
from RuckusAutoTest.components.lib.snmp.zd import sys_info, sys_snmp_info
zd_cfg = {'ip_addr': '192.168.0.2',
'username': 'admin',
'password': 'admin',
'shell_key': '!v54!',
}
#Notes snmp config, user auth info will update from agent_config.
snmp_cfg = {'ip_addr': '192.168.0.2',
'version': 2,
'timeout': 20,
'retries': 3,
}
test_cfg = {'oids': 'all',
'index': 0,
'result':{},
}
agent_config = {'version': 2,
'enabled': True,
'ro_community': 'public',
'rw_community': 'private',
'contact': 'support@ruckuswireless.com',
'location': 'shenzhen',
'ro_sec_name': 'ruckus-read',
'ro_auth_protocol': 'MD5',
'ro_auth_passphrase': '12345678',
'ro_priv_protocol': 'DES',
'ro_priv_passphrase': '12345678',
'rw_sec_name': 'ruckus-write',
'rw_auth_protocol': 'MD5',
'rw_auth_passphrase': '12345678',
'rw_priv_protocol': 'DES',
'rw_priv_passphrase': '12345678',
}
def _cfg_test_params(**kwargs):
for k, v in kwargs.items():
if snmp_cfg.has_key(k):
snmp_cfg[k] = v
if test_cfg.has_key(k):
test_cfg[k] = v
if zd_cfg.has_key(k):
zd_cfg[k] = v
if agent_config.has_key(k):
agent_config[k] = v
conf = {}
conf.update(zd_cfg)
conf.update(test_cfg)
conf.update(snmp_cfg)
conf['zd_cli'] = create_zd_cli_by_ip_addr(**zd_cfg)
logging.info('Preparation: Enable snmp agent with config.')
config_snmp_agent(conf['zd_cli'], agent_config)
#Update snmp config, get read write config for it.
snmp_cfg.update(helper.get_update_snmp_cfg(agent_config))
conf['snmp'] = helper.create_snmp(snmp_cfg)
return conf
def do_config(**kwargs):
return _cfg_test_params(**kwargs)
def do_test(conf):
try:
is_all = conf['oids'].upper() == 'ALL'
if is_all:
obj_names_list = []
else:
obj_names_list = conf['oids'].split(',')
res_d = {}
snmp = conf['snmp']
sys_basic_info_cfg = sys_info.gen_test_data_sys_info_negative()
res_sys_basic_d = sys_info.update_sys_info(snmp, sys_basic_info_cfg, obj_names_list)
res_d.update(res_sys_basic_d)
sys_snmp_info_cfg = sys_snmp_info.gen_test_data_sys_snmp_info_negative()
res_sys_snmp_d = sys_snmp_info.set_sys_snmp_info(snmp, sys_snmp_info_cfg, obj_names_list, False)
res_d.update(res_sys_snmp_d)
pass_d, fail_d = helper.verify_error_for_negative(res_d)
if pass_d:
conf['result']['PASS'] = pass_d
else:
conf['result']['FAIL'] = fail_d
if not conf['result']:
conf['result'] = 'PASS'
except Exception, e:
conf['result'] = {'Exception': 'Message: %s' % (e,)}
return conf['result']
def do_clean_up(conf):
clean_up_rat_env()
def main(**kwargs):
conf = {}
try:
if kwargs.has_key('help'):
print __doc__
else:
conf = do_config(**kwargs)
res = do_test(conf)
do_clean_up(conf)
return res
except Exception, e:
logging.info('[TEST BROKEN] %s' % e.message)
return conf
finally:
pass
if __name__ == '__main__':
kwargs = dict()
main(**kwargs)
|
[
"tan@xx.com"
] |
tan@xx.com
|
4d5ec47dc34b9214083ba2b459ff61eb354ac78b
|
f07c98898c51551a50d96a33e86c4f4f9a797ae7
|
/Image Processing/code.py
|
68c79de1c738d5635f5b55633cb8ccfa566f046f
|
[] |
no_license
|
dhruvgoel99/GLADIATOR_Signature-Detection
|
26d4714bc10a667b982d15cd9904eeafd130462f
|
6b16b3ec4f33306c589a68cc6bf16c2485319bab
|
refs/heads/master
| 2022-11-15T01:14:51.310884
| 2020-07-05T10:10:49
| 2020-07-05T10:10:49
| 277,273,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
import numpy as np
import cv2
face_cascade=cv2.CascadeClassifier("cascade.xml")
img=cv2.imread("picture.jpg",1)
gray_img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces=face_cascade.detectMultiScale(gray_img,scaleFactor=1.05,minNeighbors=5)
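# scaleFactor=1.05 shrinks the search scale by 5% per image-pyramid level;
# minNeighbors=5 keeps only boxes confirmed by at least 5 overlapping detections.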
for x,y,w,h in faces:
img=cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
resized=cv2.resize(img,(600,500))
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.imshow("pic",resized)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
dhruvgoel99.noreply@github.com
|
77555503dd0d9f21308f63ce1579ee889c92d9a3
|
c10858b59cef322e31d8f19e32264fde7ae9fd1c
|
/venv/bin/easy_install
|
80c7c00758a874acd859c1baaa8603971178ae94
|
[] |
no_license
|
arunmastermind/genReportViaDB
|
57f79f9075b09829e479bb25bd61df2e9bb120d7
|
ba9bec07e3560c1db4d44605ca836d719874ae79
|
refs/heads/master
| 2020-12-27T13:41:17.827651
| 2020-02-03T11:30:47
| 2020-02-03T11:30:47
| 237,920,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
#!/Users/arunkumar/genReportViaDB/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"arunkumar@azuls-MacBook-Pro.local"
] |
arunkumar@azuls-MacBook-Pro.local
|
|
443466ea482adaece843f1cafd4b56e975fcc554
|
c3b77a364f943b1aa14099b84c4b69910d44b075
|
/test/test_oneview_switch_type_facts.py
|
bb902bd6c526a56110bdc3a81fc2d5b6ac316ba5
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
bsouza/oneview-ansible
|
f711caa3d17bb62eeaf567945d237cc97d065881
|
0f784a8332af842273e699881a3a2655138fe0c6
|
refs/heads/master
| 2020-12-07T00:38:08.614405
| 2016-10-07T20:15:48
| 2016-10-07T20:15:48
| 66,856,976
| 0
| 0
| null | 2016-08-29T15:42:59
| 2016-08-29T15:42:59
| null |
UTF-8
|
Python
| false
| false
| 3,997
|
py
|
###
# Copyright (2016) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import unittest
import mock
from hpOneView.oneview_client import OneViewClient
from oneview_switch_type_facts import SwitchTypeFactsModule
from test.utils import create_ansible_mock
ERROR_MSG = 'Fake message error'
PARAMS_GET_ALL = dict(
config='config.json',
name=None
)
PARAMS_GET_BY_NAME = dict(
config='config.json',
name="Test Switch Type 2"
)
SWITCH_TYPES = [{"name": "Test Switch Type 1"}, {"name": "Test Switch Type 2"}, {"name": "Test Switch Type 3"}]
class SwitchTypeFactsSpec(unittest.TestCase):
@mock.patch.object(OneViewClient, 'from_json_file')
@mock.patch('oneview_switch_type_facts.AnsibleModule')
def test_should_get_all_switch_types(self, mock_ansible_module, mock_ov_client_from_json_file):
mock_ov_instance = mock.Mock()
mock_ov_instance.switch_types.get_all.return_value = SWITCH_TYPES
mock_ov_client_from_json_file.return_value = mock_ov_instance
mock_ansible_instance = create_ansible_mock(PARAMS_GET_ALL)
mock_ansible_module.return_value = mock_ansible_instance
SwitchTypeFactsModule().run()
mock_ansible_instance.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(switch_types=(SWITCH_TYPES))
)
@mock.patch.object(OneViewClient, 'from_json_file')
@mock.patch('oneview_switch_type_facts.AnsibleModule')
def test_should_fail_when_get_all_raises_exception(self, mock_ansible_module, mock_ov_client_from_json_file):
mock_ov_instance = mock.Mock()
mock_ov_instance.switch_types.get_all.side_effect = Exception(ERROR_MSG)
mock_ov_client_from_json_file.return_value = mock_ov_instance
mock_ansible_instance = create_ansible_mock(PARAMS_GET_ALL)
mock_ansible_module.return_value = mock_ansible_instance
SwitchTypeFactsModule().run()
mock_ansible_instance.fail_json.assert_called_once()
@mock.patch.object(OneViewClient, 'from_json_file')
@mock.patch('oneview_switch_type_facts.AnsibleModule')
def test_should_get_switch_type_by_name(self, mock_ansible_module, mock_ov_client_from_json_file):
mock_ov_instance = mock.Mock()
mock_ov_instance.switch_types.get_by.return_value = [SWITCH_TYPES[1]]
mock_ov_client_from_json_file.return_value = mock_ov_instance
mock_ansible_instance = create_ansible_mock(PARAMS_GET_BY_NAME)
mock_ansible_module.return_value = mock_ansible_instance
SwitchTypeFactsModule().run()
mock_ansible_instance.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(switch_types=([SWITCH_TYPES[1]]))
)
@mock.patch.object(OneViewClient, 'from_json_file')
@mock.patch('oneview_switch_type_facts.AnsibleModule')
def test_should_fail_when_get_by_name_raises_exception(self, mock_ansible_module, mock_ov_client_from_json_file):
mock_ov_instance = mock.Mock()
mock_ov_instance.switch_types.get_by.side_effect = Exception(ERROR_MSG)
mock_ov_client_from_json_file.return_value = mock_ov_instance
mock_ansible_instance = create_ansible_mock(PARAMS_GET_BY_NAME)
mock_ansible_module.return_value = mock_ansible_instance
SwitchTypeFactsModule().run()
mock_ansible_instance.fail_json.assert_called_once()
if __name__ == '__main__':
unittest.main()
|
[
"mariana.kreisig@hpe.com"
] |
mariana.kreisig@hpe.com
|
9ec162c356d7febbb1f3a98336c7305e9452bc15
|
8069e993efd9b0c6a2b073844f1dea4d7b2b3230
|
/notebooks/Users/michael.mengarelli@databricks.com/NLP/04_sarcasm_classifier_tf_idf.py
|
a4dacc4e06cdd7a2809fd4796e6fee1593ea73ff
|
[] |
no_license
|
mmengarelli/notebooks
|
07f7f8e3ab4cc2d642518c6620cea75f68ee6c33
|
2dea4b533f304fdce0d0df8b83ca5ffeae855527
|
refs/heads/master
| 2023-05-13T20:00:51.558080
| 2023-04-27T13:15:43
| 2023-04-27T13:15:43
| 148,656,794
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,571
|
py
|
# Databricks notebook source
# MAGIC %md-sandbox
# MAGIC # Detecting Sarcasm with Databricks
# MAGIC
# MAGIC <div class="markdown-converter__text--rendered"><h3>Context</h3>
# MAGIC
# MAGIC <img src="https://memesbams.com/wp-content/uploads/2017/11/sheldon-sarcasm-meme.jpg" style="float:right; height: 450px; margin: 10px; padding: 10px"/>
# MAGIC
# MAGIC <p>This dataset contains 1.3 million sarcastic comments from the internet commentary website **Reddit**. The dataset was generated by scraping comments from Reddit containing the <code>\s</code> (sarcasm) tag. This tag is often used by Redditors to indicate that their comment is in jest and not meant to be taken seriously, and is generally a reliable indicator of sarcastic content.</p>
# MAGIC
# MAGIC <h3>Content</h3>
# MAGIC
# MAGIC <p>The corpus has 1.3 million sarcastic statements and responses, as well as many non-sarcastic comments from the same source.</p>
# MAGIC
# MAGIC <h3>Acknowledgements</h3>
# MAGIC
# MAGIC <p>The data was gathered by: Mikhail Khodak and Nikunj Saunshi and Kiran Vodrahalli for their article "<a href="https://arxiv.org/abs/1704.05579" rel="nofollow">A Large Self-Annotated Corpus for Sarcasm</a>". The data is hosted <a href="http://nlp.cs.princeton.edu/SARC/0.0/" rel="nofollow">here</a>.</p>
# MAGIC
# MAGIC <p>Citation:</p>
# MAGIC
# MAGIC <pre><code>@unpublished{SARC,
# MAGIC authors={Mikhail Khodak and Nikunj Saunshi and Kiran Vodrahalli},
# MAGIC title={A Large Self-Annotated Corpus for Sarcasm},
# MAGIC url={https://arxiv.org/abs/1704.05579},
# MAGIC year=2017
# MAGIC }
# MAGIC </code></pre>
# MAGIC
# MAGIC <h3>Inspiration</h3>
# MAGIC
# MAGIC <ul>
# MAGIC <li>Predicting sarcasm and relevant NLP features (e.g. subjective determinant, racism, conditionals, sentiment heavy words, "Internet Slang" and specific phrases). </li>
# MAGIC <li>Sarcasm vs Sentiment</li>
# MAGIC <li>Unusual linguistic features such as caps, italics, or elongated words. e.g., "Yeahhh, I'm sure THAT is the right answer".</li>
# MAGIC <li>Topics that people tend to react to sarcastically</li>
# MAGIC </ul></div>
# MAGIC
# MAGIC
# MAGIC https://www.kaggle.com/danofer/sarcasm
# MAGIC
# MAGIC <!--
# MAGIC # mm-demo
# MAGIC # demo-ready
# MAGIC # TF-IDF
# MAGIC -->
# COMMAND ----------
# MAGIC %md #### Explore data
# COMMAND ----------
trainBalancedSarcasmDF = spark.read.option("header", True).csv("/mnt/mikem/tmp/train-balanced-sarcasm.csv")
trainBalancedSarcasmDF.createOrReplaceTempView('data')
# COMMAND ----------
df = sql("""
select cast(label as int), concat(parent_comment,"\n",comment) as comment
from data where comment is not null
and parent_comment is not null
limit 100000""")
display(df)
# COMMAND ----------
# MAGIC %md #### NLP Pipeline
# COMMAND ----------
from sparknlp.annotator import *
from sparknlp.common import *
from sparknlp.base import *
from pyspark.ml import Pipeline
document_assembler = DocumentAssembler() \
.setInputCol("comment") \
.setOutputCol("document")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence") \
.setUseAbbreviations(True)
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")
stemmer = Stemmer() \
.setInputCols(["token"]) \
.setOutputCol("stem")
normalizer = Normalizer() \
.setInputCols(["stem"]) \
.setOutputCol("normalized")
finisher = Finisher() \
.setInputCols(["normalized"]) \
.setOutputCols(["ntokens"]) \
.setOutputAsArray(True) \
.setCleanAnnotations(True)
nlp_pipeline = Pipeline(stages=[document_assembler, sentence_detector, tokenizer, stemmer, normalizer, finisher])
nlp_model = nlp_pipeline.fit(df)
processed = nlp_model.transform(df)
# COMMAND ----------
display(processed.where("label = 1").select("comment"))
# COMMAND ----------
train, test = processed.randomSplit(weights=[0.7, 0.3], seed=123)
print(train.count())
print(test.count())
# COMMAND ----------
# MAGIC %md #### Train Classifier
# COMMAND ----------
from pyspark.ml import feature as spark_ft
stopWords = spark_ft.StopWordsRemover.loadDefaultStopWords('english')
sw_remover = spark_ft.StopWordsRemover(inputCol='ntokens', outputCol='clean_tokens', stopWords=stopWords)
tf = spark_ft.CountVectorizer(vocabSize=500, inputCol='clean_tokens', outputCol='tf')
idf = spark_ft.IDF(minDocFreq=5, inputCol='tf', outputCol='idf')
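# Spark's IDF is the smoothed variant idf(t) = log((N + 1) / (df(t) + 1)) with
# N = number of documents; terms appearing in fewer than minDocFreq documents
# get an IDF of 0 and are effectively filtered out.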
feature_pipeline = Pipeline(stages=[sw_remover, tf, idf])
feature_model = feature_pipeline.fit(train)
train_featurized = feature_model.transform(train).persist()
# COMMAND ----------
display(train_featurized.groupBy("label").count())
# COMMAND ----------
from pyspark.ml import classification as spark_cls
rf = spark_cls.RandomForestClassifier(labelCol="label", featuresCol="idf", numTrees=100)
model = rf.fit(train_featurized)
# COMMAND ----------
# MAGIC %md #### Predict
# COMMAND ----------
test_featurized = feature_model.transform(test)
preds = model.transform(test_featurized)
display(preds.select("comment", "label", "prediction"))
# COMMAND ----------
import pandas as pd
from sklearn import metrics as skmetrics
pred_pds = preds.select('comment', 'label', 'prediction').toPandas()
pd.DataFrame(
data=skmetrics.confusion_matrix(pred_pds['label'], pred_pds['prediction']),
columns=['pred ' + l for l in ['0','1']],
index=['true ' + l for l in ['0','1']]
)
# COMMAND ----------
print(skmetrics.classification_report(pred_pds['label'], pred_pds['prediction'], target_names=['0','1']))
|
[
"mengam01@gmail.com"
] |
mengam01@gmail.com
|
0c839ddf6a512a13731cc6c2ca44d1c993d473fb
|
e414ec62afaa75d187d68831df6c91919c5bad56
|
/build/husky/husky_viz/catkin_generated/pkg.installspace.context.pc.py
|
11d298fe4f110b2bfc9fd657581ff1ea86fb2156
|
[] |
no_license
|
mark2n/tianbao-robtech
|
1458a9aca3d20adb0f62e92de271d7aa968be7da
|
771cd5ad9194d30fa358e65d3609ede3b643d332
|
refs/heads/master
| 2021-01-19T22:07:45.969245
| 2017-05-26T09:09:38
| 2017-05-26T09:09:38
| 83,771,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "husky_viz"
PROJECT_SPACE_DIR = "/home/tudeng/tianbao-robtech/install"
PROJECT_VERSION = "0.2.2"
|
[
"tianbao.zhang@yahoo.com"
] |
tianbao.zhang@yahoo.com
|
cdd784e36f7228a590923cbf364b8aad4f0b3360
|
518ed159ee612b9803c7e803e0387a2736c56621
|
/tests/test_datastructures/test_stack.py
|
cd809b2b8270cb229359954dba04b1fe89147203
|
[] |
no_license
|
varskann/datastructures_and_algorithms
|
dd87b33bfda64d87a5dd609bc5514034ffa3bf5b
|
bc7b8c011c9c7d2cd6ed682aeff5a34281ec925e
|
refs/heads/master
| 2022-12-24T17:19:16.268396
| 2020-10-10T17:59:37
| 2020-10-10T17:59:37
| 261,686,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
import pytest
from datastructures_and_algorithms.datastructures.stack import Stack
def test_stack():
q = Stack()
q.push(1)
q.push(2)
assert q.pop() == 2, "Stack didn't return last element"
q.push(3)
q.push(4)
assert q.traverse() == [4, 3, 1], "Stack storage corrupt"
def test_stack_char():
q = Stack()
q.push("a")
q.push("b")
assert q.pop() == "b", "Stack didn't return last element"
q.push("c")
q.push("d")
assert q.traverse() == ["d", "c", "a"], "Stack storage corrupt"
|
[
"varskann1993@gmail.com"
] |
varskann1993@gmail.com
|
d8b6a3e17bd2c120e11b114a24b107ea372dd24a
|
cf5da5a84359b9fa163cd6810c3b0857220da002
|
/R/RProjectURLProvider.py
|
138f28caa3e0f72c2b7e148f54b565c7caa8f602
|
[
"Apache-2.0"
] |
permissive
|
nstrauss/homebysix-recipes
|
a21aff6c0d0115f0aaf428d8aaf94609dc2ecaa9
|
e75aed4f6ae44baa1313f8f45b41ee6e1b759df7
|
refs/heads/master
| 2023-01-09T20:27:14.009885
| 2023-01-03T18:22:24
| 2023-01-03T18:22:24
| 201,100,832
| 0
| 0
| null | 2019-08-07T17:51:19
| 2019-08-07T17:51:18
| null |
UTF-8
|
Python
| false
| false
| 2,771
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright 2022 Elliot Jordan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
# pylint: disable=unused-import
from autopkglib import ProcessorError, URLGetter
__all__ = ["RProjectURLProvider"]
# R project Mac downloads page
DL_PAGE = "https://cran.r-project.org/bin/macosx/"
class RProjectURLProvider(URLGetter):
"""Provides a download URL for the latest R Project
(https://cran.r-project.org/) product release."""
input_variables = {
"architecture": {
"required": False,
"description": "Architecture of the R package to download. "
"Possible values are 'x86_64' (Intel) or 'arm64' (Apple Silicon). "
"Defaults to 'x86_64' (Intel).",
},
}
output_variables = {
"url": {"description": "URL to the latest R release."},
"version": {"description": "Version of the latest R release."},
}
description = __doc__
def main(self):
"""Main process."""
# Read and validate input variables
arch = self.env.get("architecture", "x86_64")
if arch not in ("x86_64", "arm64"):
raise ProcessorError("Architecture must be one of: x86_64, arm64")
# Prepare regular expression
if arch == "x86_64":
pattern = r'base\/(?P<file>R-(?P<vers>[\d.]+)\.pkg)">R-[\d.]+\.pkg</a>'
url_base = "https://cran.r-project.org/bin/macosx/base/"
elif arch == "arm64":
pattern = r'base\/(?P<file>R-(?P<vers>[\d.]+)-arm64\.pkg)">R-[\d.]+-arm64\.pkg</a>'
url_base = "https://cran.r-project.org/bin/macosx/big-sur-arm64/base/"
# Get and parse download page contents
download_page_html = self.download(DL_PAGE, text=True)
m = re.search(pattern, download_page_html)
if not m:
raise ProcessorError(f"No match found on {DL_PAGE}")
# Set URL and version in environment
self.env["url"] = url_base + m.groupdict()["file"]
self.output("Found url: %s" % self.env["url"])
self.env["version"] = m.groupdict()["vers"]
self.output("Found version: %s" % self.env["version"])
if __name__ == "__main__":
PROCESSOR = RProjectURLProvider()
PROCESSOR.execute_shell()
|
[
"homebysix@users.noreply.github.com"
] |
homebysix@users.noreply.github.com
|
9b2c682f2ffd0fc52a85cf406d2b07b7f25f23d8
|
2df46cfdd985906dafdef119d1aa5a01a59c6563
|
/shortly/shortlink/migrations/0002_link_visited.py
|
d6c1d3622ec71bb122bf4c86cd3b027d314b4222
|
[] |
no_license
|
KayafasKain/djangoshory
|
f7e0a166b92db646f8556b3b3abe532d678d8aef
|
eb9ab0bd921e096fb981a992a6f9532ab04d8497
|
refs/heads/master
| 2020-03-19T22:37:23.002564
| 2018-06-16T16:56:35
| 2018-06-16T16:56:35
| 136,975,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
# Generated by Django 2.0.6 on 2018-06-11 18:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shortlink', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='link',
name='visited',
field=models.IntegerField(default=0),
),
]
|
[
"bismark9988@gmail.com"
] |
bismark9988@gmail.com
|
9390e7777516dd16c02fa641d3a058bc8f151466
|
f5bf67a840af96182fafaf9528fac2b83fed62fa
|
/homework/day5/problem10.py
|
ce122dff503ec794da8389e42c37826792f276ba
|
[] |
no_license
|
vedantshr/python-learning
|
4a6d4e2d230d8db4b4370a7ef9375bf4c4dc96c2
|
637efb29f2f13c47cd3d9b402735b15657392835
|
refs/heads/main
| 2023-06-28T04:24:25.215630
| 2021-07-28T15:13:11
| 2021-07-28T15:13:11
| 281,611,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,378
|
py
|
class FoodItem:
def __init__(self, item_id, item_name, item_category, item_price):
self.item_id = item_id
self.item_name = item_name
self.item_category = item_category
self.item_price = item_price
def provideDiscount(self, discountPercentage):
return (self.item_price - (self.item_price*discountPercentage)/100)
class Restaurant:
def __init__(self, restaurant_name, fooditem_list):
self.restaurant_name = restaurant_name
self.fooditem_list = fooditem_list
def retrieveUpdatedPrice(self, updatePercentage, itemId):
for objects in self.fooditem_list:
if itemId == objects.item_id:
objects.item_price = objects.provideDiscount(updatePercentage)
return objects
if __name__ == "__main__":
n = int(input())
fooditem_list = []
for i in range(n):
item_id = int(input())
item_name = input()
item_category = input()
item_price = float(input())
obj = FoodItem(item_id, item_name, item_category, item_price)
fooditem_list.append(obj)
itemId = int(input())
updatePercentage = float(input())
obj2 = Restaurant("Moti Mahal", fooditem_list)
updatedPrice = obj2.retrieveUpdatedPrice(updatePercentage, itemId)
print(updatedPrice.item_name, updatedPrice.item_price)
|
[
"vedantsharma@GIRISHs-MacBook-Pro.local"
] |
vedantsharma@GIRISHs-MacBook-Pro.local
|