blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a5557e1e7860c8f253f595c34e566932589397fe | 9d5ae8cc5f53f5aee7247be69142d9118769d395 | /105. Construct Binary Tree from Preorder and Inorder Traversal.py | 07ece7f58fcc51506392b5c3a8ab71150a8ac29c | [] | no_license | BITMystery/leetcode-journey | d4c93319bb555a7e47e62b8b974a2f77578bc760 | 616939d1599b5a135747b0c4dd1f989974835f40 | refs/heads/master | 2020-05-24T08:15:30.207996 | 2017-10-21T06:33:17 | 2017-10-21T06:33:17 | 84,839,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def buildTree(self, preorder, inorder):
        """
        Reconstruct a binary tree from its preorder and inorder traversals.

        :type preorder: List[int]
        :type inorder: List[int]
        :rtype: TreeNode

        Runs in O(n) time/space instead of the naive O(n^2) approach of
        calling ``inorder.index`` and slicing both lists at every level:
        a hash map gives O(1) root lookups and index bounds replace slices.
        Assumes node values are unique (guaranteed by the problem).
        """
        # Map each value to its inorder position for O(1) root lookups.
        pos = {val: i for i, val in enumerate(inorder)}
        # Preorder yields each subtree's root first, in construction order.
        next_root = iter(preorder)

        def build(lo, hi):
            # Build the subtree whose inorder span is inorder[lo:hi + 1].
            if lo > hi:
                return None
            val = next(next_root)
            node = TreeNode(val)
            mid = pos[val]
            # Left subtree is consumed from preorder before the right one.
            node.left = build(lo, mid - 1)
            node.right = build(mid + 1, hi)
            return node

        return build(0, len(inorder) - 1)
"noreply@github.com"
] | BITMystery.noreply@github.com |
861203770fe40eabf96cb4818d736ba3918bcd4f | 46f2834ae92da9e17463def0c635f75bf05886a1 | /abc/abc122/A/main.py | 73450f43c55c708928dfc5e028569620a5730e24 | [] | no_license | replu/atcoder | bf3da10c937c955ca1bc3fa33b8f24c74d2d6c50 | a6183d03355058bccc2b89db5e07b7f72598fea3 | refs/heads/master | 2023-03-30T15:03:47.879783 | 2021-03-28T17:08:19 | 2021-03-28T17:08:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | #!/usr/bin/env python3
import sys
def solve(b: str) -> None:
    # Placeholder emitted by atcoder-tools (see generator note below);
    # the actual problem logic is meant to be filled in here.
    return
# Generated by 1.1.7.1 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
    """Read the first whitespace-delimited token from stdin and solve it."""
    # Lazily stream tokens so we never read more input than needed.
    token_stream = (word for line in sys.stdin for word in line.split())
    b = next(token_stream)  # type: str
    solve(b)
if __name__ == '__main__':
    main()
| [
"n.small.island@gmail.com"
] | n.small.island@gmail.com |
566fef1988f54e052d1f14803548c6240db965c9 | 376b6933872b9110765154094d2c77713da2c853 | /rnaseq/5clusterall/cap3Wrapper.py | b5254bbb0bf980bfe85607197f3869afde7b2059 | [] | no_license | markphuong/geographus-genome | 46b037e7789641895f1a99b8bf6dee3418887600 | a0ff439fbc0c350279359a51321e40e7778f5170 | refs/heads/master | 2020-03-19T07:21:29.297458 | 2018-06-05T04:15:18 | 2018-06-05T04:15:18 | 136,107,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,684 | py | #!/usr/bin/env python
#this concatenates all read files into R1 and R2 files [if you get multiple read files per index from illumina]
import os
import sys
import argparse
import multiprocessing
def get_args():
    """Build the CLI parser and return the parsed arguments.

    No arguments are currently defined; the required --map option is
    commented out pending use of the multiprocessing driver below.
    """
    arg_parser = argparse.ArgumentParser(description="concatenate reads")
    # Group kept so --help still shows a "required arguments" section.
    arg_parser.add_argument_group("required arguments")
    # arg_parser.add_argument("--map", help="textfile with ID that relate to read files you want to concatenate. for ex., index1, index2, index3 (with new lines after each thing)", required=True) #A map file with the sample ID and the fasta file it goes to
    return arg_parser.parse_args()
def concat(element):
	# Run CAP3 assembly on the transcript FASTA, merge contigs + singlets,
	# then cluster at 99% identity with cd-hit-est and copy results out.
	# NOTE(review): ``element``/``index`` is accepted for the multiprocessing
	# driver below but is never referenced in the command template -- every
	# call runs the exact same shell pipeline; confirm this is intended.
	variables = dict(
	index = element)
	commands = """
	/home/phuong/CAP3/cap3 geographus.transcripts.fa > all.cap3.out
	cat geographus.transcripts.fa.cap.contigs geographus.transcripts.fa.cap.singlets > geographus.transcripts.cap3.fasta
	cd-hit-est -i geographus.transcripts.cap3.fasta -o geographus.transcripts.clustered.fasta -c 0.99 > geographus.cd-hit-est
	cp -p * /pylon5/bi4s86p/phuong/geographus.genome/rnaseq/5clusterall/RESULTS
	""".format(**variables)
	# Execute each line of the template as its own shell command.
	cmd_list = commands.split("\n")
	for cmd in cmd_list:
		os.system(cmd)
concat('tes')
#def main():
# args = get_args()
#Make a list with the indexes you want to process
# mylist = []
# with open(args.map) as rfile:
# for line in rfile:
# line = line.strip()
# mylist.append(line)
#start the multiprocessing
# pool = multiprocessing.Pool(10)
# pool.map(concat, mylist)#run the function with the arguments
#if __name__ == "__main__": #run main over multiple processors
# main()
| [
"phuong@br006.pvt.bridges.psc.edu"
] | phuong@br006.pvt.bridges.psc.edu |
9402a70a7e7b3c955d989eb346cb982d1328408e | d51c0aeddb864973ec1171e99e8f174ad622d965 | /baselines/baselines/deepq/experiments/train_cartpole.py | a767a8d208ad52f4e5f63f24ce59ea74005c806b | [
"MIT"
] | permissive | Kiwoo/HVHRL | c42af09faec716e727a6fb4a82171412e66abad9 | b883c6a36655e2d348114e320b953f12dc799fd4 | refs/heads/master | 2021-01-25T14:04:11.848972 | 2018-04-26T08:20:41 | 2018-04-26T08:20:41 | 123,648,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | import gym
from baselines import deepq
def callback(lcl, _glb):
    """Tell the trainer to stop once the agent is "solved".

    Returns True when more than 100 steps have elapsed and the mean of
    the last 100 completed episode rewards is at least 199.
    """
    recent_rewards = lcl['episode_rewards'][-101:-1]
    mean_reward = sum(recent_rewards) / 100
    return lcl['t'] > 100 and mean_reward >= 199
def main():
    """Train a deepq agent and save the learned policy to disk."""
    # NOTE(review): despite the file name (train_cartpole.py) this trains on
    # Pendulum-v0 while saving to cartpole_model.pkl -- confirm which
    # environment is actually intended.
    env = gym.make("Pendulum-v0")
    # Two hidden layers of 256 units for the Q-network.
    model = deepq.models.mlp([256,256])
    act = deepq.learn(
        env,
        q_func=model,
        lr=1e-4,
        max_timesteps=400000,
        buffer_size=50000,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        print_freq=10,
        callback=callback
    )
    print("Saving model to cartpole_model.pkl")
    act.save("cartpole_model.pkl")
if __name__ == '__main__':
    main()
| [
"kiwoo.shin@berkeley.edu"
] | kiwoo.shin@berkeley.edu |
1c8cad55af6edfc57241f881d4848b14031864af | 34de2b3ef4a2478fc6a03ea3b5990dd267d20d2d | /Python/plotting/plotting1/panda_module/remove_whitespace/a.py | 4a80e59c6a1bd9d3df8994693660f2b22cde87a1 | [
"MIT"
] | permissive | bhishanpdl/Programming | d4310f86e1d9ac35483191526710caa25b5f138e | 9654c253c598405a22cc96dfa1497406c0bd0990 | refs/heads/master | 2020-03-26T06:19:01.588451 | 2019-08-21T18:09:59 | 2019-08-21T18:09:59 | 69,140,073 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,682 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : Bhishan Poudel
# Date : Apr 04, 2016
# Ref : http://www.randalolson.com/2014/06/28/how-to-make-beautiful-data-visualizations-in-python-with-matplotlib/
# Imports
import matplotlib.pyplot as plt
import pandas as pd
# Read the data into a pandas DataFrame.
gender_degree_data = pd.read_csv("http://www.randalolson.com/wp-content/uploads/percent-bachelors-degrees-women-usa.csv")
# These are the "Tableau 20" colors as RGB.
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
             (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
             (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
             (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
             (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
tableau20 = [(r / 255., g / 255., b / 255.) for (r, g, b) in tableau20]
# You typically want your plot to be ~1.33x wider than tall. This plot is a rare
# exception because of the number of lines being plotted on it.
# Common sizes: (10, 7.5) and (12, 9)
plt.figure(figsize=(12, 14))
# Remove the plot frame lines. They are unnecessary chartjunk.
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
# Ensure that the axis ticks only show up on the bottom and left of the plot.
# Ticks on the right and top of the plot are generally unnecessary chartjunk.
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Limit the range of the plot to only where the data is.
# Avoid unnecessary whitespace.
plt.ylim(0, 90)
plt.xlim(1968, 2014)
# Make sure your axis ticks are large enough to be easily read.
# You don't want your viewers squinting to read your plot.
plt.yticks(range(0, 91, 10), [str(x) + "%" for x in range(0, 91, 10)], fontsize=14)
plt.xticks(fontsize=14)
# Provide tick lines across the plot to help your viewers trace along
# the axis ticks. Make sure that the lines are light and small so they
# don't obscure the primary data lines.
for y in range(10, 91, 10):
plt.plot(range(1968, 2012), [y] * len(range(1968, 2012)), "--", lw=0.5, color="black", alpha=0.3)
# Remove the tick marks; they are unnecessary with the tick lines we just plotted.
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
# Now that the plot is prepared, it's time to actually plot the data!
# Note that I plotted the majors in order of the highest % in the final year.
majors = ['Health Professions', 'Public Administration', 'Education', 'Psychology',
'Foreign Languages', 'English', 'Communications\nand Journalism',
'Art and Performance', 'Biology', 'Agriculture',
'Social Sciences and History', 'Business', 'Math and Statistics',
'Architecture', 'Physical Sciences', 'Computer Science',
'Engineering']
for rank, column in enumerate(majors):
# Plot each line separately with its own color, using the Tableau 20
# color set in order.
plt.plot(gender_degree_data.Year.values,
gender_degree_data[column.replace("\n", " ")].values,
lw=2.5, color=tableau20[rank])
# Add a text label to the right end of every line. Most of the code below
# is adding specific offsets y position because some labels overlapped.
y_pos = gender_degree_data[column.replace("\n", " ")].values[-1] - 0.5
if column == "Foreign Languages":
y_pos += 0.5
elif column == "English":
y_pos -= 0.5
elif column == "Communications\nand Journalism":
y_pos += 0.75
elif column == "Art and Performance":
y_pos -= 0.25
elif column == "Agriculture":
y_pos += 1.25
elif column == "Social Sciences and History":
y_pos += 0.25
elif column == "Business":
y_pos -= 0.75
elif column == "Math and Statistics":
y_pos += 0.75
elif column == "Architecture":
y_pos -= 0.75
elif column == "Computer Science":
y_pos += 0.75
elif column == "Engineering":
y_pos -= 0.25
# Again, make sure that all labels are large enough to be easily read
# by the viewer.
plt.text(2011.5, y_pos, column, fontsize=14, color=tableau20[rank])
# matplotlib's title() call centers the title on the plot, but not the graph,
# so I used the text() call to customize where the title goes.
# Make the title big enough so it spans the entire plot, but don't make it
# so big that it requires two lines to show.
# Note that if the title is descriptive enough, it is unnecessary to include
# axis labels; they are self-evident, in this plot's case.
plt.text(1995, 93, "Percentage of Bachelor's degrees conferred to women in the U.S.A."
", by major (1970-2012)", fontsize=17, ha="center")
# Always include your data source(s) and copyright notice! And for your
# data sources, tell your viewers exactly where the data came from,
# preferably with a direct link to the data. Just telling your viewers
# that you used data from the "U.S. Census Bureau" is completely useless:
# the U.S. Census Bureau provides all kinds of data, so how are your
# viewers supposed to know which data set you used?
plt.text(1966, -8, "Data source: nces.ed.gov/programs/digest/2013menu_tables.asp"
"\nAuthor: Randy Olson (randalolson.com / @randal_olson)"
"\nNote: Some majors are missing because the historical data "
"is not available for them", fontsize=10)
# Finally, save the figure as a PNG.
# You can also save it as a PDF, JPEG, etc.
# Just change the file extension in this call.
# bbox_inches="tight" removes all the extra whitespace on the edges of your plot.
plt.savefig("percent-bachelors-degrees-women-usa.png", bbox_inches="tight")
| [
"bhishantryphysics@gmail.com"
] | bhishantryphysics@gmail.com |
529e0aa15f413725f9b4e3a6945022c0fd4e7083 | c91d029b59f4e6090a523bf571b3094e09852258 | /src/logistica/migrations/0004_auto_20170711_1738.py | 57bb29d599c643d69a5cac048dfd62e473e192cd | [
"MIT"
] | permissive | anselmobd/fo2 | d51b63ebae2541b00af79448ede76b02638c41f0 | 8e7f8f3d9a296c7da39d0faf38a266e9c6c162ab | refs/heads/master | 2023-08-31T19:59:33.964813 | 2023-08-31T19:50:53 | 2023-08-31T19:50:53 | 92,856,677 | 1 | 0 | MIT | 2023-04-21T21:50:46 | 2017-05-30T17:04:27 | Python | UTF-8 | Python | false | false | 898 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-11 20:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds recipient fields (CNPJ, name) and a
    # "venda" (sale) nature flag to the NotaFiscal model. Generated code is
    # left byte-for-byte; Django introspects these declarations.
    dependencies = [
        ('logistica', '0003_auto_20170711_1212'),
    ]
    operations = [
        migrations.AddField(
            model_name='notafiscal',
            name='dest_cnpj',
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='CNPJ'),
        ),
        migrations.AddField(
            model_name='notafiscal',
            name='dest_nome',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Destinatário'),
        ),
        migrations.AddField(
            model_name='notafiscal',
            name='natu_venda',
            field=models.BooleanField(default=False, verbose_name='venda'),
        ),
    ]
| [
"anselmo.blanco.dominguez+github@gmail.com"
] | anselmo.blanco.dominguez+github@gmail.com |
4a6518abbc5f98a6b738dbfb78048ecc912f48c8 | f336bcdc1eeab553e0d3d1de2ca6da64cd7f27bc | /kline/mail.py | d612df174c3dcde334dfc541671e8a871f20655b | [] | no_license | tonylibing/stockpractice | 04568c017a96815e3796c895e74f11fa128d3ffe | 039e144b3a4cc00e400338174b31fa277df55517 | refs/heads/main | 2023-09-05T03:53:02.565539 | 2021-10-30T22:08:16 | 2021-10-30T22:08:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,716 | py | # coding:utf-8
# 1000元实盘练习程序
# 发送邮件模块
# 参考https://zhuanlan.zhihu.com/p/24180606
# 防止硬编码泄露用户名密码,参考:https://blog.csdn.net/lantian_123/article/details/101518724
# 需在源码目录下自行编辑.env文件,定义USERNAME和PASSWORD的值
import smtplib
from email.mime.text import MIMEText
from dotenv import load_dotenv
import os
# 发送邮件
def sentMail(title, content):
# 加载用户名和密码
load_dotenv()
username = os.getenv("USERNAME")
password = os.getenv("PASSWORD")
senderAddress = username+"@163.com"
# 设置服务器所需信息
mail_host = "smtp.163.com"
# 用户名
mail_user = username
# 密码
mail_pass = password
# 邮件发送方地址
sender = senderAddress
# 接收方地址
receivers = [senderAddress]
# 设置邮件信息
# 邮件内容
message = MIMEText(content, 'plain', 'utf-8')
# 邮件主题
message['Subject'] = title
# 发送方信息
message['From'] = sender
# 接受方信息
message['To'] = receivers[0]
# 登陆并发送邮件
try:
smtpObj = smtplib.SMTP_SSL(mail_host, 465)
# 连接到服务器
# smtpObj.connect(mail_host, 465)
# 登录到服务器
smtpObj.login(mail_user,mail_pass)
# 发送
smtpObj.sendmail(
sender,receivers,message.as_string())
# 退出
smtpObj.quit()
print('发送成功')
except smtplib.SMTPException as e:
print('发送错误', e) #打印错误
if __name__ == "__main__":
sentMail("测试", "这又是一封通过python发送的测试邮件。")
| [
"zwdnet@163.com"
] | zwdnet@163.com |
689c9e0171cd5a49c7db0431676dafac55e3f56c | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-2/dn4 - krajevne funkcije/M-17237-2540.py | 6f6a7e522553764587c45140bc09e8f3a722ee3e | [] | no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,487 | py | def koordinate(ime, kraji):
for name,x,y in kraji:
if name == ime:
return(x,y)
elif name == None:
return None
def razdalja_koordinat(x1, y1, x2, y2):
import math
return math.sqrt(math.pow(x2-x1, 2)+ math.pow(y2-y1,2))
def razdalja(ime1, ime2, kraji):
x_1, y_1 = koordinate(ime1, kraji)
x_2, y_2 =koordinate(ime2, kraji)
return razdalja_koordinat(x_1, y_1, x_2, y_2)
def v_dometu(ime, domet, kraji):
x_1, y_1 = koordinate(ime, kraji)
mesta = []
for name, x_2, y_2 in kraji:
razdalja = razdalja_koordinat(x_1, y_1, x_2, y_2)
if razdalja <= domet and name != ime:
mesta.append(name)
return mesta
def najbolj_oddaljeni(ime, imena, kraji):
x_1, y_1 = koordinate(ime, kraji)
naj_oddaljeni = 0
for name in imena:
x_2, y_2 = koordinate(name, kraji)
razdalja = razdalja_koordinat(x_1, y_2, x_2, y_2)
if naj_oddaljeni<razdalja:
naj_oddaljeni=razdalja
naj_oddaljeni_ime = name
return naj_oddaljeni_ime
def zalijemo(ime, domet, kraji):
naj_razdalja = 0
x_1, y_1 = koordinate(ime, kraji)
for name, x, y in kraji:
x_2 = x
y_2 = y
razdalja = razdalja_koordinat(x_1, y_1, x_2, y_2)
if razdalja < domet :
if naj_razdalja < razdalja:
naj_razdalja = razdalja
naj_ime = name
return naj_ime
| [
"benjamin.fele@gmail.com"
] | benjamin.fele@gmail.com |
204c66a406901687afa7adb378d50f6169acde6a | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2429/60776/289149.py | 8b653e07054c352ba83431c6436e14577e5b51d9 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | a=int(input())
for k in range(0,a):
a=input()
b = input().split(' ')
result=0
for i in range(0, len(b)):
b[i] = int(b[i])
for i in range(0,len(b)):
if result<max(b[i:len(b)])-b[i]:
result=max(b[i:len(b)])-b[i]
print(result) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
8a7e16854616347b5368b07913fd92aefa97e9d2 | d10c5d3603e027a8fd37115be05e62634ec0f0a5 | /08_Statistical-Thinking-in-Python-1/08_ex_3-10.py | 06c4213345f1eafa6d736dac7e4763b791cd08d6 | [] | no_license | stacygo/2021-01_UCD-SCinDAE-EXS | 820049125b18b38ada49ffc2036eab33431d5740 | 027dc2d2878314fc8c9b2796f0c2e4c781c6668d | refs/heads/master | 2023-04-29T01:44:36.942448 | 2021-05-23T15:29:28 | 2021-05-23T15:29:28 | 335,356,448 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | # Exercise 3-10: Sampling out of the Binomial distribution
import numpy as np
import matplotlib.pyplot as plt
from functions import ecdf
np.random.seed(42)
# Take 10,000 samples out of the binomial distribution: n_defaults
n_defaults = np.random.binomial(100, 0.05, size=10000)
# Compute CDF: x, y
x, y = ecdf(n_defaults)
# Plot the CDF with axis labels
plt.plot(x, y, marker='.', linestyle='none')
plt.xlabel('number of defaults')
plt.ylabel('CDF')
# Show the plot
plt.show()
| [
"stacy.gorbunova@gmail.com"
] | stacy.gorbunova@gmail.com |
8a37254576bc719509e3ff8e4e2ec21a1bace1e1 | 0fc6ff5eb90ced71a3927b0e326481d40b020e66 | /validate/__init__.py | 78fd72485db94da66de27612181fbd5d36b3e316 | [] | no_license | chairco/lazy_email_validate | 155c8f8020a13a7c527c7d56f42014f5d75e9fdb | 169ed825894c21a0a04841d1e82b3a2b3a7df802 | refs/heads/master | 2021-01-21T14:19:40.946601 | 2017-06-26T07:38:53 | 2017-06-26T07:38:53 | 95,269,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | # -*- coding: utf-8 -*-
# Package metadata for the ``validate`` distribution.
__version__ = '0.0.1'
__author__ = 'chairco'
__email__ = 'chairco@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2017, chairco.'
| [
"chairco@gmail.com"
] | chairco@gmail.com |
e4ff02c8eee5d437306988c001615f0dfcb35b4a | 8799cbe3a261fea3ff05af2fba7e3eade40b57f5 | /SocialMedia/home/migrations/0001_initial.py | c845d53ec77a6f58857930c9c7677535d3ca89ee | [] | no_license | Anoop-Suresh/Training | 83b5759db0d2113bb90731b243a1dd2d5be5992f | e6f4dd8a77fec058917dd25c424a1f3afc7df236 | refs/heads/master | 2022-11-30T08:18:21.432284 | 2019-10-13T03:48:15 | 2019-10-13T03:48:15 | 190,737,085 | 0 | 0 | null | 2022-11-22T04:17:20 | 2019-06-07T12:05:47 | Python | UTF-8 | Python | false | false | 982 | py | # Generated by Django 2.2.4 on 2019-08-09 06:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Profile model with a
    # one-to-one link to the configured auth user model. Keep the generated
    # structure byte-for-byte; Django introspects these declarations.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=250)),
                ('email', models.EmailField(blank=True, max_length=254)),
                ('phone', models.IntegerField(default=0, null=True)),
                ('image', models.FileField(blank=True, upload_to='media/images/')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"anoopsuresh.sayonetech@gmail.com"
] | anoopsuresh.sayonetech@gmail.com |
81d5962d85aef36a06189c0aeba78b1165d363b6 | 989eea1d9110972ec6b2f4cedcc1759c4859a7c0 | /RemoteClientMain.py | 861c279cb9ebcc314dbc586755a41b093f7b0190 | [] | no_license | writefaruq/EpuckDistributedClient | 9d09a58ad95e6905912f93f285d520e890d0a489 | 499a2a633654dbe20b183a7ee2d35151d2075aff | refs/heads/master | 2020-05-31T17:16:32.110668 | 2010-05-27T09:06:24 | 2010-05-27T09:06:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,915 | py | #!/usr/bin/python
from multiprocessing.managers import BaseManager
from multiprocessing import *
import time
import sys
import logging, logging.config, logging.handlers
# Configure logging from an absolute path (machine-specific) and grab the
# experiment's named logger. The backslash-newline inside the string literal
# is an escaped line break, so the configured path has no embedded newline.
logging.config.fileConfig("\
/home/newport-ril/centralized-expt/EpuckDistributedClient/logging-remote.conf")
logger = logging.getLogger("EpcLogger")
from EpuckDistributedClient.data_manager import *
from EpuckDistributedClient.device_controller_remote import *
from RILCommonModules.RILSetup import *
class RemoteManager(BaseManager):
    # Thin multiprocessing manager used purely as a client-side proxy type;
    # the shared object itself lives in the experiment server process.
    pass
# Expose the server's registered 'get_target' callable on the proxy.
RemoteManager.register('get_target')
def main():
    # Start the device-controller child process and block on it until the
    # user interrupts. ``device_controller`` is a module global created in
    # the __main__ block below -- this function assumes it exists.
    logging.debug("--- Start EPC---")
    device_controller.start()
    # Brief pause so the child can finish its own startup before we join.
    time.sleep(2)
    try:
        device_controller.join()
    except (KeyboardInterrupt, SystemExit):
        logging.debug("--- End EPC---")
        print "User requested exit..ClientMain shutting down now"
        sys.exit(0)
if __name__ == '__main__':
    # parse robot id (exactly one positional argument is accepted)
    numargs = len(sys.argv) - 1
    if numargs > 1 or numargs < 1:
        print "usage:" + sys.argv[0] + " <robot id >"
        sys.exit(1)
    else:
        robotid = int(sys.argv[1])
    # Each robot's data manager listens on base port + robot id.
    DATA_MGR_PORT = EXPT_SERVER_PORT_BASE + robotid
    # connect to server's data manager
    mgr = RemoteManager(address=(EXPT_SERVER_IP, DATA_MGR_PORT), authkey="123")
    mgr.connect()
    datamgr = mgr.get_target()
    # Sanity check: the server on this port must serve the same robot id.
    myid = datamgr.GetRobotID()
    if int(myid) != robotid:
        print "robot id: " + str(robotid) + "and DataMgr port: " +\
            str(DATA_MGR_PORT) + "mismatch -- check both are started..."
        sys.exit(1)
    # setup processes
    device_controller = Process(\
        target=controller_main,\
        name="DeviceController",
        args=(datamgr,))
    #print tgt.GetRobotPose()
    #print tgt.GetTaskInfo()
    ##print tgt.IsRobotPoseAvailable()
    ##tgt.SetSelectedTaskStarted()
    #print tgt.IsSelectedTaskAvailable()
    #print tgt.GetSelectedTask()
    main()
| [
"Mdomarfaruque.Sarker@newport.ac.uk"
] | Mdomarfaruque.Sarker@newport.ac.uk |
b04ec232dad42146af4dce5ba79fdd3343c9be31 | fc29ccdcf9983a54ae2bbcba3c994a77282ae52e | /Leetcode_By_Topic/bfs_pq-407.py | 5dc3ac844da50106fb2abfa7e638275b9d16f6e8 | [] | no_license | linnndachen/coding-practice | d0267b197d9789ab4bcfc9eec5fb09b14c24f882 | 5e77c3d7a0632882d16dd064f0aad2667237ef37 | refs/heads/master | 2023-09-03T19:26:25.545006 | 2021-10-16T16:29:50 | 2021-10-16T16:29:50 | 299,794,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,142 | py | # the reason why we are using pq is because we are always
# looking for the smallest/lowest height
class Solution:
    def trapRainWater(self, heightMap: List[List[int]]) -> int:
        """Compute trapped rain water on a 2-D elevation map (LeetCode 407).

        Flood-fills inward from the border using a min-heap, always expanding
        from the lowest boundary cell so the current heap minimum bounds the
        water level. Visited cells are marked -1 in place, so ``heightMap``
        is consumed by this call.
        """
        rows, cols = len(heightMap), len(heightMap[0])
        boundary = []
        total = 0
        # Seed the heap with every border cell; mark each as visited.
        for r in range(rows):
            for c in range(cols):
                if r == 0 or r == rows - 1 or c == 0 or c == cols - 1:
                    heapq.heappush(boundary, (heightMap[r][c], r, c))
                    heightMap[r][c] = -1
        # Pop the lowest wall cell and relax its interior neighbours.
        while boundary:
            level, r, c = heapq.heappop(boundary)
            for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
                if 0 < nr < rows - 1 and 0 < nc < cols - 1 and heightMap[nr][nc] != -1:
                    cell = heightMap[nr][nc]
                    # Water fills up to the surrounding wall level, if lower.
                    if level > cell:
                        total += level - cell
                    # The effective wall at this cell is the higher of the two.
                    heapq.heappush(boundary, (max(cell, level), nr, nc))
                    heightMap[nr][nc] = -1
        return total
"lchen.msc2019@ivey.ca"
] | lchen.msc2019@ivey.ca |
00f59bbf612888b96061b5e1941b039693ae8bd6 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_integration_runtime_connection_infos_operations.py | 025f88c15bb2e51084fa176d23b21e8d83390cf2 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 5,248 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class IntegrationRuntimeConnectionInfosOperations:
"""IntegrationRuntimeConnectionInfosOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Keep references to the shared pipeline client, service configuration,
        # and model (de)serializers used by the operation methods of this group.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
async def get(
self,
resource_group_name: str,
workspace_name: str,
integration_runtime_name: str,
**kwargs
) -> "_models.IntegrationRuntimeConnectionInfo":
"""Get integration runtime connection info.
Get connection info for an integration runtime.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param integration_runtime_name: Integration runtime name.
:type integration_runtime_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IntegrationRuntimeConnectionInfo, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.IntegrationRuntimeConnectionInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IntegrationRuntimeConnectionInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'integrationRuntimeName': self._serialize.url("integration_runtime_name", integration_runtime_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('IntegrationRuntimeConnectionInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/integrationRuntimes/{integrationRuntimeName}/getConnectionInfo'} # type: ignore
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
6aa9adbe021700f4492779c1e1475e5eae105d7b | 1e9c9f2a9639db7cdb032aae69cb4d99aef1d3a5 | /hackerEarth/practice/dataStructures/advancedDataStructures/segmentTrees/chemicalReaction.py | abb01081b288bab7983f9f021a980fb0f4895a1c | [
"MIT"
] | permissive | sagarnikam123/learnNPractice | f0da3f8acf653e56c591353ab342765a6831698c | 1b3b0cb2cff2f478006626a4c37a99102acbb628 | refs/heads/master | 2023-02-04T11:21:18.211654 | 2023-01-24T14:47:52 | 2023-01-24T14:47:52 | 61,184,927 | 2 | 1 | MIT | 2022-03-06T11:07:18 | 2016-06-15T06:57:19 | Python | UTF-8 | Python | false | false | 4,202 | py | # Chemical Reaction
#######################################################################################################################
#
# Ani and his Favourite Chemistry Teacher Lissa were performing an Experiment in the Chemistry Lab. Experiment
# involves a N step Chemical Reaction. An N step Chemical Reaction requires N different reactants from the
# periodic table . (Do you know about Periodic Table? No , still you can try solving this problem). N elements
# are stacked in the bottom up manner with their reacting times. Look at the given figure.
# image
#
# Lissa is very good at performing experiment (offcourse as she is a chemistry teacher). So, she is doing the
# actual job alone. Ani is there only to provide her a helping hand. After every step, Lissa ordered Ani to put
# kth element from the stack (counting start from bottom) into the ongoing chemical reaction and record the
# expected time taken by the chemical reaction to be accomplished.
# Expected Time of a Chemical reaction is defined as the maximum of reacting time of all the reactants present
# in the chemical reaction at that instant of time.
# Considering a 6 step Chemical reaction with the same set of reactants given above. Let the order of elements
# given by Lissa to Ani follows this list.
# Note that the list contains N-1 elements only.
#
# 2 2 1 2 2
#
# Step 1: Ani puts the second element from the bottom i.e titanium into the chemical reaction and records
# the expected time as 799 .
# New stack configuration :: image
# Step 2: Ani puts the second element from the bottom i.e barium into the chemical reaction and records
# the expected time as 799.
# New stack configuration ::image
# Step 3: Ani puts the first element from the bottom i.e zinc into the chemical reaction and records
# the expected time as 999.
# New stack configuration ::image
# Step 4: Ani puts the second element from the bottom i.e potassium into the chemical reaction and records
# the expected time as 999.
# New stack configuration ::image
# Step 5: Ani puts the second element from the bottom i.e sodium into the chemical reaction and records
# the expected time as 999.
# New stack configuration ::image
# As there is only one element left on the stack in the end. Ani puts that element into the reaction without
# asking his teacher (He is over-smart actually ). While doing this, he dropped some chemical on the record
# taken by him. This made Miss Lissa very angry and she decided to punish him. Ani does not want to be punished
# by his favourite teacher. So, can you save him from being punished ?. Can you generate same record for him.
#
# Input:
# First line of input contains a single integer T denoting the number of Experiments to be performed.
# Next 4*T lines contains description of each experiment. Each experiment's description consists of 4 lines.
# First line of description contains a single integer N denoting the order of reaction (number of reactants).
# Next line of description contains N space separated strings i.e names of reactants. Next line of description
# contains N integers denoting the reacting time of each element. Next line of description contains N-1 integers
# denoting the ordered list of elements given by Lissa to Ani.
#
# Output:
# For each Experiment, Output consists of N lines where ith line contains one string (name of Element added in
# the ith step) and expected time of the Chemical Reaction after ith step.
#
# Constraints:
# 1 <= T <=10
# 1 <= N <= 5*105
# *Element names composed of only lower case letters *
# 1 <=|Reactant's name| <= 10
# 0 <= Reacting time <= 109
# sum of all N over all the test cases is 5*109
#
# NOTE:
# Prefer to use Printf / Scanf instead of cin / cout (very large input set).
#
# SAMPLE INPUT
# 1
# 6
# zinc titanium barium lithium potassium sodium
# 999 799 600 140 7 100
# 2 2 1 2 2
#
# SAMPLE OUTPUT
# titanium 799
# barium 799
# zinc 999
# potassium 999
# sodium 999
# lithium 999
#
#######################################################################################################################
| [
"sagarnikam123@gmail.com"
] | sagarnikam123@gmail.com |
a09ca7216adbbeef3f5c000eb167936a1f1ed25e | 49f61714a6f78d984fd2194d6064d84e891bc5b7 | /2019-1/230/users/4041/codes/1683_2471.py | 825a290d780306bb8586be7773cb306f47c9bd76 | [] | no_license | psbarros/Variaveis3 | b5c4e1517e7d94a846ee03791d25d5821a1c651c | 3dcf6f810709ce03c78335acf9533e008a2ae125 | refs/heads/master | 2023-06-13T07:05:00.878430 | 2021-07-06T17:51:37 | 2021-07-06T17:51:37 | 383,549,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | idade = int(input("insira sua idade: "))
imc = float(input("insira sua imc: "))
if(idade<45 and imc<22.0):
print("Entradas:", idade, "anos e IMC", imc)
risco = "Baixo"
print("Risco:", risco)
elif(idade<45 and imc>=22.0):
print("Entradas:", idade, "anos e IMC", imc)
risco = "Medio"
print("Risco:", risco)
elif(idade>=45 and imc<22.0):
print("Entradas:", idade, "anos e IMC", imc)
risco = "Medio"
print("Risco:", risco)
elif(idade>=45 and imc>=22.0):
print("Entradas:", idade, "anos e IMC", imc)
risco = "Alto"
print("Risco:", risco)
elif(idade<=0 and idade>130 and imc<=0):
print("Entradas:", idade, "anos e IMC", imc)
print("Dados invalidos") | [
"psb@icomp.ufam.edu.br"
] | psb@icomp.ufam.edu.br |
e3e4c8b3377b575c08d598c785954c535352ffad | 55a273347cb103fe2b2704cb9653956956d0dd34 | /code/tmp_rtrip/test/mp_preload.py | e346c5f11d3c41466fad2388a00794a035b708d4 | [
"MIT"
] | permissive | emilyemorehouse/ast-and-me | 4af1bc74fc967ea69ac1aed92664f6428acabe6a | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | refs/heads/master | 2022-11-18T03:50:36.505882 | 2018-05-12T17:53:44 | 2018-05-12T17:53:44 | 115,035,148 | 25 | 1 | MIT | 2022-11-04T11:36:43 | 2017-12-21T18:27:19 | Python | UTF-8 | Python | false | false | 318 | py | import multiprocessing
multiprocessing.Lock()
def f():
print('ok')
if __name__ == '__main__':
ctx = multiprocessing.get_context('forkserver')
modname = 'test.mp_preload'
__import__(modname)
ctx.set_forkserver_preload([modname])
proc = ctx.Process(target=f)
proc.start()
proc.join()
| [
"emily@cuttlesoft.com"
] | emily@cuttlesoft.com |
f510490adc4d4b3e88b0b150cfd42fd22a80f11f | 0afc497dafc54da5fe3e88aea7f7244f43767259 | /Vents/migrations/0003_auto_20170522_1321.py | c2c0e77eeb4e557c7139a8037990a1f9016d62d2 | [] | no_license | matheo97/Multitenant-Django-app | 816da2f37c73487f82ecd26f9c52132609558d53 | 555bc4b737a52e4446f4de78e21c22a2206336fb | refs/heads/master | 2020-04-26T15:56:40.550895 | 2019-03-06T13:30:35 | 2019-03-06T13:30:35 | 173,662,386 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-05-22 18:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Vents', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='ventaregistrada',
name='direccion',
field=models.CharField(default='', max_length=100, null=True),
),
migrations.AddField(
model_name='ventaregistrada',
name='nombre',
field=models.CharField(default='', max_length=100, null=True),
),
migrations.AddField(
model_name='ventaregistrada',
name='tarjeta',
field=models.CharField(default='', max_length=100, null=True),
),
]
| [
"mateo.salazar@correounivalle.edu.co"
] | mateo.salazar@correounivalle.edu.co |
3ba9446a9bf04ecf5c8fc5716de850d07d603a73 | 0796e7c0ce6c9e1d4dc820873d3c1ff15804b312 | /test.py | cdc0c3cb8f51040a1a4b1e6eb52cae36ca43a34b | [] | no_license | parwisenlared/GAKeras | 8ef9c3ab1af7a93cbe3bfc95e9f5b072b54aac29 | b5ad2e3a9aa6e4b774a97b5add2606d0406c3804 | refs/heads/master | 2022-11-16T02:12:29.350678 | 2018-11-07T12:39:20 | 2018-11-07T12:39:20 | 278,607,991 | 0 | 0 | null | 2020-07-10T10:39:34 | 2020-07-10T10:39:33 | null | UTF-8 | Python | false | false | 2,694 | py | import unittest
from convindividual import ConvIndividual
from mutation import MutationConv
from crossover import CrossoverConv
from fitness import Fitness
from config import Config
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
class IndividualTest(unittest.TestCase):
def xtest_net(self):
input_shape = (28,28,1)
model = Sequential()
model.add(MaxPooling2D(pool_size=(3,3), input_shape = input_shape))
print("----->", model.layers[-1].output_shape)
model.add(MaxPooling2D(pool_size=(3,3)))
print("----->", model.layers[-1].output_shape)
model.add(MaxPooling2D(pool_size=(3,3)))
print("----->", model.layers[-1].output_shape)
if model.layers[-1].output_shape[1] >= 2 and model.layers[-1].output_shape[2] >= 2:
model.add(MaxPooling2D(pool_size=(2,2)))
print("----->", model.layers[-1].output_shape)
model.add(Flatten())
#model.add(Convolution2D(20, 5, 5, border_mode='same'))
#model.add(MaxPooling2D(pool_size=(2,2)))
#model.add(MaxPooling2D(pool_size=(2,2)))
#model.add(MaxPooling2D(pool_size=(2,2)))
#model.add(Flatten())
model.summary()
def test_create_individual(self):
Config.input_shape = (28,28,1)
Config.noutputs = 10
for i in range(1000):
print("--------------------- start {} -------------------".format(i))
ind = ConvIndividual()
ind.randomInit()
print(ind)
net = ind.createNetwork()
net.summary()
def xtest_evaluate(self):
ind = ConvIndividual()
ind.randomInit()
print(ind)
fit = Fitness("data/mnist2d.train")
print("evaluating")
print( fit.evaluate(ind) )
def xtest_mutation(self):
print(" *** test mutation *** ")
Config.input_shape = (28,28,1)
Config.noutputs = 10
ind = ConvIndividual()
ind.randomInit()
print(ind)
mut = MutationConv()
mut.mutate(ind)
print(ind)
def xtest_crossover(self):
print(" *** test crossover *** ")
Config.input_shape = (28,28,1)
Config.noutputs = 10
ind1 = ConvIndividual()
ind1.randomInit()
print(ind1)
ind2 = ConvIndividual()
ind2.randomInit()
print(ind2)
cross = CrossoverConv()
off1, off2 = cross.cxOnePoint(ind1, ind2)
print(off1)
print(off2)
if __name__ == "__main__":
unittest.main()
| [
"petra.vidnerova@gmail.com"
] | petra.vidnerova@gmail.com |
8f7c292fb090ecefffa1b0e549a821774baa67a5 | 215a7b5d0fcbfd85a3770a981fa2031f733e98db | /week8/133clone_graph.py | 03ccc307d099c643e4bcc7768ef16b4e90e7e38a | [] | no_license | shivani-aradhya/Leetcode-DSA | 7b571de15ef216a5a17f91dbfc895bd69ce8789e | c73e3ed8112454167d381bd497665021e36d1257 | refs/heads/main | 2023-07-09T10:59:38.987614 | 2021-08-18T05:50:44 | 2021-08-18T05:50:44 | 334,694,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | class UndirectedGraphNode(object):
def __init__(self, x):
self.label = x
self.neighbors = []
class Solution(object):
def cloneGraph(self, node):
if node is None:
return None
cloned_node = UndirectedGraphNode(node.label)
cloned, queue = {node:cloned_node}, [node]
while queue:
current = queue.pop()
for neighbor in current.neighbors:
if neighbor not in cloned:
queue.append(neighbor)
cloned_neighbor = UndirectedGraphNode(neighbor.label)
cloned[neighbor] = cloned_neighbor
cloned[current].neighbors.append(cloned[neighbor])
return cloned[node] | [
"65466500+shivani-aradhya@users.noreply.github.com"
] | 65466500+shivani-aradhya@users.noreply.github.com |
0efc77aecb76941ad9ba6674deec524dec657bec | f0adca7cac7fb12cdb89e7e821559fe2603bf4bc | /src/199/recipe_199_02.py | f56c8c403955dc27daf67afa541b742e89b2df67 | [] | no_license | eriamavro/python-recipe-src | dccfa06bc56fcc713f8da9e466f04d07c1f961f0 | d14f3e4cd885515e9a9a7b8e3f064609c8e50fad | refs/heads/master | 2023-02-13T02:08:44.531621 | 2021-01-14T12:03:05 | 2021-01-14T12:03:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | from datetime import datetime, date, time, timedelta
# 2021/12/22のdate型を生成
d1 = date(2021, 12, 22)
# 2021/12/22 12:00:30のdatetime型を生成
dt1 = datetime(2021, 12, 22, 12, 00, 30)
# 100日分のtimedelta型を生成
delta = timedelta(days=100)
# 100日前の日付を計算
d2 = d1 - delta
dt2 = dt1 - delta
# 計算結果をprint出力
print(d2)
print(dt2)
| [
"kurozumi.ta@gmail.com"
] | kurozumi.ta@gmail.com |
548e197af8cd6dcb51d001c3700a9359ef7afc89 | 5a8214b3a452c574e6c883bf5d90ba58ba87c461 | /leetcode/434.number-of-segments-in-a-string.py | 056b6cffd5334e0bade58dda9fd8cc91cc2205e2 | [] | no_license | phlalx/algorithms | 69a3c8519687816e3c6333ec12b40659d3e3167f | f4da5a5dbda640b9bcbe14cb60a72c422b5d6240 | refs/heads/master | 2023-02-03T10:30:30.181735 | 2020-12-26T09:47:38 | 2020-12-26T09:47:38 | 129,254,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | #
# @lc app=leetcode id=434 lang=python3
#
# [434] Number of Segments in a String
#
# https://leetcode.com/problems/number-of-segments-in-a-string/description/
#
# algorithms
# Easy (37.22%)
# Likes: 161
# Dislikes: 619
# Total Accepted: 63.8K
# Total Submissions: 171K
# Testcase Example: '"Hello, my name is John"'
#
# Count the number of segments in a string, where a segment is defined to be a
# contiguous sequence of non-space characters.
#
# Please note that the string does not contain any non-printable characters.
#
# Example:
#
# Input: "Hello, my name is John"
# Output: 5
#
#
#
# @lc code=start
class Solution:
def countSegments(self, s: str) -> int:
res = 0
prev = ' '
for cur in s:
if cur != ' ' and prev == ' ':
res += 1
prev = cur
return res
# @lc code=end
| [
"phlalx@users.noreply.github.com"
] | phlalx@users.noreply.github.com |
694353b37809e6c6e79c255f2e73c685974086a1 | dd65b9bc9475a6cc58817fd45c078e5a6abae241 | /VISION/FT700/ch11/11-1.py | 9c19edae1bc84ef389208f2341f5ce56b5c947e7 | [] | no_license | jumbokh/gcp_class | 5b68192ab4ad091362d89ad667c64443b3b095bb | 0a8e2663bfb5b01ce20146da178fa0c9bd7c6625 | refs/heads/master | 2021-10-22T09:22:04.634899 | 2021-10-21T12:46:10 | 2021-10-21T12:46:10 | 228,617,096 | 8 | 7 | null | 2021-08-25T15:55:30 | 2019-12-17T12:58:17 | Python | UTF-8 | Python | false | false | 465 | py | import cv2
def get_edge(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # 灰階處理
blur = cv2.GaussianBlur(gray, (13, 13), 0) # 高斯模糊
canny = cv2.Canny(blur, 50, 150) # 邊緣偵測
return canny
#----------------------------------------------#
img = cv2.imread('road.jpg') # 讀取圖片
edge = get_edge(img)
cv2.imshow('Edge', edge) # 顯示邊緣圖
cv2.waitKey(0)
cv2.destroyAllWindows() | [
"jumbokh@gmail.com"
] | jumbokh@gmail.com |
230fda537b2e0091fc4d861174f5188b3647a1ea | 0daf6763c960cd898e9bb5612b1314d7e34b8870 | /sorting/data.py | a062769b4b09c3f3f46732a2430c4454d8f6e24c | [
"MIT"
] | permissive | evanthebouncy/nnhmm | a6ba2a1f0ed2c90a0188de8b5e162351e6668565 | acd76edaa1b3aa0c03d39f6a30e60d167359c6ad | refs/heads/master | 2021-01-12T02:27:32.814908 | 2017-04-01T05:01:24 | 2017-04-01T05:01:24 | 77,956,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,535 | py | import numpy as np
from draw import *
# total number of observations
OBS_SIZE = 20
# length of the field i.e. LxL field
N_BATCH = 50
L = 8
# ------------------------------------------------------------------ helpers
# turn a coordinate to a pair of numpy objects
def vectorize(coords):
retX, retY = np.zeros([L]), np.zeros([L])
retX[coords[0]] = 1.0
retY[coords[1]] = 1.0
return retX, retY
# show dimension of a data object (list of list or a tensor)
def show_dim(lst1):
if hasattr(lst1, '__len__') and len(lst1) > 0:
return [len(lst1), show_dim(lst1[0])]
else:
try:
return lst1.get_shape()
except:
try:
return lst1.shape
except:
return type(lst1)
# --------------------------------------------------------------- modelings
# generate the hidden state
def gen_X():
return np.random.permutation(L)
def gen_A(X):
return np.argmin(X), np.argmax(X)
def mk_query(X):
def query(O):
Ox, Oy = O
if X[Ox] < X[Oy]:
return [1.0, 0.0]
return [0.0, 1.0]
return query
def gen_O(X):
query = mk_query(X)
Ox = np.random.randint(0, L)
Oy = np.random.randint(0, L)
O = (Ox, Oy)
return O, query(O)
# data of the form of
# A: the answer we're trying to infer
# obs: the OBS_SIZE number of observations
# divided into obs_x and obs_y
# obs_tfs: the true/false of these observations
# all variables are a list of tensors of dimention [n_batch x ...]
def gen_data(n_batch = N_BATCH):
# Answer
new_ob_x = []
new_ob_y = []
new_ob_tf = []
obs_x = [[] for i in range(OBS_SIZE)]
obs_y = [[] for i in range(OBS_SIZE)]
obs_tfs = [[] for i in range(OBS_SIZE)]
orig_x = []
for bb in range(n_batch):
# generate a hidden variable X
perm = gen_X()
orig_x.append(perm)
new_obb_xy, new_obb_tf = gen_O(perm)
new_obb_x, new_obb_y = vectorize(new_obb_xy)
new_ob_x.append(new_obb_x)
new_ob_y.append(new_obb_y)
new_ob_tf.append(new_obb_tf)
# generate observations for this hidden variable x
for ob_idx in range(OBS_SIZE):
_ob_coord, _ob_lab = gen_O(perm)
_ob_x, _ob_y = vectorize(_ob_coord)
obs_x[ob_idx].append(_ob_x)
obs_y[ob_idx].append(_ob_y)
obs_tfs[ob_idx].append(_ob_lab)
return np.array(obs_x, np.float32),\
np.array(obs_y, np.float32),\
np.array(obs_tfs, np.float32),\
np.array(new_ob_x, np.float32),\
np.array(new_ob_y, np.float32),\
np.array(new_ob_tf, np.float32),\
orig_x
| [
"evanthebouncy@gmail.com"
] | evanthebouncy@gmail.com |
0c3826235cc92f3219c43f5b9bce8807ac403ebb | 2fd0c65aa0f72133f773dac5d9a5c48fe9e26fac | /Dsz/PyScripts/Lib/dsz/mca/status/cmd/handles/types.py | 6a1f4c77e8aa675f29091252ea44fdbd6e6ae2b8 | [] | no_license | FingerLeakers/DanderSpritz_docs | f5d2430e0b86b1b2f0684f02ddd4fa973a5a7364 | d96b6a71c039b329f9f81544f645857c75360e7f | refs/heads/master | 2021-01-25T13:05:51.732149 | 2018-03-08T01:22:49 | 2018-03-08T01:22:49 | 123,527,268 | 2 | 0 | null | 2018-03-02T03:48:31 | 2018-03-02T03:48:30 | null | UTF-8 | Python | false | false | 1,171 | py | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, Feb 6 2017, 23:53:20)
# [GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)]
# Embedded file name: types.py
from types import *
MSG_KEY_PARAMS_QUERY = 65536
MSG_KEY_PARAMS_QUERY_PROCESS_ID = 65537
MSG_KEY_PARAMS_QUERY_ALL = 65538
MSG_KEY_PARAMS_QUERY_MEMORY = 65539
MSG_KEY_PARAMS_DUPLICATE = 131072
MSG_KEY_PARAMS_DUPLICATE_PROCESS_ID = 131073
MSG_KEY_PARAMS_DUPLICATE_HANDLE = 131074
MSG_KEY_PARAMS_CLOSE = 196608
MSG_KEY_PARAMS_CLOSE_PROCESS_ID = 196609
MSG_KEY_PARAMS_CLOSE_HANDLE = 196610
MSG_KEY_RESULT_HANDLE = 1114112
MSG_KEY_RESULT_HANDLE_PROCESS_ID = 1114113
MSG_KEY_RESULT_HANDLE_HANDLE = 1114114
MSG_KEY_RESULT_HANDLE_RIGHTS = 1114115
MSG_KEY_RESULT_HANDLE_TYPE = 1114116
MSG_KEY_RESULT_HANDLE_METADATA = 1114117
MSG_KEY_RESULT_DUPLICATE = 1179648
MSG_KEY_RESULT_DUPLICATE_ORIG_PROCESS_ID = 1179649
MSG_KEY_RESULT_DUPLICATE_ORIG_HANDLE = 1179650
MSG_KEY_RESULT_DUPLICATE_NEW_PROCESS_ID = 1179651
MSG_KEY_RESULT_DUPLICATE_NEW_HANDLE = 1179652
MSG_KEY_RESULT_CLOSE = 1245184
MSG_KEY_RESULT_CLOSE_PROCESS_ID = 1245185
MSG_KEY_RESULT_CLOSE_HANDLE = 1245186 | [
"francisck@protonmail.ch"
] | francisck@protonmail.ch |
83c0df6ea2b7386857e988a1a82a8102befb33e4 | 2ce3ef971a6d3e14db6615aa4da747474d87cc5d | /练习/python框架/flask_test/flask_demo/flask_test/flask_sql_test.py | fe3af6542c189d8a31672803529966213bef0dfc | [] | no_license | JarvanIV4/pytest_hogwarts | 40604245807a4da5dbec2cb189b57d5f76f5ede3 | 37d4bae23c030480620897583f9f5dd69463a60c | refs/heads/master | 2023-01-07T09:56:33.472233 | 2020-11-10T15:06:13 | 2020-11-10T15:06:13 | 304,325,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | # -*- coding: utf-8 -*-
# @Time : 2020/02/16
# @Author : Wind
from 练习.python框架.flask_test.flask_demo.flask_SQLalchemy_demo import *
class FlaskSQL:
def __init__(self):
db.drop_all() # 删除表
db.create_all() # 创建表
global role, user
role = Role(name='admin')
db.session.add(role)
db.session.commit()
user = User(name='heima', role_id=role.id)
db.session.add(user)
db.session.commit()
def add(self):
# 新增数据
pass
# role = Role(name='admin')
# db.session.add(role)
# db.session.commit()
#
# user = User(name='heima', role_id=role.id)
# db.session.add(user)
# db.session.commit()
def update(self):
# 修改数据
user.name = 'chengxuyuan'
db.session.commit()
def delete(self):
# 删除数据
db.session.delete(user)
db.session.commit()
if __name__ == '__main__':
flask = FlaskSQL()
# flask.add()
flask.update()
# flask.delete() | [
"2268035948@qq.com"
] | 2268035948@qq.com |
64a9454f5620d0735efc3b811ed47a1cceb58908 | fc1c1e88a191b47f745625688d33555901fd8e9a | /meraki_sdk/models/update_device_switch_port_model.py | 706fb93e1ed903692e8113465e3b2e2c937d86e3 | [
"MIT",
"Python-2.0"
] | permissive | RaulCatalano/meraki-python-sdk | 9161673cfd715d147e0a6ddb556d9c9913e06580 | 9894089eb013318243ae48869cc5130eb37f80c0 | refs/heads/master | 2022-04-02T08:36:03.907147 | 2020-02-03T19:24:04 | 2020-02-03T19:24:04 | 416,889,849 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,244 | py | # -*- coding: utf-8 -*-
"""
meraki_sdk
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
class UpdateDeviceSwitchPortModel(object):
"""Implementation of the 'updateDeviceSwitchPort' model.
TODO: type model description here.
Attributes:
name (string): The name of the switch port
tags (string): The tags of the switch port
enabled (bool): The status of the switch port
mtype (string): The type of the switch port ("access" or "trunk")
vlan (int): The VLAN of the switch port. A null value will clear the
value set for trunk ports.
voice_vlan (int): The voice VLAN of the switch port. Only applicable
to access ports.
allowed_vlans (string): The VLANs allowed on the switch port. Only
applicable to trunk ports.
poe_enabled (bool): The PoE status of the switch port
isolation_enabled (bool): The isolation status of the switch port
rstp_enabled (bool): The rapid spanning tree protocol status
stp_guard (string): The state of the STP guard ("disabled", "Root
guard", "BPDU guard", "Loop guard")
access_policy_number (int): The number of the access policy of the
switch port. Only applicable to access ports.
link_negotiation (string): The link speed for the switch port
port_schedule_id (string): The ID of the port schedule. A value of
null will clear the port schedule.
udld (UdldEnum): The action to take when Unidirectional Link is
detected (Alert only, Enforce). Default configuration is Alert
only.
mac_whitelist (list of string): Only devices with MAC addresses
specified in this list will have access to this port. Up to 20 MAC
addresses can be defined. To disable MAC whitelist, set
accessPolicyNumber to null.
sticky_mac_whitelist (list of string): The initial list of MAC
addresses for sticky Mac whitelist. To reset Sticky MAC whitelist,
set accessPolicyNumber to null.
sticky_mac_whitelist_limit (int): The maximum number of MAC addresses
for sticky MAC whitelist.
storm_control_enabled (bool): The storm control status of the switch
port
"""
# Create a mapping from Model property names to API property names
_names = {
"name":'name',
"tags":'tags',
"enabled":'enabled',
"mtype":'type',
"vlan":'vlan',
"voice_vlan":'voiceVlan',
"allowed_vlans":'allowedVlans',
"poe_enabled":'poeEnabled',
"isolation_enabled":'isolationEnabled',
"rstp_enabled":'rstpEnabled',
"stp_guard":'stpGuard',
"access_policy_number":'accessPolicyNumber',
"link_negotiation":'linkNegotiation',
"port_schedule_id":'portScheduleId',
"udld":'udld',
"mac_whitelist":'macWhitelist',
"sticky_mac_whitelist":'stickyMacWhitelist',
"sticky_mac_whitelist_limit":'stickyMacWhitelistLimit',
"storm_control_enabled":'stormControlEnabled'
}
def __init__(self,
name=None,
tags=None,
enabled=None,
mtype=None,
vlan=None,
voice_vlan=None,
allowed_vlans=None,
poe_enabled=None,
isolation_enabled=None,
rstp_enabled=None,
stp_guard=None,
access_policy_number=None,
link_negotiation=None,
port_schedule_id=None,
udld=None,
mac_whitelist=None,
sticky_mac_whitelist=None,
sticky_mac_whitelist_limit=None,
storm_control_enabled=None):
"""Constructor for the UpdateDeviceSwitchPortModel class"""
# Initialize members of the class
self.name = name
self.tags = tags
self.enabled = enabled
self.mtype = mtype
self.vlan = vlan
self.voice_vlan = voice_vlan
self.allowed_vlans = allowed_vlans
self.poe_enabled = poe_enabled
self.isolation_enabled = isolation_enabled
self.rstp_enabled = rstp_enabled
self.stp_guard = stp_guard
self.access_policy_number = access_policy_number
self.link_negotiation = link_negotiation
self.port_schedule_id = port_schedule_id
self.udld = udld
self.mac_whitelist = mac_whitelist
self.sticky_mac_whitelist = sticky_mac_whitelist
self.sticky_mac_whitelist_limit = sticky_mac_whitelist_limit
self.storm_control_enabled = storm_control_enabled
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
name = dictionary.get('name')
tags = dictionary.get('tags')
enabled = dictionary.get('enabled')
mtype = dictionary.get('type')
vlan = dictionary.get('vlan')
voice_vlan = dictionary.get('voiceVlan')
allowed_vlans = dictionary.get('allowedVlans')
poe_enabled = dictionary.get('poeEnabled')
isolation_enabled = dictionary.get('isolationEnabled')
rstp_enabled = dictionary.get('rstpEnabled')
stp_guard = dictionary.get('stpGuard')
access_policy_number = dictionary.get('accessPolicyNumber')
link_negotiation = dictionary.get('linkNegotiation')
port_schedule_id = dictionary.get('portScheduleId')
udld = dictionary.get('udld')
mac_whitelist = dictionary.get('macWhitelist')
sticky_mac_whitelist = dictionary.get('stickyMacWhitelist')
sticky_mac_whitelist_limit = dictionary.get('stickyMacWhitelistLimit')
storm_control_enabled = dictionary.get('stormControlEnabled')
# Return an object of this model
return cls(name,
tags,
enabled,
mtype,
vlan,
voice_vlan,
allowed_vlans,
poe_enabled,
isolation_enabled,
rstp_enabled,
stp_guard,
access_policy_number,
link_negotiation,
port_schedule_id,
udld,
mac_whitelist,
sticky_mac_whitelist,
sticky_mac_whitelist_limit,
storm_control_enabled)
| [
"api-pm@meraki.com"
] | api-pm@meraki.com |
5c1ab956338dcb9308279fad56bf1b39cb5b5de7 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2710/60727/295696.py | 6fe2ad78e9c68b5c106f342fd469c6299ebeedfb | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | arr = list(map(int, input().split()))
n, q = arr[0], arr[1]
arr, res = [], []
for i in range(0, q):
arr.append(input().split())
for i in range(0, q):
t = arr[i]
if t[0] == 'M':
res.append((int(t[1]), int(t[2])))
elif t[0] == 'D':
ans = n + 1
for item in res:
if item[0] <= int(t[1]) and item[1] >= int(t[2]):
ans = min(ans, item[1])
print(ans if ans < n + 1 else -1) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
0429d5df3473aeb93f6ac3d93454a2eb5ff3e162 | 3fac68967637842325cc8242caa7910fc22e759d | /challenges/gcd.py | cec9be158405f150d5cde1d724e44e0e0bd97302 | [] | no_license | fox016/learnpython | 31c947274e025488579f226f7931382aebf9a9d4 | cd1f8c4d31094ad04932a77ea0e0df88788f5328 | refs/heads/master | 2021-01-11T21:07:54.785903 | 2017-04-13T17:18:33 | 2017-04-13T17:18:33 | 79,253,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | print(gcd(108,48))
print(gcd(9823759873245674572938,23897856))
print(gcd(52129982331677168,3258123895729823))
print(gcd(3742284139568368,3274498622122322))
print(gcd(47156645998656522469911,9100405368161785038053))
print(gcd(1617470750160875729262056498097501089239401390,312143478101221631962853008404780911958480970))
print(gcd(5599495879952728975999543669592960035350709387033509,1080604468061052960280613690623202813839610583462607))
| [
"fox016@gmail.com"
] | fox016@gmail.com |
0736cc76a26fb7b580d93517031eadc500ddf4bf | f662a786ca7bcedaec3ff5d488ce28f5c455da93 | /source/conf.py | 3280c79eb86f523eaf6210317dfe0d7982b5328a | [
"MIT"
] | permissive | AppImageX/standards | e4a058df188af948de8025e962cdd0d8c9f967e3 | cf25225a246b00ab2924e90b8bb464e2be270049 | refs/heads/master | 2023-02-10T03:03:14.753311 | 2020-12-27T23:33:12 | 2020-12-28T19:07:54 | 324,832,510 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,414 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# sys.path.insert(0, os.path.abspath("."))
# -- Project information -----------------------------------------------------
project = "AppImageX Standards"
copyright = "2020, The AppImageX Team"
author = "The AppImageX Team"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named "sphinx.ext.*") or your custom
# ones.
extensions = [
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.ifconfig",
"sphinx_last_updated_by_git",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_options = {
"display_version": True,
"sticky_navigation": True,
"includehidden": True,
"collapse_navigation": True,
"titles_only": True,
"prev_next_buttons_location": "both",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# try to fetch current Git commit ID from the environment
commit = os.environ.get("GITHUB_SHA", os.environ.get("GIT_COMMIT", None))
# if this is not possible for some reason, try to fetch it via the git command
if not commit:
import subprocess
try:
commit = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"]).decode().split()[0]
except subprocess.CalledProcessError:
commit = "<not available>"
# make sure to use short commit
commit = commit[:7]
html_context = {
"display_github": True,
"github_user": "AppImage",
"github_repo": "docs.appimage.org",
"github_version": "master/source/",
"commit": commit,
}
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/": None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# apply some subtle changes to the selected theme via custom CSS file
def setup(app):
app.add_stylesheet("css/custom.css")
| [
"theassassin@assassinate-you.net"
] | theassassin@assassinate-you.net |
c1278e40a748f85fd99e5abac06ec8c0a87ec483 | 09a8648805c390594be0908da3188f287dedc471 | /src/practices/github/handle_beautifulsoup_1.py | 990c8181860ac65fa8e346a7de8af0c82f4db7d2 | [
"Apache-2.0"
] | permissive | lgonline/mp | 9d17abbb41ff42fbaf1666059504e2377485c6a9 | 21ef1bfb2feacf6a7abda858c083e0c49878f889 | refs/heads/master | 2020-12-29T02:36:32.332387 | 2019-11-16T03:02:02 | 2019-11-16T03:02:02 | 44,308,720 | 1 | 1 | null | 2015-10-20T16:24:08 | 2015-10-15T09:50:46 | Python | UTF-8 | Python | false | false | 1,186 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/6/7 19:10
# @Author : liugang9
# @Email : mlcc330@hotmail.com
# @File : handle_beautifulsoup_1.py
# @Software: PyCharm
# @license: Apache Licence
# @contact: 3323202070@qq.com
"""
"""
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
def useUrllibGetWebpage():
html = urlopen('https://www.baidu.com')
print(html.read())
pass
def useBeautifulSoupGetWebpage():
url = 'https://www.baidu.com'
webpage = urlopen(url)
soup = BeautifulSoup(webpage,'html.parser')
print(soup)
def getBookFromDouban():
url = 'https://www.douban.com/tag/%E5%B0%8F%E8%AF%B4/?focus=book'
soup = BeautifulSoup(urlopen(url),'html.parser')
# book_div = soup.find(attrs={'id':'book'})
books = soup.findAll(attrs={'class':'title'})
# print(type(books))
# for book in books:
# print(book)
for book in books:
clear_books = re.findall(r'>(\S+)<',str(book))
print(clear_books)
# for mybook in clear_books:
# print(mybook)
if __name__ == '__main__':
# useUrllibGetWebpage()
# useUrllibGetWebpage()
getBookFromDouban() | [
"lg_online@hotmail.com"
] | lg_online@hotmail.com |
20cd6063dfa85464b0fe6949728b420cf2de23ff | 55c75df9dc3a5479a7524c8e2f6546e113d2d89c | /src/gamesbyexample/magichexagon.py | 39dbfaacc9376ed100c74e20234ba604b1980a43 | [
"MIT"
] | permissive | spp2/PythonStdioGames | 4b9bef97fef7dc84fc4c09b2585298cdab865c6c | 7edc6a07ef816a44579800e773f30217541971fa | refs/heads/master | 2022-10-04T02:14:50.789665 | 2020-06-02T16:03:35 | 2020-06-02T16:03:35 | 268,824,285 | 0 | 0 | MIT | 2020-06-02T15:18:41 | 2020-06-02T14:31:53 | Python | UTF-8 | Python | false | false | 6,503 | py | """Magic Hexagon, by Al Sweigart al@inventwithpython.com
Place numbers in a hexagon so each row adds up to 38.
More info at https://en.wikipedia.org/wiki/Magic_hexagon
More info at https://www.youtube.com/watch?v=ZkVSRwFWjy0
This and other games are available at https://nostarch.com/XX
Tags: large, game, puzzle game, board game"""
__version__ = 0
import sys
# Print the title and instructions:
print('''Magic Hexagon, by Al Sweigart al@inventwithpython.com
Place the numbers 1 to 19 on spaces A through S such that all 15
horizontal and diagonal rows add up to 38. The unused numbers are
stored in the Z box until you place them.
We'll start the board with 3 and 17 placed.
''')
input('Press Enter to begin...')
# A large, multi-line string that acts as a template for the game board:
# You can copy/paste this from https://pastebin.com/raw/h9ufKzSz
boardTemplate = r"""Sum to 38: {29} {30} {31}
_ / _ / _ /
/ \/ / \/ / \/ {32}
/ \ / \ / \ / +-Space Map-+
| {0} | {1} | {2} |--/-----{19} | A B C |
/ \ / \ / \ / \/ {33} | D E F G |
/ \ / \ / \ / \ / | H I J K L |
| {3} | {4} | {5} | {6} |--/--{20} | M N O P |
/ \ / \ / \ / \ / \/ | Q R S |
/ \ / \ / \ / \ / \ +-----------+
| {7} | {8} | {9} | {10} | {11} |--{21}
\ / \ / \ / \ / \ / +-----Z-----+
\ / \ / \ / \ / \ /\ |{34} {35} {36} {37}|
| {12} | {13} | {14} | {15} |--\--{22} |{38} {39} {40} {41}|
\ / \ / \ / \ / \ |{42} {43} {44} {45}|
\ / \ / \ / \ /\ {24} |{46} {47} {48} {49}|
| {16} | {17} | {18} |--\-----{23} |{50} {51} {52} |
\ / \ / \ / \ +-----------+
\_/\ \_/\ \_/\ {25}
\ \ \
{28} {27} {26}"""
# The hex board starts off with 3 and 17 placed in A and B:
board = {}
for space in 'ABCDEFGHIJKLMNOPQRS':
board[space] = 0 # Set the space to blank (that is, 0).
board['A'] = 3 # Start with 3 in space A.
board['B'] = 17 # Start with 17 in space B.
# The unused numbers box starts with integers 1 to 19, except 3 and 17:
unusedNums = set()
for i in range(1, 20):
unusedNums.add(i)
unusedNums.remove(3)
unusedNums.remove(17)
while True: # Main game loop.
rowSums = {} # The keys are row numbers, value is the row's sum.
# ROW NUMBERING:
# 12 14
# 11 / 13/15
# / / / / /
# A B C-/-/--1
# D E F G-/---2
# H I J K L----3
# M N O P-\---4
# Q R S-\-6--5
# \ \ \ 7
# 10 9 8
# Calculate the sum for each of the 15 rows:
b = board # Syntactic sugar to have a shorter variable name.
rowSums[1] = b['A'] + b['B'] + b['C']
rowSums[2] = b['D'] + b['E'] + b['F'] + b['G']
rowSums[3] = b['H'] + b['I'] + b['J'] + b['K'] + b['L']
rowSums[4] = b['M'] + b['N'] + b['O'] + b['P']
rowSums[5] = b['Q'] + b['R'] + b['S']
rowSums[6] = b['C'] + b['G'] + b['L']
rowSums[7] = b['B'] + b['F'] + b['K'] + b['P']
rowSums[8] = b['A'] + b['E'] + b['J'] + b['O'] + b['S']
rowSums[9] = b['D'] + b['I'] + b['N'] + b['R']
rowSums[10] = b['H'] + b['M'] + b['Q']
rowSums[11] = b['A'] + b['D'] + b['H']
rowSums[12] = b['B'] + b['E'] + b['I'] + b['M']
rowSums[13] = b['C'] + b['F'] + b['J'] + b['N'] + b['Q']
rowSums[14] = b['G'] + b['K'] + b['O'] + b['R']
rowSums[15] = b['L'] + b['P'] + b['S']
# Prepare the arguments to use for the boardTemplate string:
templateArgs = []
# Indexes 0 to 18 of templateArgs are for the numbers 1 to 19:
for space in 'ABCDEFGHIJKLMNOPQRS':
if board[space] == 0:
templateArgs.append(' .')
else:
templateArgs.append(str(board[space]).rjust(2))
# Indexes 19 to 33 of templateArgs are for the row sums:
for rowNumber in range(1, 16):
templateArgs.append(str(rowSums[rowNumber]).rjust(2))
# Indexes 34 to 52 of templateArgs are for the unused numbers box:
for i in range(1, 20):
if i in unusedNums:
templateArgs.append(str(i).rjust(2))
else:
templateArgs.append(' .')
# Display the hex board:
print(boardTemplate.format(*templateArgs))
# Quit the program if all rows add up to 38:
isSolved = True
for i in range(1, 16): # Loop over all 15 rows.
if rowSums[i] != 38:
isSolved = False # Unsolved if at least one row isn't 38.
if isSolved:
print('You\'ve solved the puzzle! Hurray!')
break
# Get the selected space from the user:
while True:
print('Select a space A to S (or Z or QUIT): ')
response = input('> ').upper()
if response == 'QUIT':
print('Thanks for playing!')
sys.exit()
if response in tuple('ABCDEFGHIJKLMNOPQRSZ'):
selectedSpace = response
break
# Get the selected number from the user to put on the selected space:
while True:
print('Enter 1 to 19 for', selectedSpace, '(or "quit"):')
response = input('> ')
if response.lower().startswith('q'):
print('Thanks for playing!')
sys.exit()
if response.isdecimal() and (1 <= int(response) <= 19):
selectedNumber = int(response)
break
if selectedSpace == 'Z':
# Move the number to the unused numbers box:
unusedNums.add(selectedNumber)
for space in 'ABCDEFGHIJKLMNOPQRS':
if board[space] == selectedNumber:
board[space] = 0 # Set this space to blank.
elif selectedNumber in unusedNums:
# Move the number from the unused numbers box to the board:
numberAtOriginalSpace = board[selectedSpace]
board[selectedSpace] = selectedNumber # Put number on board.
unusedNums.remove(selectedNumber)
if numberAtOriginalSpace != 0:
unusedNums.add(numberAtOriginalSpace)
else:
# Since the number must already be on the board, do a swap to
# move it to the selected space:
for space in 'ABCDEFGHIJKLMNOPQRS':
if board[space] == selectedNumber:
spaceOfOriginalNumber = space
numberAtOriginalSpace = board[selectedSpace]
# Swap the two numbers on the board:
board[selectedSpace] = selectedNumber
board[spaceOfOriginalNumber] = numberAtOriginalSpace
| [
"asweigart@gmail.com"
] | asweigart@gmail.com |
15d5c55905fb974d4fd57d95c5db4742865c48fe | 75fa11b13ddab8fd987428376f5d9c42dff0ba44 | /metadata-ingestion/src/datahub/__init__.py | 3ac3efefc14f064f1ce41e1262d80b9e73fd2735 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"MIT"
] | permissive | RyanHolstien/datahub | 163d0ff6b4636919ed223ee63a27cba6db2d0156 | 8cf299aeb43fa95afb22fefbc7728117c727f0b3 | refs/heads/master | 2023-09-04T10:59:12.931758 | 2023-08-21T18:33:10 | 2023-08-21T18:33:10 | 246,685,891 | 0 | 0 | Apache-2.0 | 2021-02-16T23:48:05 | 2020-03-11T21:43:58 | TypeScript | UTF-8 | Python | false | false | 808 | py | import sys
import warnings
# Published at https://pypi.org/project/acryl-datahub/.
__package_name__ = "acryl-datahub"
__version__ = "0.0.0.dev0"
def is_dev_mode() -> bool:
return __version__.endswith("dev0")
def nice_version_name() -> str:
if is_dev_mode():
return "unavailable (installed in develop mode)"
return __version__
if sys.version_info < (3, 7):
warnings.warn(
"DataHub requires Python 3.7 or newer. "
"Please upgrade your Python version to continue using DataHub.",
FutureWarning,
stacklevel=2,
)
elif sys.version_info < (3, 8):
warnings.warn(
"DataHub will require Python 3.8 or newer soon. "
"Please upgrade your Python version to continue using DataHub.",
FutureWarning,
stacklevel=2,
)
| [
"noreply@github.com"
] | RyanHolstien.noreply@github.com |
d29d7ead3beeb69f4e7257874393395a45b514f2 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/335/usersdata/281/101497/submittedfiles/matriz1.py | d5be8f9b97f00c041645b6fbf32654da27cf0484 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | # -*- coding: utf-8 -*-
n=int(input('Digite o número de linhas: '))
m=int(input('Digite o número de colunas: '))
matriz=[]
for i in range(0,n,1):
linha=[]
for j in range(0,m,1):
linha.append(int(input('Digite um binario[0,1]: ')))
matriz.append(linha)
matrizX=[]
for i in range (0,n,1):
for j in range (0,n,1):
if matriz[i][j]==1:
x=i
break
for i1 in range (n-1,-1,-1):
for j in range (0,m,1):
if matriz[i1][j]==1:
x1=i1
break
for j in range (0,m,1):
for i in range (0,n,1):
if matriz[i][j]==1:
y=j
break
for j1 in range (m-1,-1,-1):
for i in range (0,n,1):
if matriz [i][j1]==1:
y1=j1
break
for i in range (x1,x+1,1):
for j in range (y1,y+1,1):
matrizX.append(matriz[i][j])
print(matrizX)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
524af3a185a13244b19e8fc20ee82ba5d0d201c2 | 587ca84f28c54892ca1fed2ef14774568c20076d | /2013/tp2/tp2ej8.py | ea28a71f4cd4dfd3181ac85c14ebd9ef791dd802 | [] | no_license | guillox/Practica_python | 4c6132a22387db8017d37347217e11091e05b5c9 | a446c944ea81668393597ddf478dafb53d942cb1 | refs/heads/master | 2021-01-10T20:01:12.653714 | 2015-04-11T20:56:05 | 2015-04-11T20:56:05 | 33,785,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | """Módulos
8.- Escriba cada uno de los incisos del ejercicio 7.- (a, b y c) en módulos diferentes
(conjuntos.py, cola.py, parámetros_variables.py), impórtelos en un nuevo modulo
(principal.py) y utilice sus funciones en éste último""" | [
"guillox22@gmail.com"
] | guillox22@gmail.com |
a42072f98db7c8b0cc32e382058507c2e0302a68 | a303be0a547d717b0deb19b5bdcc75010e131b51 | /Contests/College Contests/Hacktivate/p5.py | 2f92ca59a3663d4202e3459033685ae3a656031b | [] | no_license | harrypotter0/competitive-programming | ff883c4dc5aa8d72f1af589bb654a422e32c8a38 | 82a8497e69212dc62e75af74b0d5a3b390b8aca2 | refs/heads/master | 2023-03-23T07:07:14.295053 | 2021-03-17T01:24:45 | 2021-03-17T01:24:45 | 70,964,689 | 16 | 9 | null | 2021-03-17T01:24:49 | 2016-10-15T03:52:53 | Python | UTF-8 | Python | false | false | 983 | py | import math
def readInts():
return list(map(int, raw_input().strip().split()))
def readInt():
return int(raw_input())
def readIntsindex0():
return list(map(lambda x: int(x) - 1, input().split()))
def readStrs():
return raw_input().split()
def readStr():
return raw_input()
a = [[1 for i in xrange(n+1)] for n in xrange(102)]
for i in xrange(1, 101):
for j in xrange(1, i+1):
a[i+1][j] = a[i][j-1] + a[i][j]
p = math.pow(10,9)+9
# print(p)
for _ in xrange(int(raw_input())):
n, g = map(int, raw_input().split())
s = 0
ans = [n for _ in xrange(g+1)]
for i in xrange(1, g+1):
ans[i] = (pow(n+1, i+1)) - 1
for j in range(2, i+2):
ans[i] -= ((a[i+1][j]*ans[i+1-j]))
ans[i] = (ans[i]/(i+1))
print (int((ans[g]-1-pow(n,g)) % p))
'''
Sample Input 0
1
4 2
Sample Output 0
13
Explanation 0
(4-1)2 + (4-2)2 = 32 + 22 = 13.
Sample Input 1
1
4 3
Sample Output 1
35
Explanation 1
(4-1)3 + (4-2)3 = 33 + 23 = 35
'''
| [
"9654263057akashkandpal@gmail.com"
] | 9654263057akashkandpal@gmail.com |
402302a71ae5831e01eb2d136420e4d4a3044e79 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/94/usersdata/149/50708/submittedfiles/mediaLista.py | e1979fe1810c9462bf8a0c5faee5e239056bf973 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | # -*- coding: utf-8 -*-
from __future__ import division
n1=int(input('digite o n1:'))
n2=int(input('digite o n2:'))
n3=int(input('digite o n3:'))
n4=int(input('digite o n4:'))
n5=int(input('digite o n5:'))
n6=int(input('digite o n6:'))
s1=int(input('digite o s1:'))
s2=int(input('digite o s2:'))
s3=int(input('digite o s3:'))
s4=int(input('digite o s4:'))
s5=int(input('digite o s5:'))
s6=int(input('digite o s6:'))
cont=0
if n1==s1 or n1==s2 or n1==s3 or n1==s4 or n1==s5 or n1==s6:
cont=cont+1
if n2==s1 or n2==s2 or n2==s3 or n2==s4 or n2==s5 or n2==s6:
cont=cont+1
if n3==s1 or n3==s2 or n3==s3 or n3==s4 or n3==s5 or n3==s6:
cont=cont+1
if n4==s1 or n4==s2 or n4==s3 or n4==s4 or n4==s5 or n4==s6:
cont=cont+1
if n5==s1 or n5==s2 or n5==s3 or n5==s4 or n5==s5 or n5==s6:
cont=cont+1
if n6==s1 or n6==s2 or n6==s3 or n6==s4 or n6==s5 or n6==s6:
cont=cont+1
if contador==3:
print('3n')
elif contador==4:
print('4n')
elif contador==5:
print('5n')
elif contador==6:
print('6n')
elif contador<3:
print('n') | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
d9747b4b212e3d0982c69a484b3317dda82cbe5a | 43598dd1e251b1733ed16981a148834bd9faca9b | /draw_util.py | 6fec3134df7e94dd8e44503a6b59f2ab3d985313 | [] | no_license | SamL98/PhysicsEngine | 86e6f38a34d7261c13cc76e78f2702e72c4c0c3b | 440e9042cc999277bbc1961cbb4b8f2300f28fde | refs/heads/master | 2020-03-23T18:51:45.246905 | 2018-07-22T22:50:14 | 2018-07-22T22:50:14 | 141,936,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | import numpy as np
"""
Clear the given canvas
:param canvas: numpy array to clear (set to 1)
"""
def clear(canvas):
canvas[0:canvas.shape[0], 0:canvas.shape[1]] = 1
"""
Calculate the Euclidean distance between two points
:param p1: one point
:param p2: the other point
"""
def dist(p1, p2):
return np.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)
"""
Return whether or not given bounding box is viewable in the canvas
:param bbox: the bounding box defining a shape
:param canvas_shape: the shape of the current canvas
"""
def is_viewable(bbox, canvas_shape):
pos = (int(bbox.origin[0]), int(bbox.origin[1]))
xInView = pos[0]<canvas_shape[0] and pos[0]+bbox.height/2>=0
yInView = pos[1]<canvas_shape[1] and pos[1]+bbox.width/2>=0
return xInView and yInView
"""
Return the bounding box of the part of the shape to draw
because some of the bounding box may be out of view.
:param bbox: the bounding box
:param canvas_shape: the shape of the current canvas
"""
def get_drawing_coords(bbox, canvas_shape):
pos = (int(bbox.origin[0]), int(bbox.origin[1]))
center = (int(bbox.center[0]), int(bbox.center[1]))
t = max(0, pos[0])
b = min(canvas_shape[0], center[0]+bbox.height//2)
l = max(0, pos[1])
r = min(canvas_shape[1], center[1]+bbox.width//2)
return center, t, l, b, r | [
"lerner98@gmail.com"
] | lerner98@gmail.com |
e315107d95eb655e7fd621bbcc8ec9c87809941f | 48f73b5b78da81c388d76d685ec47bb6387eefdd | /scrapeHackerrankCode/codes/itertools-permutations.py | 4caa82914dda972ada250151c6f329eb7f86fb9e | [] | no_license | abidkhan484/hacerrankScraping | ad0ceda6c86d321d98768b169d63ea1ee7ccd861 | 487bbf115117bd5c293298e77f15ae810a50b82d | refs/heads/master | 2021-09-18T19:27:52.173164 | 2018-07-18T12:12:51 | 2018-07-18T12:12:51 | 111,005,462 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | # Accepted
# Python 3
from itertools import permutations
a, d = input().split()
d = int(d)
l = list(permutations(a, d))
le = len(l)
l.sort()
for i in range(le):
for j in range(d):
print(l[i][j], end='')
print()
| [
"abidkhan484@gmail.com"
] | abidkhan484@gmail.com |
b7304124b253d7826e8d3a6f2f00285712b9fa5a | 7ac271f357f4c8f0c23c697b11966259f836880f | /app/data/dvdrental/language.py | 5b8fd9d6e1d6871ba70b081f79a44f05a68f7c4c | [] | no_license | cheng93/PythonWeb | 74a58eadee4ee7d2872a582a907bbf47630df371 | d5ced8dee1d5ba31778125c5e67169c92acf26a0 | refs/heads/develop | 2021-01-19T23:59:11.315871 | 2018-03-04T19:26:18 | 2018-03-04T19:26:18 | 89,063,916 | 0 | 0 | null | 2018-03-04T19:26:19 | 2017-04-22T11:09:14 | Python | UTF-8 | Python | false | false | 405 | py | from app.data.dvdrental import Base
from sqlalchemy import Column, DateTime, Integer, String, text
class Language(Base):
__tablename__ = 'language'
language_id = Column(Integer, primary_key=True, server_default=text("nextval('language_language_id_seq'::regclass)"))
name = Column(String(20), nullable=False)
last_update = Column(DateTime, nullable=False, server_default=text("now()"))
| [
"derek.c@hotmail.co.uk"
] | derek.c@hotmail.co.uk |
325e68bb2e595b68fa254d9bcb3c01d2c7a19026 | 8a0bbb159a3d6a259a83224b8addc83c9da1986e | /lists/tests.py | fa5b816ece674b42b152c08c66577ea5f3886bc9 | [] | no_license | guinslym/tdd-django-tutorial | 5e976bcfe3a670b0b75c64646881c5a98214848e | 436d036e01527788e3f7b055f84ed159c822e8b4 | refs/heads/master | 2021-01-01T04:50:07.504959 | 2016-05-19T21:31:03 | 2016-05-19T21:31:03 | 59,229,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,178 | py | from django.test import TestCase
from django.http import HttpRequest
from django.template.loader import render_to_string
from lists.views import home_page
# Create your tests here.
class HomePageViewTest(TestCase):
def test_home_page_returns_correct_html(self):
request = HttpRequest()
response = home_page(request)
self.assertIn('<title>To-Do lists</title>', response.content.decode('utf8'))
self.assertTrue(response.content.startswith(b'<html>'))
#print(response.content)
self.assertTrue(response.content.strip().endswith(b'</html>'))
#expected_content = open('lists/templates/home.html').read()
expected_content = render_to_string('home.html')
self.assertEqual(response.content.decode('utf8'), expected_content)
def test_home_page_can_store_post_requests(self):
request = HttpRequest()
request.method = 'POST'
request.POST['item_text'] = 'new item'
response = home_page(request)
expected_content = render_to_string('home.html',
{'new_item_text': 'new item'})
self.assertEqual(response.content.strip().decode('utf8'), expected_content)
| [
"guinslym@users.noreply.github.com"
] | guinslym@users.noreply.github.com |
4345a1edf1a7d7ec88ebe84116eca2ce54549f00 | 6d6d012b940718afda0e16e3d993d4d8a25793c0 | /applications/suscribers/views.py | 8781a40a6e5c2b2d3fcab0b21217d3ffd61597be | [] | no_license | neunapp/ajaxdj | e66bad9ffd47664e6c0342439b363f97907f8465 | 7a0a29d61ebe0bf5496b235165aaa3d504320978 | refs/heads/master | 2022-09-10T21:26:28.370316 | 2020-06-02T18:05:46 | 2020-06-02T18:05:46 | 268,872,703 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,371 | py | #
from django.shortcuts import render
from django.http import JsonResponse
from django.views.generic import CreateView
from .models import Suscriptor
class SuscriptorCreateView(CreateView):
template_name = "add.html"
model = Suscriptor
fields = ('__all__')
success_url = '.'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["suscripciones"] = Suscriptor.objects.all()
print('**************')
return context
def render_to_response(self, context, **response_kwargs):
""" """
if self.request.is_ajax():
print('Es un peticon ajax*********')
data = list(context["suscripciones"].values())
return JsonResponse({'suscriptores': data})
else:
response_kwargs.setdefault('content_type', self.content_type)
return self.response_class(
request=self.request,
template=self.get_template_names(),
context=context,
using=self.template_engine,
**response_kwargs
)
# cuando este proceso de la respuesta es repetitiva durante el proyecto
# podemos usar una de las mejores herramientas que ofrece Vistas basadas en clases
# que son los mixin, lo que nos ayuda a resumir aun mas las lineas de codigo
# ejemplo aqui abajo utilizano la misma vista
class AjaxaResponseMixin(object):
def render_to_response(self, context, **response_kwargs):
""" """
if self.request.is_ajax():
data = list(context['suscripciones'].values())
return JsonResponse({'suscriptores': data})
else:
response_kwargs.setdefault('content_type', self.content_type)
return self.response_class(
request=self.request,
template=self.get_template_names(),
context=context,
using=self.template_engine,
**response_kwargs
)
class SuscriptorCreateView2(AjaxaResponseMixin, CreateView):
template_name = "add.html"
model = Suscriptor
fields = ('__all__')
success_url = '.'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["suscripciones"] = Suscriptor.objects.all()
return context | [
"csantacruz1127@gmail.com"
] | csantacruz1127@gmail.com |
7c068027cc97b79c1cc967c92cc93dd96840b64e | 6fa082e3d17a7899c9d27e7da1a8fabc04c7a3d5 | /tests/test_pkg.py | 2f2ef5665dce458d025dd9c35be502d00ce035b1 | [] | no_license | agx/whatmaps | 06ecd79e4d6090794b6c7d9bce6c9efaa16d12f9 | e28c99d3fc7c391de039321e87b5ce12eae38572 | refs/heads/master | 2023-01-09T12:52:38.546656 | 2022-12-30T12:09:19 | 2022-12-30T12:09:55 | 18,165,729 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,860 | py | # vim: set fileencoding=utf-8 :
# (C) 2014 Guido Günther <agx@sigxcpu.org>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test L{whatmaps.process} config"""
import unittest
from mock import patch
from whatmaps.pkg import Pkg, PkgError
from . import context
class TestPkg(unittest.TestCase):
def setUp(self):
self.tmpdir = context.new_tmpdir(__name__)
def test_abstract(self):
"""Check abstract method signatures"""
self.assertIsNone(Pkg.type)
self.assertIsNone(Pkg.services)
def test_repr(self):
p = Pkg('apckage')
self.assertEqual(str(p), "<None Pkg object name:'apckage'>")
def test_list_contents(self):
with patch('subprocess.Popen') as mock:
p = Pkg('doesnotmatter')
p._list_contents = '/does/not/matter'
PopenMock = mock.return_value
PopenMock.communicate.return_value = [
b'/package/content',
b'/more/package/content',
]
PopenMock.returncode = 0
result = p._get_contents()
self.assertIn('/package/content', result)
self.assertNotIn('/more/package/content', result)
# We want to check that we don't invoke Popen on
# a second call so let it fail
PopenMock.returncode = 1
result = p._get_contents()
self.assertIn('/package/content', result)
self.assertNotIn('/more/package/content', result)
def test_shared_objects(self):
"""Test that we properly match shared objects"""
with patch('subprocess.Popen') as mock:
p = Pkg('doesnotmatter')
p._list_contents = '/does/not/matter'
PopenMock = mock.return_value
PopenMock.communicate.return_value = [b'\n'.join([
b'/lib/foo.so.1',
b'/lib/bar.so',
b'/not/a/shared/object',
b'/not/a/shared/object.soeither',
])]
PopenMock.returncode = 0
result = p.shared_objects
self.assertIn('/lib/foo.so.1', result)
self.assertIn('/lib/bar.so', result)
self.assertNotIn('/not/a/shred/object', result)
self.assertNotIn('/not/a/shred/object.soeither', result)
# We want to check that we don't invoke Popen on
# a second call so let it fail.
PopenMock.returncode = 1
result = p._get_contents()
self.assertIn('/lib/foo.so.1', result)
self.assertNotIn('/not/a/shred/object', result)
def test_shared_object_error(self):
"""Test that we raise PkgError"""
with patch('subprocess.Popen') as mock:
p = Pkg('doesnotmatter')
p._list_contents = '/does/not/matter'
PopenMock = mock.return_value
PopenMock.communicate.return_value = ['']
PopenMock.returncode = 1
try:
p.shared_objects
self.fail("PkgError exception not raised")
except PkgError:
pass
except Exception as e:
self.fail("Raised '%s is not PkgError" % e)
def tearDown(self):
context.teardown()
| [
"agx@sigxcpu.org"
] | agx@sigxcpu.org |
68236e56506ea6cc90c7d5ffe069d839068af442 | 0524f83be13e4b6cafd304acb002eca7a5d3e0f0 | /Matplotlib intro/Intro/save_image.py | 9be325c4df724ca1d6ac114720f5591d40271db3 | [] | no_license | LesediSekakatlela/python_projects | 25064f7a7cabd54f6d04aba31cdace6e8af06d63 | 6cca3cbddefa006a17f95486dbaa8a0b7de7fea5 | refs/heads/main | 2023-06-06T23:37:59.385280 | 2021-06-20T16:46:11 | 2021-06-20T16:46:11 | 371,608,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0,10,100)
y = x
plt.plot(x, y, label='linear')
plt.legend()
plt.savefig("Figure_1.png")
plt.show() | [
"leseditumelo32@gmail.com"
] | leseditumelo32@gmail.com |
39caf781a2fb28054c728bd71be1b09a045da442 | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/ggH_SF/Full2017_HTXS_Stage1p2_v7/plot_blind.py | 3b79748cb261b57889418880386c716ea5d393b4 | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 3,342 | py | # plot configuration
# groupPlot = {}
#
# Groups of samples to improve the plots.
# If not defined, normal plots is used
#
groupPlot['top'] = {
'nameHR' : 'tW and t#bar{t}',
'isSignal' : 0,
'color': 400, # kYellow
'samples' : ['top']
}
groupPlot['WW'] = {
'nameHR' : 'WW',
'isSignal' : 0,
'color': 851, # kAzure -9
'samples' : ['WW', 'ggWW', 'WWewk']
}
groupPlot['Fake'] = {
'nameHR' : 'nonprompt',
'isSignal' : 0,
'color': 921, # kGray + 1
'samples' : ['Fake_mm', 'Fake_ee']
}
groupPlot['DY'] = {
'nameHR' : "DY",
'isSignal' : 0,
'color': 418, # kGreen+2
'samples' : ['DY']
}
groupPlot['VVV'] = {
'nameHR' : 'VVV',
'isSignal' : 0,
'color': 857, # kAzure -3
'samples' : ['VVV']
}
groupPlot['VZ'] = {
'nameHR' : "VZ",
'isSignal' : 0,
'color' : 617, # kViolet + 1
'samples' : ['VZ', 'WZ', 'ZZ']
}
groupPlot['Vg'] = {
'nameHR' : "V#gamma",
'isSignal' : 0,
'color' : 810, # kOrange + 10
'samples' : ['Vg', 'Wg']
}
groupPlot['VgS'] = {
'nameHR' : "V#gamma*",
'isSignal' : 0,
'color' : 409, # kGreen - 9
'samples' : ['VgS_H','VgS_L']
}
print signals
groupPlot['Higgs'] = {
'nameHR' : 'Higgs',
'isSignal' : 1,
'color': 632, # kRed
'samples' : signals,
}
#plot = {}
# keys here must match keys in samples.py
#
plot['DY'] = {
'color': 418, # kGreen+2
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0,
}
plot['Fake_mm'] = {
'color': 921, # kGray + 1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['Fake_ee'] = {
'color': 921, # kGray + 1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['top'] = {
'nameHR' : 'tW and t#bar{t}',
'color': 400, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0,
}
plot['WW'] = {
'color': 851, # kAzure -9
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['ggWW'] = {
'color': 850, # kAzure -10
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['WWewk'] = {
'color': 851, # kAzure -9
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['Vg'] = {
'color': 859, # kAzure -1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['VgS_H'] = {
'color' : 617, # kViolet + 1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['VgS_L'] = {
'color' : 617, # kViolet + 1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['VZ'] = {
'color': 858, # kAzure -2
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['VVV'] = {
'color': 857, # kAzure -3
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
# HWW
for signal in signals:
plot[signal] = {
'nameHR' : signal,
'color': 632, # kRed
'isSignal' : 1,
'isData' : 0,
'scale' : 1
}
# data
plot['DATA'] = {
'nameHR' : 'Data',
'color': 1 ,
'isSignal' : 0,
'isData' : 1 ,
'isBlind' : 1
}
# additional options
legend['lumi'] = 'L = 41.5/fb'
legend['sqrt'] = '#sqrt{s} = 13 TeV'
| [
"nicolo.trevisani@cern.ch"
] | nicolo.trevisani@cern.ch |
9085c6988eab04035dae166b01927ae787a7b454 | b76daa106277ef2f7ab7f6e3278546c6da0bb967 | /base/sys_argv/code/pathdir.py | dd170c13326a1de5d44bc7bd2c02d908e1a062ac | [] | no_license | DyLanCao/ipython | d071b4659999062106438ec077d27754a711ef92 | 746e070d193de04002d277e5170ddf8b5d9d4d44 | refs/heads/master | 2021-06-12T19:31:44.325346 | 2021-02-20T03:17:58 | 2021-02-20T03:17:58 | 142,657,284 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | import sys,os
try:
import parameters,helpAbout,autoUpdate
from Combobox import ComboBox
except ImportError:
from COMTool import parameters,helpAbout,autoUpdate
from COMTool.Combobox import ComboBox
from PyQt5.QtGui import QIcon,QFont,QTextCursor,QPixmap
from PyQt5.QtWidgets import (QApplication, QWidget,QToolTip,QPushButton,QMessageBox,QDesktopWidget,QMainWindow,
QVBoxLayout,QHBoxLayout,QGridLayout,QTextEdit,QLabel,QRadioButton,QCheckBox,
QLineEdit,QGroupBox,QSplitter,QFileDialog)
class MainWindow(QMainWindow):
DataPath = "./"
def __init__(self,app):
pathDirList = sys.argv[0].replace("\\", "/").split("/")
self.DataPath = os.path.abspath("/".join(str(i) for i in pathDirList))
pathDirList.pop()
if not os.path.exists(self.DataPath + "/" + parameters.strDataDirName):
pathDirList.pop()
self.DataPath = os.path.abspath("/".join(str(i) for i in pathDirList))
self.DataPath = (self.DataPath + "/" + parameters.strDataDirName).replace("\\", "/")
def main():
app = QApplication(sys.argv)
mainWindow = MainWindow(app)
print("pathdir:",mainWindow.DataPath)
if __name__ == "__main__":
main()
| [
"caoyin2011@163.com"
] | caoyin2011@163.com |
58e33bc192fdc4c023cd05a9cc2cfed05f2900d9 | e055690de408c8e0a6ca97b43912b1482528b98c | /src/features.py | c97f0c9ed48cdb2b95deea0e4c84f27eee76c013 | [] | no_license | webclinic017/Dissertation-1 | 88155419cfa6b9d8a1834fadecdadda5c22498db | 1e118c0697f0785dc2db30e46c26af154b269813 | refs/heads/master | 2021-09-08T06:37:21.633294 | 2016-11-29T11:07:02 | 2016-11-29T11:07:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,423 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Project: Dissertation
# File name: features
# Author: Mark Wang
# Date: 13/6/2016
from StockInference.constant import Constants
const = Constants()
features = {
const.PRICE_TYPE: const.STOCK_ADJUSTED_CLOSED,
const.STOCK_PRICE: {const.DATA_PERIOD: 5},
const.STOCK_INDICATOR: [
(const.MACD, {
const.MACD_FAST_PERIOD: 12,
const.MACD_SLOW_PERIOD: 26,
const.MACD_TIME_PERIOD: 9
}),
(const.MACD, {
const.MACD_FAST_PERIOD: 7,
const.MACD_SLOW_PERIOD: 14,
const.MACD_TIME_PERIOD: 9
}),
(const.SMA, 3),
(const.SMA, 13),
(const.SMA, 21),
(const.EMA, 5),
(const.EMA, 13),
(const.EMA, 21),
(const.ROC, 13),
(const.ROC, 21),
(const.RSI, 9),
(const.RSI, 14),
(const.RSI, 21),
],
const.FUNDAMENTAL_ANALYSIS: [
const.US10Y_BOND,
const.US30Y_BOND,
const.FXI,
# const.IC,
# const.IA, # comment this two because this two bond is a little newer
const.HSI,
{const.FROM: const.USD, const.TO: const.HKD},
{const.FROM: const.EUR, const.TO: const.HKD},
# {const.FROM: const.AUD, const.TO: const.HKD},
const.ONE_YEAR,
const.HALF_YEAR,
const.OVER_NIGHT,
const.GOLDEN_PRICE,
]
} | [
"wangyouan0629@hotmail.com"
] | wangyouan0629@hotmail.com |
cdd0665697b42f4cc95cda7d1404b2f0b64c2720 | 6a185868a6a41384f44334b23fea9079a2a35ded | /Algorithm/04_선형리스트-통합.py | 02694336285a5e96ba66bece4e669bab44d1cfb7 | [] | no_license | kimhyeongju/coding_practice | ec096e574877a4d21babdc0162d96a9c75ee5686 | 599b3ecf3100622e165abfc54c0ad90a270ccb51 | refs/heads/master | 2023-03-25T21:34:34.893277 | 2021-03-19T17:49:54 | 2021-03-19T17:49:54 | 296,621,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | # 함수 선언부, 클래스 선언부
def insert_data(position, friend):
katok.append(None)
kLen = len(katok)
for i in range(kLen-1, position, -1):
katok[i] = katok[i-1]
katok[i-1] = None
katok[position] = friend
def delete_data(position):
kLen = len(katok)
katok[position] = None
for i in range(position+1,kLen):
katok[i-1] = katok[i]
katok[i] = None
del(katok[kLen-1])
def add_data(friend):
katok.append(None)
kLen = len(katok)
katok[kLen-1] = friend
# 전역 변수부
katok = []
select = -1 # 1.추가 2.삽입 3.삭제 4.종료
# 메인 코드부
if __name__ == '__main__':
while(select != 4):
select = int(input('선택(1.추가 2.삽입 3.삭제 4.종료) --> '))
if select == 1:
data = input('추가할 데이터 --> ')
add_data(data)
print(katok)
elif select == 2:
pos = int(input('삽입할 위치 --> '))
data = input('추가할 데이터 --> ')
insert_data(pos, data)
print(katok)
elif select == 3:
pos = int(input('삭제할 위치 --> '))
delete_data(pos)
print(katok)
elif select ==4:
exit()
else:
print("잘못 입력")
exit()
| [
"hengzhu1994@gmail.com"
] | hengzhu1994@gmail.com |
824ca23d6252d8aaf3b2c8307a5abfaebb3ea24f | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/streamtube/hoverlabel/font/_family.py | 335e3ef0531d4b5f69975fdfd5c2903dd7c3d021 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 531 | py | import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name='family',
parent_name='streamtube.hoverlabel.font',
**kwargs
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=True,
edit_type='none',
no_blank=True,
role='style',
strict=True,
**kwargs
)
| [
"noreply@github.com"
] | miladrux.noreply@github.com |
fced2509d1f5b132439534f2f9d67b73070b25b2 | 399fb29d8525b6d7ac298783675d0d56e37bcac7 | /python/ray/autoscaler/aws/tests/download_ssh_key.py | b3eb8b0a3c313b5155af259b1f0175c4494d6a84 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | AmeerHajAli/ray | 40c9aebe0da59e9bcd70303d981bfe6b65007991 | 1ffd032f5f793d8817217a040f0f636f9372cd56 | refs/heads/master | 2023-03-28T10:50:09.186561 | 2023-03-24T23:08:08 | 2023-03-24T23:08:08 | 175,129,851 | 1 | 0 | Apache-2.0 | 2019-03-12T03:39:16 | 2019-03-12T03:39:14 | null | UTF-8 | Python | false | false | 512 | py | import os
import boto3
# Create a Boto3 client to interact with S3
s3_client = boto3.client("s3", region_name="us-west-2")
# Set the name of the S3 bucket and the key to download
bucket_name = "oss-release-test-ssh-keys"
key_name = "ray-autoscaler_59_us-west-2.pem"
# Download the key from the S3 bucket to a local file
local_key_path = os.path.expanduser(f"~/.ssh/{key_name}")
s3_client.download_file(bucket_name, key_name, local_key_path)
# Set permissions on the key file
os.chmod(local_key_path, 0o400)
| [
"noreply@github.com"
] | AmeerHajAli.noreply@github.com |
ad0feb58c540da01689ddae3f367adda20b6db35 | 99589f73e394567a3656ccf287003ae89ad5c83e | /MDustbinButton.py | e0a146c06f2553d193119d1a96c0aa258f164385 | [] | no_license | YanruMu-sunding/pyqtwidget | f0c9f33a7d040ace979f9b050ebde8a96fceae2e | 85aaac30e7e63a9494e8bc022c49bf5a6a01e251 | refs/heads/master | 2022-03-01T11:06:00.386250 | 2015-01-23T09:42:21 | 2015-01-23T09:42:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,292 | py | # -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2013.09
# Email : muyanru345@163.com
###################################################################
try:
from PySide.QtCore import *
from PySide.QtGui import *
except ImportError:
import sip
sip.setapi("QString", 2)
sip.setapi("QVariant", 2)
from PyQt4.QtCore import *
from PyQt4.QtGui import *
'''
Class Name: MDustbinButton
Type : QToolButton
Public Method:
void setSize(int)
void setData(int)
int data()
Public Signal:
void sigClicked(int)
'''
class MDustbinButton(QToolButton):
def __init__(self, data = 0, parent = None):
super(MDustbinButton, self).__init__(parent)
self.setData(data)
self.setToolTip('Remove')
self.setAutoRaise(True)
self.setIcon(QIcon('./images/dustbin.png'))
self.connect(self, SIGNAL('clicked()'), self.slotEmitDelete)
def slotEmitDelete(self):
self.emit(SIGNAL('sigClicked(int)'), self.stateData)
def setData(self, data):
self.stateData = data
def data(self):
return self.stateData
def setSize(self, w):
self.setFixedSize(QSize(w, w))
self.setIconSize(QSize(w-1, w-1))
| [
"muyanru345@163.com"
] | muyanru345@163.com |
b32bbeb3daea896285047e64886179a4f2fef98c | 702b8b109bf4b2235de7442a46fbf22288a5ff24 | /forums/Algorithms.py | 49fd08441901f3e4daa6dd53394388e1f289ecc2 | [] | no_license | jakhar1996/neuron-task | 3eac554088c6b5ae81c9c9b532e8537086875347 | 5dd62fcb03acd81a31c73ad2354dccbfecfe7b4c | refs/heads/master | 2021-01-19T18:53:43.901904 | 2016-10-10T20:11:44 | 2016-10-10T20:11:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | '''
Algorithms for calculating score of Posts, Comments and Voting
reference :
1. https://github.com/reddit/reddit/blob/master/r2/r2/lib/db/_sorts.pyx
2. https://medium.com/hacking-and-gonzo/how-reddit-ranking-algorithms-work-ef111e33d0d9
'''
from math import *
import datetime
epoch = datetime.datetime(1970,1,1)
def sign(a):
if a == 0 : return 0
return a/abs(a)
def voteCount(p,n):
#Used in initial implementation for vote counting
p = int(p)
n = int(n)
return abs(p)*(n-p) + (1-abs(p))*n
def epoch_seconds(date):
td = date-epoch
return td.days * 86400 + td.seconds + (float(td.microseconds)/1000000)
def hot(score,date):
#Algorithm for sorting featured posts
order = log(max(abs(score),1),10)
sign = 1 if score > 0 else -1 if score < 0 else 0
seconds = epoch_seconds(date) - 1134028003
a = round(sign * order + seconds/45000,7)
return a
def zeero(a):
a.score = 0
a.vote_count = 0
return a
def confidence(ups,downs):
'''
Algorithm for sorting comments
'''
n = ups + downs
if n == 0:
return 0
z = 1.281551565545
p = float(ups) / n
left = p + 1/(2*n)*z*z
right = z*sqrt(p*(1-p)/n + z*z/(4*n*n))
under = 1 + 1/n*z*z
return (left - right) / under
| [
"shikhar.chauhan@live.com"
] | shikhar.chauhan@live.com |
5eebbda08df3e41b17377b37668d53d24995eef6 | 42a0760a051935b2e765d57c445235221a28f49e | /509_Fibonacci_Number.py | ddfee4f254fd8b67428fa44385ed8f01eb920e51 | [] | no_license | Th3Lourde/l33tcode | 3bea3a3e7c633a2d5a36f7d76d5d776d275d8ee3 | eb6b11f97a022b66716cb3890cc56c58f62e8aa4 | refs/heads/master | 2022-12-22T19:05:04.384645 | 2022-12-18T19:38:46 | 2022-12-18T19:38:46 | 232,450,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py |
class Solution:
def fib_1(self, N):
if N == 0:
return 0
if N == 1 or N == 2:
return 1
elif N >= 2:
return self.fib(N-1) + self.fib(N-2)
def fib(self, N, mem):
if mem[N] != None:
result = mem[N]
elif N == 1 or N == 2:
result = 1
elif N >= 2:
result = self.fib(N-1, mem) + self.fib(N-2, mem)
mem[N] = result
return result
if __name__ == '__main__':
s = Solution()
n = 4
mem = [None] * (n+1)
# print(mem)
# print(s.fib(4, mem))
# print(mem)
| [
"th3sylvia.lourde@gmail.com"
] | th3sylvia.lourde@gmail.com |
c808cacd4d12136e61ae70443e057d83bfff00a2 | ae85cd400fa71296867c9e55297affa2d3679b5d | /hashmaps/count_pairs.py | 975bf328e09c9882d3f5eb6ca6976298bde1852a | [] | no_license | Psycadelik/sifu | a1e751aa4e97cd56431cdf8704304b82943db37c | 72965f694f7a44aa8711d11934b216d5ccf9d280 | refs/heads/master | 2023-04-12T17:07:00.677702 | 2021-05-07T07:30:14 | 2021-05-07T07:30:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,077 | py | """
Count of index pairs with equal elements in an array
Given an array of n elements.
The task is to count the total number of indices (i, j) such that arr[i] = arr[j] and i != j
Examples :
Input : arr[] = {1, 1, 2}: -> 1
As arr[0] = arr[1], the pair of indices is (0, 1)
Input : arr[] = {1, 1, 1} -> 3
As arr[0] = arr[1], the pair of indices is (0, 1),
(0, 2) and (1, 2)
Input : arr[] = {1, 2, 3} -> 0
"""
def countPairs(arr):
n = len(arr)
mapping = {}
ans = 0
# Finding frequency of each number.
for num in arr:
mapping[num] = mapping.get(num, 0) + 1
# Calculating pairs of each value.
for k, v in mapping.items():
ans += (v * (v - 1)) // 2
return ans
def countPairs3(arr):
def no_of_repeats(n):
if n < 2:
return 0
return n-1 + no_of_repeats(n-1)
freqs = [arr.count(i) for i in list(set(arr))]
res = sum([no_of_repeats(i) for i in freqs])
return res
arr = [1, 1, 2]
arr1 = [1, 1, 1, 3, 3, 4, 1]
# print(countPairs(arr))
print(countPairs(arr1))
print(countPairs3(arr1))
| [
"erickmwazonga@gmail.com"
] | erickmwazonga@gmail.com |
f58deb9eda7f55877a7c900911bed9917c40e100 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03945/s876347603.py | 59b6606399e20fd6e84e359c750c5ad89b645501 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | def myAnswer(S:str) -> int:
ans = 0
pre = S[0]
for s in S[1:]:
if (pre != s):
pre = s
ans += 1
return ans
def modelAnswer():
return
def main():
S = input()
print(myAnswer(S))
if __name__ == '__main__':
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c2c901b79b059b6cb996273d372de8316f4ccc9d | 6fe990986efe7f06f8a6eafd01fd4eb042c7f8b3 | /portfolio/urls.py | 0c2f3ffe78cf6dcf42512e15b6327e5e52b8623a | [] | no_license | StillsSma/portfolio | e436c82b2fc5639d38b88375b85e73c40bfd9985 | 856047f9ea38d812dda630815e7a7bf6cf63c798 | refs/heads/master | 2021-01-13T04:12:05.110398 | 2017-04-12T22:26:05 | 2017-04-12T22:26:05 | 77,696,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py |
from django.conf.urls import url
from django.contrib import admin
from app.views import IndexView
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', IndexView.as_view(), name="index_view"),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
"samdawson301@live.com"
] | samdawson301@live.com |
8b3c268cb508e5b629740a4b9b56f8e830592352 | 48b79c2d813cc89c227e36b83a5508acdf9657bd | /udemy/ecommerce/cfehome/forms.py | c3565e95ab0f4222b594c4f78ad177017e9035d3 | [] | no_license | felipe-basina/python | b1c8b980ac201241f06e79c0f5f05ee2528b9960 | bb739f9b57b1947010f831826fd7c65b2a3b85cf | refs/heads/master | 2022-09-23T20:59:34.080521 | 2020-12-19T16:37:21 | 2020-12-19T16:37:21 | 101,874,996 | 1 | 0 | null | 2022-09-16T17:42:00 | 2017-08-30T11:49:08 | Python | UTF-8 | Python | false | false | 1,212 | py | from django import forms
from django.contrib.auth import get_user_model
User = get_user_model()
class ContactForm(forms.Form):
fullname = forms.CharField(
widget=forms.TextInput(
attrs={
"class": "form-control",
"id": "form_full_name",
"placeholder": "Your full name"}
)
)
email = forms.EmailField(
widget=forms.EmailInput(
attrs={
"class": "form-control",
"id": "form_email",
"placeholder": "Your email"}
)
)
content = forms.CharField(
widget=forms.Textarea(
attrs={
"class": "form-control",
"id": "form_content",
"placeholder": "Your content"}
)
)
def clean_email(self):
email = self.cleaned_data.get("email")
if not "gmail.com" in email:
raise forms.ValidationError("Email has to be gmail.com")
return email
| [
"felipe.basina@gmail.com"
] | felipe.basina@gmail.com |
7493e37819689409b4590fc5a592b68f51559ce8 | f3bd271bf00325881fb5b2533b9ef7f7448a75ec | /xcp2k/classes/_each346.py | 52567e9a128698e79a0ca91ffd3ecde84ab173fd | [] | no_license | obaica/xcp2k | 7f99fc9d494859e16b9b0ea8e217b0493f4b2f59 | 6e15c2c95658f545102595dc1783f5e03a9e6916 | refs/heads/master | 2020-07-15T17:27:43.378835 | 2019-02-11T16:32:24 | 2019-02-11T16:32:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | from xcp2k.inputsection import InputSection
class _each346(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Just_energy = None
self.Powell_opt = None
self.Qs_scf = None
self.Xas_scf = None
self.Md = None
self.Pint = None
self.Metadynamics = None
self.Geo_opt = None
self.Rot_opt = None
self.Cell_opt = None
self.Band = None
self.Ep_lin_solver = None
self.Spline_find_coeffs = None
self.Replica_eval = None
self.Bsse = None
self.Shell_opt = None
self.Tddft_scf = None
self._name = "EACH"
self._keywords = {'Md': 'MD', 'Bsse': 'BSSE', 'Powell_opt': 'POWELL_OPT', 'Replica_eval': 'REPLICA_EVAL', 'Just_energy': 'JUST_ENERGY', 'Metadynamics': 'METADYNAMICS', 'Geo_opt': 'GEO_OPT', 'Band': 'BAND', 'Xas_scf': 'XAS_SCF', 'Rot_opt': 'ROT_OPT', 'Spline_find_coeffs': 'SPLINE_FIND_COEFFS', 'Shell_opt': 'SHELL_OPT', 'Cell_opt': 'CELL_OPT', 'Qs_scf': 'QS_SCF', 'Ep_lin_solver': 'EP_LIN_SOLVER', 'Tddft_scf': 'TDDFT_SCF', 'Pint': 'PINT'}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
90888ddfffffef2ccb8da651c3573ecdba4d388a | 93c6cdca36f79e7ccb4e100c048fa4d44ed3d937 | /day06/day06-04.py | f66c94a45471d3bc70c823de6493a93a1b97f9fb | [] | no_license | parkwisdom/Python-Study-step3 | eab66f0afd10ebdaadb167dddec245ab6115d859 | 4c51b6d9959f93e52e8896d9c404c10c64bc8ea8 | refs/heads/master | 2020-04-03T13:51:33.369220 | 2018-10-30T00:55:02 | 2018-10-30T00:55:02 | 155,301,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,988 | py | from tkinter import *
from tkinter.simpledialog import *
from tkinter.filedialog import *
def drawSheet(cList) :
global cellList
if cellList != None :
for row in cellList:
for col in row:
col.destroy()
rowNum = len(cList)
colNum = len(cList[0])
cellList = []
# 빈 시트 만들기
for i in range(0, rowNum):
tmpList = []
for k in range(0, colNum):
ent = Entry(window, text='')
tmpList.append(ent)
ent.grid(row=i, column=k)
cellList.append(tmpList)
# 시트에 리스트값 채우기. (= 각 엔트리에 값 넣기)
for i in range(0, rowNum):
for k in range(0, colNum):
cellList[i][k].insert(0, cList[i][k])
def openCSV() :
global csvList
csvList = []
input_file = askopenfilename(parent=window,
filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
filereader = open(input_file, 'r', newline='')
header = filereader.readline()
header = header.strip() # 앞뒤 공백제거
header_list = header.split(',')
csvList.append(header_list)
for row in filereader: # 모든행은 row에 넣고 돌리기.
row = row.strip()
row_list = row.split(',')
csvList.append(row_list)
drawSheet(csvList)
filereader.close()
def saveCSV() :
global csvList
if csvList == [] :
return
saveFp = asksaveasfile(parent=window, mode='w', defaultextension='.csv',
filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
filewriter = open(saveFp.name, 'w', newline='')
for row_list in csvList :
row_str = ','.join(map(str, row_list))
filewriter.writelines(row_str + '\n')
filewriter.close()
def csvData01() :
csvList=[]
global csvList
input_file = "d:\\pydata\\csv\\supplier_data.csv"
filereader = open(input_file, 'r', newline='')
header = filereader.readline()
header = header.strip() # 앞뒤 공백제거
header_list = header.split(',')
# part Number, Purchase Date
idx1 = 0
for h in header_list:
if h.strip().upper() == 'part Number'.strip().upper():
break
idx1 += 1
idx2 = 0
for h in header_list:
if h.strip().upper() == 'Purchase Date'.strip().upper():
break
idx2 += 1
if idx1 > idx2:
idx1, idx2 = idx2, idx1
del (header_list[idx2])
del (header_list[idx1])
csvList.append(header_list)
for row in filereader: # 모든행은 row에 넣고 돌리기.
row = row.strip()
row_list = row.split(',')
del (row_list[idx2])
del (row_list[idx1])
if row_list[0] == 'Supplier Y':
continue
cost = float(row_list[2][1:])
cost *= 1.5
cost = int(cost / 100) * 100
cost_str = "${0:.2f}".format(cost)
row_list[2] = cost_str
csvList.append(row_list)
drawSheet(csvList)
filereader.close()
def csvData02():
global csvList
csvList=[]
import csv
input_file='d:\pydata\csv\supplier_data.csv'
filereader= open(input_file,'r',newline='')
csvReader = csv.reader(filereader)
header_list = next(csvReader)
csvList.append(header_list)
for row_list in csvReader:
csvList.append(row_list)
drawSheet(csvList)
filereader.close()
pass
## 전역 변수 ##
csvList, cellList = [], []
## 메인 코드 ##
window = Tk()
mainMenu = Menu(window)
window.config(menu=mainMenu)
fileMenu = Menu(mainMenu)
mainMenu.add_cascade(label='파일', menu=fileMenu)
fileMenu.add_command(label='CSV 열기', command=openCSV)
fileMenu.add_command(label='CSV 저장', command=saveCSV)
csvMenu = Menu(mainMenu)
mainMenu.add_cascade(label='CSV 데이터 분석', menu=csvMenu)
csvMenu.add_command(label='특정 열,행 제거', command=csvData01)
csvMenu.add_command(label='특정 열,행 제거', command=csvData02)
window.mainloop() | [
"43980901+parkwisdom@users.noreply.github.com"
] | 43980901+parkwisdom@users.noreply.github.com |
d6468a91d9c1cd79c7e894a0df202c4743dc1841 | 13edd8f1bc3b86fd881f85fbeafe94811392d7fc | /seventh_module/CRM/38.CRM开发之公户基本管理/luffy_crm/web/views/public_customer.py | 9a98ecccb0e156c8735277ebf26e985092295e90 | [] | no_license | ryan-yang-2049/oldboy_python_study | f4c90c9d8aac499e1d810a797ab368217f664bb1 | 6e1ab7f217d9bf9aa7801266dee7ab4d7a602b9f | refs/heads/master | 2022-07-22T23:49:28.520668 | 2019-06-11T13:26:25 | 2019-06-11T13:26:25 | 129,877,980 | 0 | 1 | null | 2022-07-18T17:12:54 | 2018-04-17T09:12:48 | HTML | UTF-8 | Python | false | false | 712 | py | # -*- coding: utf-8 -*-
"""
__title__ = 'public_customer.py'
__author__ = 'yangyang'
__mtime__ = '2019-02-21'
"""
from stark.service.v1 import StarkHandler,StarkModelForm,get_choice_text,get_m2m_text
from web import models
class PublicCustomerModelForm(StarkModelForm):
class Meta:
model =models.Customer
exclude = ['consultant',]
class PublicCustomerHandler(StarkHandler):
list_display = ['name','qq',get_choice_text('状态','status'),get_choice_text('性别','gender'),
get_m2m_text('咨询的课程','course')]
def get_queryset(self, request, *args, **kwargs):
return self.model_class.objects.filter(consultant__isnull=True)
model_form_class = PublicCustomerModelForm
| [
"11066986@qq.com"
] | 11066986@qq.com |
5ac4b682d2dfdc3e080813ab4a70bff3d7d7351e | 7933d55de7e2d3a9e78a372fa76f064f5ed5eb6f | /maths/questions/antiderivative.py | 56e2c6794eb07b71afc2e8541f40caf39c5e0963 | [] | no_license | o2edu/MathsExams | 4921f6683e1d6d96aa834d5b01f30bd66522887d | 8e2c0aeba6bbad52103c420747ead1dad6380408 | refs/heads/master | 2021-05-29T17:42:32.442055 | 2014-05-31T10:18:12 | 2014-05-31T10:18:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,603 | py | import sympy
import random
from .. import all_functions, not_named_yet
from ..latex import solutions
from ..symbols import x0
from . import relationships
@relationships.root
class Antiderivative(relationships.QuestionPart):
"""
Question description
====================
Take a simple expression and find its antiderivative.
Real-life instances
===================
2009 2a: y = 1 / (1 - 2x) [4 lines] [2 marks]
2010 2a: y = cos(2x + 1) [2 lines] [1 mark]
2011 2a: y = 1 / (3x - 4) [5 lines] [1 mark]
2012 2: y = 1 / (2x - 1)^3 [4 lines] [2 marks]
there is no correlation between function type and marks assigned, so we have to choose between
the historic 1 or 2 marks this question has been assigned
"""
def __init__(self):
self.num_marks = random.randint(1, 2)
self._qp = {}
self._qp['function_type'] = random.choice(['linear', 'trig'])
inner_function = all_functions.request_linear(difficulty=3).equation
if self._qp['function_type'] == 'linear':
self.num_lines = 4
index = random.randint(1, 3)
self._qp['equation'] = 1 / inner_function ** index
elif self._qp['function_type'] == 'trig':
self.num_lines = 2
outer_function = random.choice([sympy.cos, sympy.sin])
self._qp['equation'] = outer_function(inner_function)
self._qp['antiderivative'] = self._qp['equation'].integrate()
def question_statement(self):
return 'Find an antiderivative of ${equation}$ with respect to $x$.'.format(
equation=sympy.latex(self._qp['equation'])
)
def solution_statement(self):
# sympy integrates things like 1/x as log(x), not log(|x|) (since the symbol x is treated as a complex number, not a real number)
proper_antiderivative = self._qp['antiderivative'].replace(sympy.log(x0), sympy.log(sympy.Abs(x0)))
constant_of_integration = not_named_yet.randint_no_zero(-3, 3)
lines = solutions.Lines()
# without using .factor() here, we could have (x + 1)**(-3) integrate to -1/(2*x**2 + 4*x + 2) which is expanded
antiderivative = proper_antiderivative.factor() + constant_of_integration
lines += r'${antiderivative}$'.format(antiderivative=sympy.latex(antiderivative))
lines += r'We arbitrarily choose our constant of integration to be ${constant_of_integration}$. It can be any real number, including zero.'.format(
constant_of_integration=constant_of_integration
)
return lines.write()
| [
"ben.lucato@gmail.com"
] | ben.lucato@gmail.com |
c75c91792bd3cf62f44b90570699313af9f3e2aa | 79e45a6e4846927da432087aba845036b11c5622 | /UAT/var/ARCHIVE/NZDJPYdailyOHLC.py | a06abae1818e84fd8456ff3e4968a135362f0569 | [] | no_license | mjserpico/Scarlett-Trading | cba2bcfaacf886b9d851d978683b4ce641c8f6ad | 9778717393dbb0818ee026356996d1806345a6c2 | refs/heads/master | 2020-03-21T21:39:51.108503 | 2019-05-09T02:06:26 | 2019-05-09T02:06:26 | 139,076,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,394 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 08 09:16:43 2017
@author: Michael
"""
import mysql.connector
from ib.opt import Connection, message
from ib.ext.Contract import Contract
import ib
import time
import logging
import datetime
import datalink #universal logins for environment
Flag = 0
logging.basicConfig(filename='pythonlogs\DailyOHLC' + str(datetime.date.today()) + '.txt', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh = logging.FileHandler('DailyOHLC' + str(datetime.date.today()) + '.txt')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.debug('Starting NZDJPYDailyOHLC')
def reply_handler(msg):
#print(msg.value)
print("Reply:", msg)
test = msg.open
test2 = msg.high
test3 = msg.low
test4 = msg.close
logger.debug('In Reply Handler')
if float(test) != -1:
import time
logger.debug('Valid Price Found (OPEN NOT -1)')
#cnx = mysql.connector.connect(user='mjserpico', password='UrzE8B66',host="scar01.cqxmc7cib5oh.us-east-1.rds.amazonaws.com", database='SCAR01')
#cnx = mysql.connector.connect(user='Scarlett01', password='scar01lett',host="serpdb01.cqxmc7cib5oh.us-east-1.rds.amazonaws.com", database='SERPDB01')
cnx = mysql.connector.connect(user=datalink.DB_User, password=datalink.DB_Pass,host=datalink.DB_Host, database=datalink.DB_Path)
logger.debug('Connected to Database')
cur = cnx.cursor()
cur.execute("""Insert Into NZDJPY (Date, Open, High, Low, Close) values(%s,%s,%s,%s,%s)""",(time.strftime("%m/%d/%Y"),float(test),float(test2),float(test3),float(test4)))
cnx.commit()
logger.debug('Ran Insert Script')
today = datetime.date.today( )
print("Today is " + str(today))
dayofweek = datetime.datetime.today().weekday()
print(dayofweek)
if dayofweek == 0: #if Today is Monday
yesterday = today - datetime.timedelta(days=3) #Get Friday
month = (str(0) + str(yesterday.month))
day = (str(0)+ str(yesterday.day))
yesterday2 = (month[-2:] +"/"+ day[-2:] +"/"+str(yesterday.year))
logger.debug('Yesterday2 was %s', str(yesterday2))
else:
yesterday = today - datetime.timedelta(days=1) #Take 1 Day back
month = (str(0) + str(yesterday.month))
day = (str(0)+ str(yesterday.day))
yesterday2 = (month[-2:] +"/"+ day[-2:] +"/"+str(yesterday.year))
logger.debug('Yesterday2 was %s', str(yesterday2))
#MovingAverage Calculation
#Step 1 Get earliest Date to calculate avg from
#reformat date to DB convention first
logger.debug('Today is still %s', today)
backdate = today - datetime.timedelta(days=13)
logger.debug('Date shifted back 10 is %s', backdate)
dayofweek = backdate.weekday()
#Adjust for Saturdays and Sundays: No price data available.
# if dayofweek == 6:
# backdate = today - datetime.timedelta(days = 9)
# if dayofweek == 5:
# backdate = today - datetime.timedelta(days = 8)
#
month = (str(0) + str(backdate.month))
day = (str(0)+ str(backdate.day))
backdate2 = (month[-2:] +"/"+ day[-2:] +"/"+str(backdate.year))
logger.debug('First Date of BB Moving Average is %s', backdate2)
#Select ID from EURUSD where Date in ('12/19/2016', '02/07/2017');
#Select round(Avg(Close),5) from EURUSD where ID BETWEEN 3881 AND 3915;
query = ("SELECT ID from " + CCY1 + CCY2 + " where Date = \"" + yesterday2 + "\"")
logger.debug('Query is %s', query)
cur.execute(query)
for (ID) in cur:
ID1 = ID
logger.debug('BB ID1 is %s', ID1)
query = ("SELECT ID from " + CCY1 + CCY2 + " where Date = \"" + backdate2 + "\"")
logger.debug('Query is %s', query)
cur.execute(query)
for (ID) in cur:
ID2 = ID
logger.debug('BB ID1 is %s', ID1)
logger.debug('BB ID2 is %s', ID2)
query = ("SELECT round(Avg(Close),5) as Avg from " + CCY1 + CCY2 + " where ID BETWEEN " + str(ID2[0]) + " AND " + str(ID1[0]) + ";")
logger.debug('Query is %s', query)
cur.execute(query)
for (Avg) in cur:
BBMovAvg = Avg #Final Moving Average Value
logger.debug('BBMovAvg is %s', BBMovAvg)
##Puts Moving Average Value in hasPosition Table for Reference with intraday strategies
query = ("UPDATE hasPosition SET BB_STRATMovingAvgValue = " + str(BBMovAvg[0]) + " where CCY =\'" + CCY1 + CCY2 +"\';")
logger.debug('Query is %s', query)
cur.execute(query)
cnx.commit()
global Flag
Flag = 1
logger.debug('Flag set to 1')
while Flag == 0:
conn = Connection.create(port=4002, clientId=999)
conn.connect()
logger.debug('Connecting to Server')
time.sleep(1)
conn.register(reply_handler,'HistoricalData') #By registering "HistoricalData" --the Method name only --we can eliminate all the open order garbage
logger.debug('Registered HistoricalData Reply Handler')
#conn.registerall(reply_handler)
time.sleep(1)
qqq = Contract()
qqq.m_symbol = 'NZD'
qqq.m_secType = 'CASH'
qqq.m_exchange = 'IDEALPRO'
qqq.m_currency = 'JPY'
logger.debug('Requesting historical data')
conn.reqHistoricalData(1, qqq, '', '1 D', '1 day', 'Midpoint', 1, 2)
logger.debug('Returned from Reply Handler')
time.sleep(1) #give IB time to send us messages
logger.debug('Disconnecting from Server')
conn.disconnect()
logger.debug('Finished AUDCAD Daily OHLC') | [
"mjserpico@gmail.com"
] | mjserpico@gmail.com |
c35c68e7a7faa7dfa3b76900fb3308daf37711fe | 945b3c14b5a58f8d98955cdf27aef9469e21523c | /flod_booking/alembic/versions/20140307-1414-483e5e40b48d_book_131_noark_5_documents_will_.py | 6c307e7ab2a2eb9984925dcd7e32e36ce21794e4 | [
"BSD-2-Clause-Views"
] | permissive | Trondheim-kommune/Bookingbasen | 34e595e9c57ea6428406b2806559aab17e9a3031 | 58235a5a1fd6ad291cb237e6ec9a67bfe8c463c6 | refs/heads/master | 2022-11-29T00:20:18.681549 | 2017-05-29T19:33:43 | 2017-05-29T19:33:43 | 49,863,780 | 1 | 1 | NOASSERTION | 2022-11-22T00:27:34 | 2016-01-18T08:47:46 | JavaScript | UTF-8 | Python | false | false | 1,581 | py | # -*- coding: utf-8 -*-
"""BOOK-131 Noark 5 documents will temporarily be saved in flod (the integration point, FeSak, is not ready to receive them)
Revision ID: 483e5e40b48d
Revises: 47d9ec0a7bc5
Create Date: 2014-03-07 14:14:59.182049
"""
# revision identifiers, used by Alembic.
revision = '483e5e40b48d'
down_revision = '47d9ec0a7bc5'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('fesak_sak',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('application_id', sa.Integer(), nullable=False),
sa.Column('saksnummer', sa.String(), nullable=False),
sa.Column('ws_header', sa.String(), nullable=False),
sa.Column('ws_sak', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.ForeignKeyConstraint(['application_id'], ['applications.id'], ),
sa.UniqueConstraint('application_id')
)
op.create_table('fesak_journalpost',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('fesak_sak_id', sa.Integer(), nullable=False),
sa.Column('ws_header', sa.String(), nullable=False),
sa.Column('ws_journalpost', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.ForeignKeyConstraint(['fesak_sak_id'], ['fesak_sak.id'], ),
)
def downgrade():
raise NotImplementedError('This application does not support downgrades.')
| [
"teeejay@gmail.com"
] | teeejay@gmail.com |
726f87be962da3929f2f16d4d4ab5bded3efb223 | 6a5a16dc64262c0c3aa4732253d804de105a60b2 | /2.Replacing values in a DataFrame/Replace single values II.py | 58160600818e3b9518971f909fa09fa548aed606 | [] | no_license | Mat4wrk/Writing-Efficient-Code-with-pandas-Datacamp | 425c574053f5777098c7ef1ebedc4ede6500860a | 11ee5f5f2dae180a51fe003a52aaed22df8b5835 | refs/heads/main | 2023-03-12T01:02:09.787179 | 2021-02-25T16:39:59 | 2021-02-25T16:39:59 | 342,286,682 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | # Replace the number rank by a string
names['Rank'].replace({1: 'FIRST', 2: 'SECOND', 3: 'THIRD'}, inplace=True)
print(names.head())
| [
"noreply@github.com"
] | Mat4wrk.noreply@github.com |
c1b0703067199a1d0d8be39d0e2e1cab53696640 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_136/ch36_2020_10_02_01_35_54_347944.py | 7e2ceca48b0f5b0f14629fb376f033a7433bfd56 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | def farotial(n):
i=0
diminuir= 1
outra= n
multiplicador= 1
while i<n-1:
outra-= diminuir
#print (outra)
multiplicador*= outra
i+= 1
resultado= multiplicador * n
return resultado | [
"you@example.com"
] | you@example.com |
bae83ea1902b2ad2dd0cc9983d69a229e3e4b6b5 | 5982a9c9c9cb682ec9732f9eeb438b62c61f2e99 | /Problem_165/learning_solution.py | 7fec0e3d63d4bdb759b81fe243085cecfadd800a | [] | no_license | chenshanghao/LeetCode_learning | 6fdf98473be8f2240dd86d5586bbd1bbb95d6b0c | acf2395f3b946054009d4543f2a13e83402323d3 | refs/heads/master | 2021-10-23T05:23:01.970535 | 2019-03-15T05:08:54 | 2019-03-15T05:08:54 | 114,688,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | class Solution:
def compareVersion(self, version1, version2):
"""
:type version1: str
:type version2: str
:rtype: int
"""
# Case 1:
# 1='1.0.1' 2='01.01.01'
# 2='1.0.0' 2='1.0'
versions1 = [int(v) for v in version1.split(".")]
versions2 = [int(v) for v in version2.split(".")]
for i in range(max(len(versions1),len(versions2))):
v1 = versions1[i] if i < len(versions1) else 0
v2 = versions2[i] if i < len(versions2) else 0
if v1 > v2:
return 1
elif v1 < v2:
return -1
return 0
| [
"21551021@zju.edu.cn"
] | 21551021@zju.edu.cn |
bca710e613a599994957f93c89e4a64b8f045faf | 0015f3ac50f20f2f99727948f117d7ec8cd324d5 | /Data_Preprocessing_ABC_24hrs.py | c8087f23b1b604a60e4d271f750e50b68290dcfd | [] | no_license | omkarpandit24/Dial-Plan-Data-Preprocessing-Aricent | 720d49bce31be9bcec7d4a8c62f8fab46c1fe04b | 578f9849027cdaa5f4eff38ff9c06b9f7b837444 | refs/heads/master | 2020-04-01T08:06:30.483536 | 2018-10-15T17:21:57 | 2018-10-15T17:21:57 | 153,017,742 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,888 | py | #Dial Plan data preprocessing
#24 hours execution
import pandas as pd
import numpy as np
#Import mapping file - mapping between Test_Case_ID and its details about 'Call of Service' and 'Call type'
mapping_data = pd.read_csv("Test_Case_ID_mapping.csv")
#Import 24 Hours working executions data
data = pd.read_csv("INTLENH_premium_try2_CT_3.csv")
data.rename(columns={'Calling Party': 'Calling_Party', 'Called Party': 'Called_Party'}, inplace=True)
data = data[['Status', 'Calling_Party', 'Called_Party', 'Duration']]
#New dataframe to store combined results - data + mapping_data
data3 = pd.DataFrame(columns=['Status', 'Calling_Party', 'Called_Party', 'Duration', 'Test_Case_ID'])
#Focus on only Failed and Completed executions
status_array = ['Failed', 'Completed']
data = data.loc[data['Status'].isin(status_array)]
calling_party_series = pd.Series(data['Calling_Party'])
called_party_series = pd.Series(data['Called_Party'])
#Truncate the text to extract only calling party information
data['Calling_Party']= data.Calling_Party.str.split().str.get(0)
#Call of service codes for 24 hours execution
Call_of_Service = ['InternalOnly', 'Nat24STD', 'Nat24RES'
,'Nat24ENH', 'INTL24STD', 'INTL24ENH'
,'CLIRNat24STD', 'CLIRNat24RES', 'CLIRNat24ENH'
, 'CLIRINTL24STD', 'CLIRINTL24ENH']
#Codes available for call type common for all 3 types of executions
Call_Type = ['National', 'Service', 'Freephone', 'Emergency'
, 'International', 'Mobile', 'Premium']
#Define type of execution
execution_cycle = '24 Hours Execution'
#Current execution cycle ID
cycle_id = 3
#Mapping logic
for i in range(len(Call_of_Service)):
data1 = data[data['Calling_Party'] == Call_of_Service[i]]
#data1 = data[calling_party_series.str.match(Call_of_Service[i])]
for j in range(len(Call_Type)):
data2 = data1[called_party_series.str.contains(Call_Type[j])]
data2.insert(len(data2.columns), 'Test_Case_ID', pd.Series(np.random.randn(len(data2['Status'])), index=data2.index))
for index, row in mapping_data.iterrows():
if row["Execution_Cycle"] == execution_cycle and row["COS_Code"] == Call_of_Service[i] and row["Call_Type_code"] == Call_Type[j]:
test_case_id = row["Test_Case_ID"]
#print(test_case_id)
data2['Test_Case_ID'] = test_case_id
data3 = data3.append(data2)
data3.loc[data3['Test_Case_ID'] == 'DP_GERMANY_TC42']
data3.loc[data3['Calling_Party'] == 'INTL24ENH']
data4 = data3.sort_index()
data4['Execution_ID'] = range(1, len(data4) + 1)
data4 = data4.drop(['Calling_Party', 'Called_Party'], axis=1)
data4['Cycle_ID'] = cycle_id
data4 = data4[['Execution_ID', 'Cycle_ID', 'Duration', 'Status', 'Test_Case_ID']]
#Writing into CSV file
data4.to_csv('PP_INTLENH_premium_24hrs_try2_CT_3.csv')
| [
"you@example.com"
] | you@example.com |
e44953e3dd208113a69c491df8bc862ce7df32a8 | e1efc8e0b0e4629dea61504fbc816c0527691bd9 | /5.mysql/mysql12-引擎.py | 82a3eab1efb867d286da3c941f8abdf243a5f941 | [] | no_license | xiongmengmeng/xmind-technology | 2bb67a0bf92cfd660cac01f8ab3a2454423ccba5 | e2fdb6987ef805a65f0a4feb52d84383853f4b77 | refs/heads/main | 2023-07-31T07:10:29.868120 | 2021-09-11T08:18:17 | 2021-09-11T08:18:17 | 307,636,242 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,363 | py | import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import xmind
from xmind.core.markerref import MarkerId
xmind_name="mysql"
w = xmind.load(os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
s2=w.createSheet()
s2.setTitle("引擎")
r2=s2.getRootTopic()
r2.setTitle("引擎")
content={
'MyISAM':[
{'每个MyISAM在磁盘上存储成三个文件':[
'1.frm文件:存储表的定义数据',
'2.MYD文件:存放表具体记录的数据',
'3.MYI文件:存储索引,仅保存记录所在页的指针,索引的结构是B+树结构'
]},
'存储引擎通过MYI的B+树结构来查找记录页,再根据记录页查找记录',
'不支持事务'
],
'InnoDB':[
'1.通过自动增长auto_increment,生成id',
'2.支持事务:默认隔离级别为可重复度,通过MVCC(并发版本控制)来实现',
'3.使用的锁粒度为行级锁,可支持更高的并发',
'4.存在着缓冲管理:通过缓冲池,将索引和数据全部缓存起来,加快查询的速度',
'5.InnoDB类型的表,其数据的物理组织形式是聚簇表,所有数据按照主键来组织,数据和索引放在一块,位于B+数的叶子节点上'
'6.支持事务'
],
'Memory':[
'1.支持数据类型有限:如不支持TEXT和BLOB类型,对字符串类型,只支持固定长度的,VARCHAR会被自动存储为CHAR类型',
'2.支持的锁粒度为表级锁:访问量大时,表级锁会成为MEMORY存储引擎的瓶颈',
'3.数据存放在内存中:一旦服务器出现故障,数据会丢失',
'4.默认使用hash索引'
],
'InnoDB和Memory的区别':[
'InnoDB引擎:把数据放在主键索引上,其他索引上保存的是主键id',
'Memory引擎:把数据单独存放,索引上保存数据位置'
],
'InnoDB和MyISAM的区别':[
'都是使用B+树来实现索引,但innoDB的叶子节点保存的是主键和数据(占空间更大,但查询更快),MyISAM保存了数据指针',
'锁:InnoDB支持行级锁,事务,MVCC,MyISAM不支持',
'count(*):InnoDB要扫描全表,MyISAM用一个变量保存了整个表的行数'
]
}
#构建xmind
xmind.build(content,r2)
#保存xmind
xmind.save(w,os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind") | [
"xiongmengmeng@qipeipu.com"
] | xiongmengmeng@qipeipu.com |
43ab8afeab93a62e08dddd6878ed50d6a8ff2cc2 | cbcdf195338307b0c9756549a9bffebf3890a657 | /django-stubs/contrib/admin/forms.pyi | 02de4b7a39ef2e839ec31c245909328b97c3f7bd | [
"MIT"
] | permissive | mattbasta/django-stubs | bc482edf5c6cdf33b85005c2638484049c52851b | 8978ad471f2cec0aa74256fe491e2e07887f1006 | refs/heads/master | 2020-04-27T08:38:22.694104 | 2019-03-06T09:05:08 | 2019-03-06T09:05:24 | 174,178,933 | 1 | 0 | MIT | 2019-03-06T16:18:01 | 2019-03-06T16:18:00 | null | UTF-8 | Python | false | false | 891 | pyi | from typing import Any, Dict
from django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm
from django.contrib.auth.models import User
class AdminAuthenticationForm(AuthenticationForm):
auto_id: str
data: Dict[str, str]
empty_permitted: bool
error_class: type
fields: Dict[Any, Any]
files: Dict[Any, Any]
initial: Dict[Any, Any]
is_bound: bool
label_suffix: str
request: None
user_cache: None
error_messages: Any = ...
required_css_class: str = ...
def confirm_login_allowed(self, user: User) -> None: ...
class AdminPasswordChangeForm(PasswordChangeForm):
auto_id: str
data: Dict[Any, Any]
empty_permitted: bool
error_class: type
fields: Dict[Any, Any]
files: Dict[Any, Any]
initial: Dict[Any, Any]
is_bound: bool
label_suffix: str
user: Any
required_css_class: str = ...
| [
"maxim.kurnikov@gmail.com"
] | maxim.kurnikov@gmail.com |
06b164dad6993d72808fa25f53b59ffbb58c7abe | 636506c687b4797bfe5daa59b5264615d3bb894b | /backend/task/migrations/0001_initial.py | 34545b851e4d69599c555b830abd90bf0b7735ca | [] | no_license | crowdbotics-apps/pip-25311 | ac5240874c28ab73f28b5f8c5bc273267aaa88e5 | c4155c93f1517039812e794caf744214fdd115e2 | refs/heads/master | 2023-03-29T11:25:20.961098 | 2021-03-27T19:44:45 | 2021-03-27T19:44:45 | 352,161,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,121 | py | # Generated by Django 2.2.19 on 2021-03-27 19:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('task_profile', '0001_initial'),
('task_category', '0001_initial'),
('location', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('details', models.TextField()),
('frequency', models.CharField(max_length=7)),
('size', models.CharField(max_length=6)),
('is_confirmed', models.BooleanField()),
('status', models.CharField(max_length=10)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('timestamp_confirmed', models.DateTimeField(blank=True, null=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task_category', to='task_category.Category')),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task_customer', to='task_profile.CustomerProfile')),
('location', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='task_location', to='location.TaskLocation')),
('subcategory', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='task_subcategory', to='task_category.Subcategory')),
('tasker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task_tasker', to='task_profile.TaskerProfile')),
],
),
migrations.CreateModel(
name='TaskTransaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(max_length=10)),
('timestamp_completed', models.DateTimeField(blank=True, null=True)),
('date', models.DateField(blank=True, null=True)),
('timestamp_started', models.DateTimeField(blank=True, null=True)),
('task', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tasktransaction_task', to='task.Task')),
],
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.FloatField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('review', models.TextField(blank=True, null=True)),
('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='rating_customer', to='task_profile.CustomerProfile')),
('tasker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rating_tasker', to='task_profile.TaskerProfile')),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.TextField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_customer', to='task_profile.CustomerProfile')),
('task', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='message_task', to='task.Task')),
('tasker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_tasker', to='task_profile.TaskerProfile')),
],
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
38c507a83cc96c9dff844075ddef4b8e6e21b84c | d87483a2c0b50ed97c1515d49d62c6e9feaddbe0 | /.history/buyTopStocks_20210202220509.py | ced8f04211038e2e467f10513714dca47a8b61ff | [
"MIT"
] | permissive | HopperKremer/hoptrader | 0d36b6e33922414003cf689fb81f924da076a54b | 406793c10bc888648290fd15c7c2af62cf8c6c67 | refs/heads/main | 2023-06-12T15:51:00.910310 | 2021-07-06T16:15:41 | 2021-07-06T16:15:41 | 334,754,936 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,661 | py | # from excel import OpenExcel
from tda import auth, client
import os, sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import config
from selenium import webdriver
import json
DRIVER_PATH = "/home/hopper/chromedriver"
driver = webdriver.Chrome(DRIVER_PATH)
redirect_uri = "https://localhost"
try:
c = auth.client_from_token_file(config.token_path, config.api_key)
except FileNotFoundError:
# with webdriver.Chrome() as driver:
c = auth.client_from_login_flow(
driver, config.api_key, redirect_uri, config.token_path
)
r = c.get_price_history(
"AAPL",
period_type=client.Client.PriceHistory.PeriodType.YEAR,
period=client.Client.PriceHistory.Period.TWENTY_YEARS,
frequency_type=client.Client.PriceHistory.FrequencyType.DAILY,
frequency=client.Client.PriceHistory.Frequency.DAILY,
)
assert r.status_code == 200, r.raise_for_status()
print(json.dumps(r.json(), indent=4))
soldFile = open("sold.py", "a")
soldStocks = []
# for stock, data in my_stocks.items():
for stock in my_stocks:
driver = webdriver.Chrome(PATH)
driver.get('https://financhill.com/screen/stock-score')
score = int(driver.find_element_by_tag_name('h2').text)
time.sleep(2)
print(stock)
print(score)
# if (score < 40):
# r.order_sell_trailing_stop(stock, data['quantity'], 1)
# soldStocks.append(stock)
driver.quit()
soldFile.write(soldStocks)
soldFile.close()
<span class="sort sort-desc" data-sort-name="stock_score_normalized" data-current-order="">
Stock Score <i class="glyphicon"></i></span> | [
"hopperkremer@gmail.com"
] | hopperkremer@gmail.com |
130ae343f2184e2cf9db80e53c5efbebb5c76066 | 358a60b05a291a4a81c50401be836e6a60687b55 | /Problems/Eigenvalues/main.py | 351a776463b82d906f57d23b325d1260174bfcce | [] | no_license | wangpengda1210/Tetris | dbdd57cb21d40ff625445e5e9f737db51dd57f63 | 7ef5b3fe8f687097d7d3ff7b5c7aa3b77032667b | refs/heads/main | 2023-03-09T16:34:28.367503 | 2021-03-02T03:40:55 | 2021-03-02T05:39:00 | 342,492,432 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | import numpy as np
a = int(input())
b = int(input())
c = int(input())
d = int(input())
print(np.linalg.eigvals(np.array([[a, b], [c, d]])))
| [
"515484505@qq.com"
] | 515484505@qq.com |
94259b1c5eb4a5d39212ed10ba34bbd1766befa5 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/39/usersdata/72/15179/submittedfiles/dec2bin.py | cd0019461b73084784b76609959e2e414e1c86af | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | # -*- coding: utf-8 -*-
n=int(input('digite o numero decimal:'))
i=0
j=1
d=n%2
while n>0:
d=n%2
n=n/2
i=i+d*j
j=j*10
print ('%d' %i) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
a1626bb5890cc887220d4ad125773c36dcc3d81a | b3528a3795ce373e27d52362128de3cff6f9969d | /python/orbs/target/password-generator/slices1589360571.263371/success/success_24_0.py | 6911640c338f776987141f438eff887ce2b76e1d | [] | no_license | greenmonn/daily-coding | 43e0f3775678c7d6116df7ba5034ea18489d87c9 | ef6ecc88e6db61e18364eef3ea071c11e1385a99 | refs/heads/master | 2023-01-14T04:59:14.130309 | 2021-02-08T23:32:56 | 2021-02-08T23:32:56 | 157,735,438 | 1 | 1 | null | 2022-12-21T02:13:17 | 2018-11-15T15:47:37 | Python | UTF-8 | Python | false | false | 5,923 | py | #!/usr/bin/env python3
# m4ngl3m3! v0.1.1
# Common password pattern generator using strings list
# Follow (Medium / Twitter): @localh0t
import argparse
import sys
import os
from Mangler import ManglingParameters
from Mangler import Mangler
def build_parser():
"""Add parser arguments and return an instance of ArgumentParser."""
parser = argparse.ArgumentParser(description=("Common password pattern "
"generator using strings "
"list"),
formatter_class=argparse.
ArgumentDefaultsHelpFormatter)
parser.add_argument("mutation_mode",
metavar="MUTATION_MODE",
type=str,
help=("Mutation mode to perform: "
"(prefix-mode | suffix-mode | dual-mode)"),
choices=['prefix-mode', 'suffix-mode', 'dual-mode'])
parser.add_argument("strings_file",
metavar="STRINGS_FILE",
type=str,
help="File with strings to mutate")
parser.add_argument("output_file",
metavar="OUTPUT_FILE",
type=str,
help="Where to write the mutated strings")
parser.add_argument("-fy", "--from-year",
metavar="FROM_YEAR",
type=int,
help="Year where our iteration starts",
default=2015)
parser.add_argument("-ty", "--to-year",
metavar="TO_YEAR",
type=int,
help="Year where our iteration ends",
default=2020)
parser.add_argument('-sy', "--short-year",
help=("Also add shorter year form when iterating"),
action='store_true',
default=False)
parser.add_argument("-nf", "--numbers-file",
metavar="NUMBERS_FILE",
type=str,
help="Numbers prefix/suffix file",
default='./target/password-generator/files/numbers/numbers_set2.txt')
parser.add_argument("-sf", "--symbols-file",
metavar="SYMBOLS_FILE",
type=str,
help="Symbols prefix/suffix file",
default='./target/password-generator/files/symbols/symbols_set2.txt')
parser.add_argument("-cf", "--custom-file",
metavar="CUSTOM_FILE",
type=str,
help="Custom words/dates/initials/etc file")
parser.add_argument('-sbs', "--symbols-before-suffix",
help=("Insert symbols also before years/numbers/"
"custom (when in suffix-mode or dual-mode)"),
action='store_true',
default=False)
parser.add_argument('-sap', "--symbols-after-prefix",
help=("Insert symbols also after years/numbers/custom"
" (when in prefix-mode or dual-mode)"),
action='store_true',
default=False)
parser.add_argument("-mm", "--mutation-methods",
metavar="MUTATION_METHODS",
type=str,
help=("Mutation methods to perform (comma separated, "
"no spaces) (valid: see MUTATION_METHODS.md)"),
default='normal,'
'uppercase,'
'firstup,'
'replacevowels')
return parser
def validate_files(strings_file, output_file):
"""Check if input/output files are valid."""
if not os.path.isfile(strings_file):
print("[-] The file %s does not exist or is not a file!" % strings_file)
sys.exit(1)
if os.path.isfile(output_file):
os.remove(output_file)
def build_mangler_with_args(args):
"""Return an instance of Mangler with the given parameters."""
parameters = ManglingParameters()
parameters.num_file = open(args.numbers_file, 'r').read().splitlines()
parameters.sym_file = open(args.symbols_file, 'r').read().splitlines()
if (args.custom_file):
parameters.cus_file = open(args.custom_file, 'r').read().splitlines()
parameters.mutation_mode = args.mutation_mode
parameters.from_year = args.from_year
parameters.to_year = args.to_year
parameters.short_year = args.short_year
parameters.prefix_pos_swap = args.symbols_after_prefix
parameters.suffix_pos_swap = args.symbols_before_suffix
return Mangler(mangling_parameters=parameters)
if __name__ == "__main__":
args = build_parser().parse_args()
mangler = build_mangler_with_args(args)
mangler_functions = {
"normal": mangler.normal_mangling,
"uppercase": mangler.uppercase_mangling,
"firstup": mangler.firstup_mangling,
"replacevowels": mangler.replacevowels_mangling,
}
written_strings = 0
with open(args.strings_file, 'r') as f:
for line in f:
mangled = []
for method in args.mutation_methods.lower().split(","):
try:
(name, output) = mangler_functions[method](line.strip())
mangled.extend(output)
except KeyError:
print("[-] The method %s is not defined !" % method)
print("[+] %s mutation method done on string: %s" %
(name, line.strip()))
written_strings += len(mangled)
print('##v_trajectory captured: {}##'.format(written_strings))
| [
"greenmon@kaist.ac.kr"
] | greenmon@kaist.ac.kr |
dd0f2fd7d48c85fda6349cac297406d769294ba1 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/217/usersdata/355/113233/submittedfiles/av2_p3_m2.py | ddc27da64cc24bc2b5d2c4edd83d7db6abb116a4 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | # -*- coding: utf-8 -*-
import numpy as np
n=int(input("Digite a dimensão da matriz: "))
matriz=np.zeros(n,n)
M0=M1=M2=0
for i in range(0,n,1):
for j in range(0,n,1):
matriz[i,j]=float(input("Digite os elementos da matriz: "))
while j=0:
M0=matriz[i,j]+M0
while j=1:
M1=matriz[i,j]+M1
while j=2:
M2=matriz[i,j]+M2
if M0=M1:
m=M0
else:
if M1=M2:
m=M1
else:
m=M0
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
56688dead2cbc91be46ea27859c5c5320cca5b5a | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-ocr/huaweicloudsdkocr/v1/model/recognize_driver_license_response.py | bbe664d173706914e84e388630af95ba3f524c3d | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 2,933 | py | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class RecognizeDriverLicenseResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'result': 'DriverLicenseResult'
}
attribute_map = {
'result': 'result'
}
def __init__(self, result=None):
"""RecognizeDriverLicenseResponse - a model defined in huaweicloud sdk"""
super(RecognizeDriverLicenseResponse, self).__init__()
self._result = None
self.discriminator = None
if result is not None:
self.result = result
@property
def result(self):
"""Gets the result of this RecognizeDriverLicenseResponse.
:return: The result of this RecognizeDriverLicenseResponse.
:rtype: DriverLicenseResult
"""
return self._result
@result.setter
def result(self, result):
"""Sets the result of this RecognizeDriverLicenseResponse.
:param result: The result of this RecognizeDriverLicenseResponse.
:type: DriverLicenseResult
"""
self._result = result
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RecognizeDriverLicenseResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
1d9fbcc9d3db5195493acabf8c693f8b1d4d7abb | 56a0762c741bcac3ab1172eb6114a9e59a48a5df | /tutorados/urls.py | 2a9edff042fcb259b99285e88e6692f3844700c5 | [
"MIT"
] | permissive | jjmartinr01/gauss3 | 54af1735a035a566f237d8e0fd9a6fe4447845a2 | 41a23d35c763890d8f729c9d63ac073673689400 | refs/heads/master | 2023-08-23T06:40:51.033857 | 2023-08-08T11:50:50 | 2023-08-08T11:50:50 | 171,710,013 | 1 | 0 | MIT | 2023-02-15T18:43:56 | 2019-02-20T16:35:03 | HTML | UTF-8 | Python | false | false | 341 | py | # -*- coding: utf-8 -*-
from django.urls import path
from . import views
urlpatterns = [
path('informes_seguimiento/', views.informes_seguimiento),
path('ajax_informe_seguimiento/', views.ajax_informe_seguimiento),
path('informes_tareas/', views.informes_tareas),
path('ajax_informe_tareas/', views.ajax_informe_tareas),
]
| [
"jmar0269@gmail.com"
] | jmar0269@gmail.com |
8a57b2c557a5ef2f443057587619d3e3c06297ab | 6f02ef3af5c9360fdc41f766493e5f8f2eeaca58 | /todos/migrations/0001_initial.py | a4a758ab58d2e6245ce1c817234954a901ac27f6 | [] | no_license | mahmudgithub/demo_pactics_project_eighteen | b79b58a9e6283f6c155013e775f28ba9746ce8e1 | 0709453fddde390ad1b456b2dc3bc5cfab0a30de | refs/heads/master | 2022-04-01T03:17:32.275191 | 2020-01-29T11:03:50 | 2020-01-29T11:03:50 | 236,962,921 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | # Generated by Django 2.2.6 on 2019-10-31 11:19
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Todo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('description', models.TextField()),
],
),
]
| [
"mahmudhossain838@gmail.com"
] | mahmudhossain838@gmail.com |
f8f6e8e72ebdcabc0c5a3c0453af21f15603e6d2 | 978248bf0f275ae688f194593aa32c267832b2b6 | /xlsxwriter/test/comparison/test_chart_radar01.py | 3ec2e6659d3e8c3762dccca98737d355ecc87765 | [
"BSD-2-Clause-Views"
] | permissive | satish1337/XlsxWriter | b0c216b91be1b74d6cac017a152023aa1d581de2 | 0ab9bdded4f750246c41a439f6a6cecaf9179030 | refs/heads/master | 2021-01-22T02:35:13.158752 | 2015-03-31T20:32:28 | 2015-03-31T20:32:28 | 33,300,989 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,554 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_radar01.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'radar'})
chart.axis_ids = [56801152, 56802688]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
e6bfc1a526fa9242f4a6480456004c104c5cb614 | d3d14ffc9d0211c49187f7502e0e9edbf68cc01f | /auth_network_provider/admin.py | 2aaf239a44680b3362b64fbffbc8be3a999739e3 | [] | no_license | Brachamul/centrifuge | 567948fe0fd67a605448c1f3248a0fc5c6d838e6 | b3ba6635fd4097cc76b4ef6e2522ab2741ccd372 | refs/heads/master | 2021-05-01T03:41:29.432670 | 2017-06-17T14:20:02 | 2017-06-17T14:20:02 | 61,970,963 | 0 | 1 | null | 2017-01-22T15:45:55 | 2016-06-26T02:48:56 | HTML | UTF-8 | Python | false | false | 752 | py | from django.contrib import admin
from .models import *
class AppAdmin(admin.ModelAdmin):
model = App
list_display = ("name", "trusted", "callback_url", "key", "secret")
admin.site.register(App, AppAdmin)
class CredentialsInline(admin.TabularInline):
model = Credentials
readonly_fields = ( "app", "user_has_authorized", "token", )
extra = 0
class NetworkUserAdmin(admin.ModelAdmin):
model = NetworkUser
readonly_fields = ("user", "uuid", )
list_display = ("user", "number_of_apps",)
inlines = [CredentialsInline, ]
admin.site.register(NetworkUser, NetworkUserAdmin)
class CredentialsAdmin(admin.ModelAdmin):
model = Credentials
readonly_fields = ( "token", "date_joined", )
admin.site.register(Credentials, CredentialsAdmin)
| [
"barnaby.brachamul@gmail.com"
] | barnaby.brachamul@gmail.com |
21bb27820c75c147f94f451e5b09b11aa42f6dbc | a73fd25dd9a8e6df0b1bf3eee0bccf5297722bc7 | /]tasks/2018.01.26.make_purge_road_ds/purged_road_test.py | b51732b72009e0187128f0a3175859c76936bfc9 | [] | no_license | bohaohuang/sis | 23d0260d85903b62518fb8fb588661597248ad0d | 28a59f3182f0ba58ba582449377c6588af1d4cde | refs/heads/master | 2021-05-05T17:00:33.808099 | 2019-09-06T17:46:02 | 2019-09-06T17:46:02 | 117,362,230 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | import os
import imageio
import numpy as np
import matplotlib.pyplot as plt
patchDir2 = r'/hdd/uab_datasets/Results/PatchExtr/inria/chipExtrRand0_cSz224x224_pad0'
files = os.path.join(patchDir2, 'fileList.txt')
with open(files, 'r') as f:
file_list = f.readlines()
files = os.path.join(r'/media/lab/Michael(01)/chipExtrRegPurge_cSz572x572_pad184', 'state.txt')
with open(files, 'r') as f:
text = f.readlines()
print(text)
'''for i in file_list[:5]:
file_array = i.strip().split(' ')
rgb = []
for file in file_array[:3]:
img = imageio.imread(os.path.join(patchDir2, file))
rgb.append(img)
rgb = np.dstack(rgb)
gt = imageio.imread(os.path.join(patchDir2, file_array[-1]))
plt.subplot(121)
plt.imshow(rgb)
plt.subplot(122)
plt.imshow(gt)
plt.show()'''
| [
"bohao.huang@duke.edu"
] | bohao.huang@duke.edu |
b2f0c12b65c751c60b90d88d52ace5920b128e1d | 35f2fafdc401b6a055d7d236fd1a5c619b6567df | /users/models.py | 781e6d3ddd30db5b26262f98d80bdcb1a5e68729 | [] | no_license | lannyMa/zmr_form2 | d8999d3605cf6ef0aee53b91599db3d5e91ddfc2 | e02743231d1df98e25c7c321bae82b01ebcaae83 | refs/heads/master | 2021-07-18T11:20:19.914460 | 2017-10-26T10:25:17 | 2017-10-26T10:25:17 | 108,396,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,484 | py | from datetime import datetime
from django.contrib.auth.models import AbstractUser
from django.db import models
# Create your models here.
class UserProfile(AbstractUser):
    """Custom user model extending Django's AbstractUser with profile fields."""
    # Avatar image, stored under image/<year>/<month>.
    image = models.ImageField(upload_to="image/%Y/%m",default="image/default.png",verbose_name="头像")
    nick_name = models.CharField(max_length=50,default="",verbose_name="昵称")
    # NOTE(review): "femail" (used both as a choice key and as the default)
    # looks like a typo for "female", but changing it would break rows
    # already stored with this value -- confirm before renaming.
    gender = models.CharField(max_length=50,choices=(("femail","女"),("male","男")), default="femail",verbose_name="性别")
    birth = models.DateField(null=True,blank=True,verbose_name="生日")
    address = models.CharField(max_length=100,default="",verbose_name="地址")
    mobile = models.CharField(max_length=13,verbose_name="手机")
    class Meta:
        verbose_name = "用户信息"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.username
class EmailVerifyRecord(models.Model):
    """Email verification code record for register/forget/update flows."""
    code = models.CharField(max_length=20,verbose_name="验证码类型")
    email = models.EmailField(max_length=30,verbose_name="邮箱")
    send_type = models.CharField(max_length=30,choices=(("register","注册"),("forget","找回密码"),("update","修改邮箱")),default="register",verbose_name="发送类型")
    # Callable default: datetime.now is evaluated when the row is created,
    # not when the class is defined.
    send_time = models.DateField(default=datetime.now,verbose_name="添加时间")
    class Meta:
        verbose_name = "邮箱验证码"
        verbose_name_plural = verbose_name
    def __str__(self):
        return "{0}({1})".format(self.code,self.email)
| [
"iher@foxmail.com"
] | iher@foxmail.com |
919360772cff2ff788f446d26bf11cbde56b7805 | dc437674926f7402da4de3ea4022da37932aaffd | /studentproject/post/admin.py | 2a3ec02467b1509ef6750978aeee6360170098cc | [] | no_license | kstar0102/Datasource | ec188029ed6fdefcda13b49fffc0496f9cbd6277 | 92bb1f4f9f1cfe9dd4c25c220cf503cb1de2ba68 | refs/heads/master | 2023-04-24T17:37:27.670492 | 2021-04-22T07:39:36 | 2021-04-22T07:39:36 | 360,366,639 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | from django.contrib import admin
from .models import *
# Register your models here.
# Make the Post and Like models manageable through the Django admin site.
admin.site.register(Post)
admin.site.register(Like)
| [
"yourname@email.com"
] | yourname@email.com |
60a5cc01c4078a8fac44bbfd6e9fc314eddbd9cd | e55480007fde8acea46fe8eeb3ee7193c25ba113 | /tests/test_ds/test_graph_subject/chapter_04/test_is_tree.py | 8ea2c200572eb20635412bb520bb3f7bd67a6172 | [] | no_license | Annihilation7/Ds-and-Al | 80301bf543ec2eb4b3a9810f5fc25b0386847fd3 | a0bc5f5ef4a92c0e7a736dcff77df61d46b57409 | refs/heads/master | 2020-09-24T05:04:41.250051 | 2020-02-15T10:31:10 | 2020-02-15T10:31:10 | 225,669,366 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,273 | py | # -*- coding: utf-8 -*-
# Email: 763366463@qq.com
# Created: 2020-02-11 09:36pm
from src.ds.graph_subject.chapter_02 import my_adj_set, adj_matrix
from src.ds.graph_subject.chapter_04 import is_tree
import unittest
class Test_IsTree(unittest.TestCase):
    """Exercises IsTree on the same two graph files loaded through both the
    adjacency-matrix and the adjacency-set graph representations."""
    def setUp(self) -> None:
        # g2/g3 loaded via the adjacency-matrix backed graph.
        self.test_adj_matrix1 = is_tree.IsTree(
            adj_matrix.AdjMatrix('src/ds/graph_subject/data/g2.txt')
        )
        self.test_adj_matrix2 = is_tree.IsTree(
            adj_matrix.AdjMatrix('src/ds/graph_subject/data/g3.txt')
        )
        # The same files via the adjacency-set backed graph.
        self.test_adj_set1 = is_tree.IsTree(
            my_adj_set.MyAdjSet('src/ds/graph_subject/data/g2.txt')
        )
        self.test_adj_set2 = is_tree.IsTree(
            my_adj_set.MyAdjSet('src/ds/graph_subject/data/g3.txt')
        )
    def test_all(self):
        # Since vertex 5 is an isolated node forming its own connected
        # component, every is_tree() call here should return False.
        print('基于邻接矩阵的图:')
        print(self.test_adj_matrix1.is_tree())
        print(self.test_adj_matrix2.is_tree())
        print('=' * 20, '华丽分割线', '=' * 20)
        print('基于邻接表的图:')
        print(self.test_adj_set1.is_tree())
        print(self.test_adj_set2.is_tree())
if __name__ == '__main__':
unittest.main() | [
"763366463@qq.com"
] | 763366463@qq.com |
a0319a975466da4555acd1c7f72db4566235dbd5 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /hpJsoWBBHWKZ9NcAi_20.py | a8ff8faf0dbc2ff4752f68ec485a8e5bd8531d3f | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,710 | py | """
In the world of birding there are four-letter codes for the common names of
birds. These codes are created by some simple rules:
* If the bird's name has only one word, the code takes the first four letters of that word.
* If the name is made up of two words, the code takes the first two letters of each word.
* If the name is made up of three words, the code is created by taking the first letter from the first two words and the first two letters from the third word.
* If the name is four words long, the code uses the first letter from all the words.
There are other ways codes are created, but this challenge will only use the
four rules listed above.
In this challenge you will write a function that takes a list of strings of
common bird names and create the codes for those names based on the rules
above. The function will return a list of codes in the same order in which the
input names were presented.
### Examples
bird_code(["Black-Capped Chickadee", "Common Tern"]) ➞ ["BCCH", "COTE"]
bird_code(["American Redstart", "Northern Cardinal"]) ➞ ["AMRE","NOCA"]
bird_code(["Bobolink", "American White Pelican"]) ➞ ["BOBO","AWPE"]
### Notes
* The four-letter codes in the returned list should be in UPPER CASE.
* If a common name has a hyphen/dash, it should be considered a space.
"""
import re
def bird_code(lst):
    """Return the four-letter birding code for each common name in *lst*.

    Hyphens count as word separators.  The code is built from the words:
    1 word -> first four letters, 2 words -> first two of each,
    3 words -> one letter from the first two plus two from the third,
    4+ words -> one initial from each of the first four words.
    Codes are upper-cased and returned in input order.
    """
    codes = []
    for name in lst:
        words = re.split('[\-\s]', name)
        count = len(words)
        if count == 1:
            code = words[0][:4]
        elif count == 2:
            code = words[0][:2] + words[-1][:2]
        elif count == 3:
            code = words[0][0] + words[1][0] + words[2][:2]
        else:
            code = ''.join(word[0] for word in words[:4])
        codes.append(code.upper())
    return codes
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
90e79259099914f41a2ae73355cadc9e89d537bc | 28a462a28f443c285ca5efec181ebe36b147c167 | /tests/compile/basic/es2019/PutValue.spec | 82f9ea18648132fc2116887adffdb4b33ca2706b | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | kaist-plrg/jstar | 63e71f9156860dc21cccc33a9f6c638dfee448ea | 1282919127ea18a7e40c7a55e63a1ddaaf7d9db4 | refs/heads/main | 2022-07-22T08:12:34.947712 | 2022-02-27T04:19:33 | 2022-02-27T11:06:14 | 384,045,526 | 6 | 4 | NOASSERTION | 2022-02-27T11:05:26 | 2021-07-08T07:53:21 | Python | UTF-8 | Python | false | false | 1,196 | spec | 1. ReturnIfAbrupt(_V_).
1. ReturnIfAbrupt(_W_).
1. If Type(_V_) is not Reference, throw a *ReferenceError* exception.
1. Let _base_ be GetBase(_V_).
1. If IsUnresolvableReference(_V_) is *true*, then
1. If IsStrictReference(_V_) is *true*, then
1. Throw a *ReferenceError* exception.
1. Let _globalObj_ be GetGlobalObject().
1. Return ? Set(_globalObj_, GetReferencedName(_V_), _W_, *false*).
1. Else if IsPropertyReference(_V_) is *true*, then
1. If HasPrimitiveBase(_V_) is *true*, then
1. Assert: In this case, _base_ will never be *undefined* or *null*.
1. Set _base_ to ! ToObject(_base_).
1. Let _succeeded_ be ? _base_.[[Set]](GetReferencedName(_V_), _W_, GetThisValue(_V_)).
1. If _succeeded_ is *false* and IsStrictReference(_V_) is *true*, throw a *TypeError* exception.
1. Return.
1. Else _base_ must be an Environment Record,
1. Return ? _base_.SetMutableBinding(GetReferencedName(_V_), _W_, IsStrictReference(_V_)) (see <emu-xref href="#sec-environment-records"></emu-xref>). | [
"h2oche22@gmail.com"
] | h2oche22@gmail.com |
b034390970ca7665154b9ff7554141897cd63861 | fd64e364368bcb2cdcf77ab1e0fc234a6b698f69 | /Python/Beginner/CATSDOGS.py | 4c85396cd6c96559c3c085683f329d3f416ad4ff | [] | no_license | Parizval/CodeChefCodes | 57712069f3d56cc42282f9e35c6ddd9398e4a5bf | cfd2876816be806882650b6ea51431b1f8d6bec5 | refs/heads/master | 2021-07-16T13:10:15.668713 | 2020-07-06T21:40:09 | 2020-07-06T21:40:09 | 188,693,667 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | for a in range(int(input())):
    C,D,L = map(int,input().split())
    # Assume a valid arrangement exists until a constraint fails.
    check = True
    # Every animal standing on the ground contributes exactly 4 legs.
    if L % 4 != 0 :
        check = False
    else:
        animals = L //4
        # Most grounded animals: every cat and every dog stands.
        upperlimit = D + C
        # C - 2*D cats cannot be carried (presumably each dog carries at
        # most two cats -- confirm against the problem statement).
        remainder = C - 2*D
        if remainder < 0 :
            remainder = 0
        # Fewest grounded animals: all dogs plus the cats left over.
        lowerlimit = D + remainder
        if animals < lowerlimit or animals > upperlimit:
            check = False
    if check:
        print("yes")
    else:
print("no") | [
"anmolgoyal@gmail.com"
] | anmolgoyal@gmail.com |
8897cd5753b7e6de917d400cf7fff05f75fe2ae7 | 9431bba2d148f8aef9c0a8f3ca16fcf875890757 | /tools/snippets/callexecute.py | d020c8d54598d30b17ad2f2e36d6f1c5d51cb87b | [
"MIT"
] | permissive | terasakisatoshi/pythonCodes | fba0b78414b2c85f4a738200354ea583f0516768 | 953210c06e9885a7c885bc01047715a77de08a1a | refs/heads/master | 2023-05-14T12:30:22.201711 | 2023-05-07T13:41:22 | 2023-05-07T13:41:22 | 197,893,702 | 2 | 1 | MIT | 2022-11-25T10:59:52 | 2019-07-20T07:09:12 | Jupyter Notebook | UTF-8 | Python | false | false | 416 | py | import subprocess
import sys
from os.path import join
def main():
    """If exactly one CLI argument is given, treat it as an executable
    name and run it (via the shell) against ./hoge, echoing the command
    line first.  Otherwise only the argv diagnostics are printed."""
    arguments = sys.argv
    count = len(arguments)
    print ("argv=%s"%arguments)
    print ("argc=%d"%count)
    if count != 2:
        return
    exe = arguments[1]
    target = join(".", "hoge")
    command = exe + " " + target
    # Echo the command through the shell before actually running it.
    subprocess.call("echo " + command, shell=True)
    subprocess.call(command, shell=True)
if __name__ == '__main__':
main() | [
"terasakisatoshi.math@gmail.com"
] | terasakisatoshi.math@gmail.com |
42dd946ad5766301d51e0817e59acc4c05619a40 | 8cf0844cfc26f32726ea787100528aea9a63427c | /flask_app/app_start.py | 6fe44cf4502b62ed22ca7eb5655a1d62a1e129d6 | [] | no_license | lcgkiller/firstFlask | 8d8aa2987381dd599522391a82f5c1e53cda48fc | 4cfb3b8093e1e118aecebdcd9945edae02226ccc | refs/heads/master | 2020-03-13T21:47:43.241979 | 2018-04-27T16:02:51 | 2018-04-27T16:02:51 | 131,304,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | from flask import Flask, request, redirect, url_for, send_from_directory, render_template
app = Flask(__name__)
# NOTE(review): debug mode is enabled unconditionally -- fine for local
# development, but it must not stay on in production.
app.debug = True
# Routes
@app.route('/', methods=['GET'])
def root():
    # Serve the landing page template.
    return render_template('index.html')
@app.route('/<path:path>')
def static_prox(path):
    # Forward any other path straight to the static file of the same name.
    return app.send_static_file(path)
if __name__ == "__main__":
    app.run()
    # app.run(host="0.0.0.0", port=80, threaded=True)
| [
"lcgkiller@gmail.com"
] | lcgkiller@gmail.com |
11a9567b55ce1e148141783bb21cb23f10aeca3c | b24ce5acced59ef367a20706949953f3ea81d57a | /tensorflow/contrib/learn/python/learn/learn_runner.py | 183ab438b6f2f658da98d6f655c97b4a59ac9a06 | [
"Apache-2.0"
] | permissive | BoldizsarZopcsak/Image-Classifier | b57dd3b72cf368cc1d66a5e318003a2a2d8338a4 | c0d471a55a70b3118178488db3c005a9277baade | refs/heads/master | 2022-11-19T12:28:49.625532 | 2018-01-20T15:48:48 | 2018-01-20T15:48:48 | 118,253,026 | 1 | 1 | Apache-2.0 | 2022-11-01T09:24:24 | 2018-01-20T15:04:57 | Python | UTF-8 | Python | false | false | 6,712 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs an Experiment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.experiment import Experiment
from tensorflow.python.platform import tf_logging as logging
# TODO(xiejw): Refactor the learn_runner to make code reusable.
def _execute_schedule(experiment, schedule):
"""Execute the method named `schedule` of `experiment`."""
if not hasattr(experiment, schedule):
logging.error('Schedule references non-existent task %s', schedule)
valid_tasks = [x for x in dir(experiment)
if not x.startswith('_')
and callable(getattr(experiment, x))]
logging.error('Allowed values for this experiment are: %s', valid_tasks)
raise ValueError('Schedule references non-existent task %s' % schedule)
task = getattr(experiment, schedule)
if not callable(task):
logging.error('Schedule references non-callable member %s', schedule)
valid_tasks = [x for x in dir(experiment)
if not x.startswith('_')
and callable(getattr(experiment, x))]
logging.error('Allowed values for this experiment are: %s', valid_tasks)
raise TypeError('Schedule references non-callable member %s' % schedule)
return task()
def run(experiment_fn, output_dir, schedule=None):
  """Build an Experiment via `experiment_fn` and run one of its methods.

  `experiment_fn` is called with the given `output_dir` and must produce
  an `Experiment` instance.  The method named by `schedule` is then
  looked up and executed.  When `schedule` is omitted, a default is
  derived from the task type in the experiment's run configuration
  ('train_and_evaluate' for single-machine and master tasks,
  'run_std_server' for parameter servers, 'train' for workers).

  Args:
    experiment_fn: Callable accepting `output_dir` and returning an
      `Experiment` (whose `Estimator` should use that directory as its
      `model_dir`).
    output_dir: Base output directory, forwarded to `experiment_fn`.
    schedule: Optional name of the `Experiment` method to invoke.

  Returns:
    Whatever the selected schedule method returns.

  Raises:
    ValueError: Empty `output_dir`, no task type to derive a default
      schedule from, or `schedule` naming a non-existent member.
    TypeError: `experiment_fn` is not callable, does not return an
      `Experiment`, or `schedule` names a non-callable member.
  """
  if not output_dir:
    raise ValueError('Must specify an output directory')
  if not callable(experiment_fn):
    raise TypeError('Experiment builder "%s" is not callable.' %
                    experiment_fn)
  # Build the experiment and sanity-check the builder's return value.
  experiment = experiment_fn(output_dir=output_dir)
  if not isinstance(experiment, Experiment):
    raise TypeError('Experiment builder did not return an Experiment '
                    'instance, got %s instead.' % type(experiment))
  # Fall back to the config-derived default when no schedule was given.
  chosen = schedule or _get_default_schedule(experiment.estimator.config)
  return _execute_schedule(experiment, chosen)
@experimental
def tune(experiment_fn, tuner):
"""Tune an experiment with hyper-parameters.
It iterates trials by running the Experiment for each trial with the
corresponding hyper-parameters. For each trial, it retrieves the
hyper-parameters from `tuner`, creates an Experiment by calling experiment_fn,
and then reports the measure back to `tuner`.
Example:
```
def _create_my_experiment(config, hparams):
hidden_units = [hparams.unit_per_layer] * hparams.num_hidden_layers
return tf.contrib.learn.Experiment(
estimator=DNNClassifier(config=config, hidden_units=hidden_units),
train_input_fn=my_train_input,
eval_input_fn=my_eval_input)
tuner = create_tuner(study_configuration, objective_key)
learn_runner.tune(experiment_fn=_create_my_experiment, tuner)
```
Args:
experiment_fn: A function that creates an `Experiment`. It should accept an
argument `config` which should be used to create the `Estimator` (passed
as `config` to its constructor), and an argument `hparams`, which should
be used for hyper-parameters tuning. It must return an `Experiment`.
tuner: A `Tuner` instance.
"""
  # Keep pulling trials until the tuner is exhausted; each trial builds a
  # fresh Experiment via experiment_fn and reports its measure back.
  while tuner.next_trial():
    tuner.run_experiment(experiment_fn)
def _is_distributed(config):
"""Returns true if this is a distributed job."""
if not config.cluster_spec:
return False
# This is considered a distributed job if there is more than one task
# in the cluster spec.
task_count = 0
for job in config.cluster_spec.jobs:
for _ in config.cluster_spec.job_tasks(job):
task_count += 1
return task_count > 1
def _get_default_schedule(config):
"""Returns the default schedule for the provided RunConfig."""
if not config or not _is_distributed(config):
return 'train_and_evaluate'
if not config.task_type:
raise ValueError('Must specify a schedule')
if config.task_type == run_config.TaskType.MASTER:
# TODO(rhaertel): handle the case where there is more than one master
# or explicitly disallow such a case.
return 'train_and_evaluate'
elif config.task_type == run_config.TaskType.PS:
return 'run_std_server'
elif config.task_type == run_config.TaskType.WORKER:
return 'train'
raise ValueError('No default schedule for task type: %s' % (config.task_type))
| [
"zboldi@gmail.com"
] | zboldi@gmail.com |
276580429593813de66a2a00be17ca56381aed29 | beb4c1dd9077f11ebd7aca407b272acbc780aa3c | /natureShare/natureShare/asgi.py | 353dbf25eec8bcec3d755238860bcb12f1dbfeb5 | [] | no_license | jpchato/nature-share | f89cfc1295648e9a0b03b7281d72d9179357b0be | 2cb952e72ea43c45884cd4a6c4b39c32936fe612 | refs/heads/master | 2023-02-19T06:46:01.213253 | 2021-01-15T19:10:24 | 2021-01-15T19:10:24 | 317,411,534 | 1 | 0 | null | 2021-01-13T20:23:29 | 2020-12-01T03:15:01 | Python | UTF-8 | Python | false | false | 399 | py | """
ASGI config for natureShare project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'natureShare.settings')
# Module-level ASGI callable picked up by ASGI servers.
application = get_asgi_application()
| [
"jpchato@gmail.com"
] | jpchato@gmail.com |
fe568ab2ae061bd22fdba789143115d98757ce79 | 9fb1c85a6d39c08e2a3cc235335bc482ad909b71 | /prowler/providers/aws/services/apigateway/apigateway_endpoint_public/apigateway_endpoint_public.py | dd48255b14d584610fe0c0b2327022a71b29afe2 | [
"Apache-2.0"
] | permissive | muharihar/prowler | 06dbdeaa0696dd65d72c33ff3c9f957b97f83d7a | 25c9bc07b219cc02004cc0b84adcfdcf18d5ad2b | refs/heads/master | 2023-02-18T01:26:54.161003 | 2023-02-10T11:38:13 | 2023-02-10T11:38:13 | 238,623,868 | 0 | 0 | null | 2020-02-06T06:36:36 | 2020-02-06T06:36:35 | null | UTF-8 | Python | false | false | 945 | py | from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.apigateway.apigateway_client import (
apigateway_client,
)
class apigateway_endpoint_public(Check):
    """Report whether each API Gateway REST API is publicly reachable."""

    def execute(self):
        """Produce one finding per REST API known to the API Gateway client."""
        findings = []
        for rest_api in apigateway_client.rest_apis:
            report = Check_Report_AWS(self.metadata())
            report.region = rest_api.region
            report.resource_id = rest_api.name
            report.resource_arn = rest_api.arn
            is_public = rest_api.public_endpoint
            report.status = "FAIL" if is_public else "PASS"
            if is_public:
                report.status_extended = f"API Gateway {rest_api.name} ID {rest_api.id} is internet accesible."
            else:
                report.status_extended = (
                    f"API Gateway {rest_api.name} ID {rest_api.id} is private."
                )
            findings.append(report)
        return findings
| [
"noreply@github.com"
] | muharihar.noreply@github.com |
af8af055ae6e1ed2806b5c3f803bf49f74269d9c | a86293a2033c06410aa8ed19bcbce8ca55ea3c55 | /src/client_libraries/python/dynamics/customerinsights/api/models/hierarchy_dependency.py | fb99c2e6f2cd0cec933998fef17969965b30fc13 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | ramotheonly/Dynamics365-CustomerInsights-Client-Libraries | a3ca28aa78d2b5509e65d9895ff4a0d42d05f611 | e00632f7972717b03e0fb1a9e2667e8f9444a0fe | refs/heads/main | 2023-08-02T08:09:04.063030 | 2021-09-28T22:42:15 | 2021-09-28T22:42:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,346 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class HierarchyDependency(Model):
    """Represents metadata for a Hierarchy Dependency.
    :param source_entity: Gets the source entities fully qualified name.
    :type source_entity: str
    :param account_id_attribute: Gets entity account Id.
    :type account_id_attribute: str
    :param parent_account_id_attribute: Gets parent account id.
    :type parent_account_id_attribute: str
    """
    # Serialization map (attribute name -> wire key and type), presumably
    # consumed by msrest's Model machinery.  NOTE: this class is
    # AutoRest-generated; manual edits are lost on regeneration.
    _attribute_map = {
        'source_entity': {'key': 'sourceEntity', 'type': 'str'},
        'account_id_attribute': {'key': 'accountIdAttribute', 'type': 'str'},
        'parent_account_id_attribute': {'key': 'parentAccountIdAttribute', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        # All three attributes are optional keyword arguments (default None).
        super(HierarchyDependency, self).__init__(**kwargs)
        self.source_entity = kwargs.get('source_entity', None)
        self.account_id_attribute = kwargs.get('account_id_attribute', None)
        self.parent_account_id_attribute = kwargs.get('parent_account_id_attribute', None)
| [
"michaelajohnston@mac.com"
] | michaelajohnston@mac.com |
a6ce5cb881f64be94cf0f5da6e0511a2849f7d7e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03986/s472536811.py | 0e513eaef16d85aa0b34b6fd2545952c6d79a389 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | x = input()
cs = 0
ct = 0
# NOTE(review): 'prev' is never read afterwards -- looks like leftover state.
prev = ""
n = len(x)
cnt = 0
for i in range(n):
    # cs counts 'S' characters that are still unmatched.
    if x[i] =="S":
        cs+=1
    elif x[i] =="T":
        if cs>0:
            ct+=1
    else:
        # Unreachable when the input contains only 'S' and 'T'.
        cs = 0
        ct = 0
    # Pair one pending 'S' with one pending 'T'; presumably each pair
    # models deleting an "ST" substring (two characters per match) --
    # confirm against the task statement.
    if cs>0 and ct>0:
        cnt+=1
        cs-=1
        ct-=1
print(n-cnt*2)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
6e3be21352074c547520d6711374fad2530e1908 | 63481ad34cca1a90c09819dd5f65fae490e59240 | /AddColumn.py | 7be5e9fd1172002b875c15d78d4ed93c2b519ecc | [] | no_license | icequeen5931/SmartSheets | 8d6ca26e269c83986ba302648e9c532ebdd6970d | 9c26269670f5b5679d08f9cb2bc765bb7753461c | refs/heads/master | 2021-05-11T21:36:01.269525 | 2017-01-12T15:37:50 | 2017-01-12T15:37:50 | 117,471,837 | 0 | 1 | null | 2018-01-14T22:25:12 | 2018-01-14T22:25:12 | null | UTF-8 | Python | false | false | 560 | py | __author__ = 'jpisano'
import requests
import json
# Smartsheet object ids used by this snippet.
sheetid = '4816554870237060' # "test" Sheet ID
rowid = '4542989902079876' # row number 4
customer_col = '4113607471458180' # Customer name
url = 'https://api.smartsheet.com/2.0/sheets/' + sheetid + '/columns'
# NOTE(review): a live-looking bearer token is hard-coded and committed
# here; it should be revoked and read from the environment instead.
myheader = {'Authorization': 'Bearer 519zl07z3k1uef6rfjxqqm5630', 'Content-Type': 'application/json'}
# Create a new TEXT_NUMBER column titled "my1stcol" at index 5.
response = requests.post (url,headers=myheader,json={"index": "5", "title": "my1stcol", "type": "TEXT_NUMBER"})
print (response.url)
print (response.content)
data = json.loads(response.text)
| [
"jpisano@cisco.com"
] | jpisano@cisco.com |
636ceb065354c55694cf707a2445c3403708f5a4 | cccf8da8d41ae2c14f5f4313c1edcf03a27956bb | /python/python2latex/writeLTXparbox.py | 10108a1f33c2e2ce95c5acb886bfbdd1ba13e27d | [] | no_license | LucaDiStasio/transpilers | e8f8ac4d99be3b42a050148ca8fbc5d025b83290 | c55d4f5240083ffd512f76cd1d39cff1016909b8 | refs/heads/master | 2021-01-12T01:57:00.540331 | 2017-11-01T13:59:55 | 2017-11-01T13:59:55 | 78,448,378 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,309 | py | # Autogenerated with SMOP
from smop.core import *
#
@function
def writeLTXparbox(filepath=None,args=None,options=None,*args,**kwargs):
varargin = writeLTXparbox.varargin
nargin = writeLTXparbox.nargin
##
#==============================================================================
# Copyright (c) 2016-2017 Universite de Lorraine & Lulea tekniska universitet
# Author: Luca Di Stasio <luca.distasio@gmail.com>
# <luca.distasio@ingpec.eu>
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution
# Neither the name of the Universite de Lorraine or Lulea tekniska universitet
# nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#==============================================================================
# DESCRIPTION
#
# A function to create a Latex file.
# Defines a box whose contents are created in paragraph mode. SeeBoxes.#
##
    # Open the target LaTeX file in append mode (smop runtime helpers:
    # fopen/fprintf/strcat/... mirror their Matlab counterparts).
    fileId=fopen(filepath,'a')
    fprintf(fileId,'\\n')
    line='\\parbox'
    # Append the optional [options] part unless options is a 'none' variant.
    if logical_not(strcmp(options,'none')) and logical_not(strcmp(options,'NONE')) and logical_not(strcmp(options,'None')):
        line=strcat(line,'[',options,']')
    if logical_not(isempty(args)):
        line=strcat(line,'{')
        # Matlab-style 1-based iteration over the args container.
        for i in arange(1,length(args)).reshape(-1):
            dims=size(args)
            # dims presumably holds (rows, cols) of the args container
            # -- TODO confirm against smop's size() semantics.
            if dims[1] == 1 and dims[2] == 1:
                line=strcat(line,args[i])
            else:
                if dims[1] > 1 and dims[2] == 1:
                    try:
                        line=strcat(line,args[i][1])
                    finally:
                        pass
                else:
                    if dims[1] == 1 and dims[2] > 1:
                        try:
                            line=strcat(line,args[1][i])
                        finally:
                            pass
                    else:
                        line=strcat(line,args[i])
        line=strcat(line,'}')
    fprintf(fileId,strcat(line,'\\n'))
    fclose(fileId)
return | [
"luca.distasio@gmail.com"
] | luca.distasio@gmail.com |
82a1071af62dab8396e2f20057846ed7b6b6ca47 | c6af5dcdb1a3cd9d20abdf50c5571836a1b76298 | /servlets/login.py | 54bbbd9d129286c5db8dc6fab7dddadd00a6e83b | [] | no_license | mikelambert/dancedeets | 82b1cb0c32b14485cd9cbbc051421d1cb7499830 | 8dd51007bb2faa56d835a149b60740141d472c25 | refs/heads/master | 2021-01-21T00:30:09.963623 | 2016-11-29T12:04:00 | 2016-11-29T12:04:00 | 42,857,923 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,058 | py | #!/usr/bin/env python
import logging
import app
import base_servlet
from logic import mobile
from users import users
@app.route('/login')
class LoginHandler(base_servlet.BaseRequestHandler):
    """Serves the login page; users with a valid, non-expired account are
    redirected straight through to their destination."""
    def requires_login(self):
        # The login page itself must be reachable while logged out.
        return False
    def is_login_page(self):
        return True
    # TODO(lambert): move this into the same base / handler, so we don't do stupid redirects to /login
    def get(self):
        next_url = self.request.get('next') or '/'
        # If they're logged in, and have an account created, update and redirect
        if self.fb_uid:
            user = users.User.get_by_id(self.fb_uid)
            if user and not user.expired_oauth_token:
                self.redirect(next_url)
                return
        want_specific_page = (next_url != '/?')
        if want_specific_page:
            self.display['next'] = next_url
            self.display['suppress_promos'] = True
            logging.info(self.display['next'])
            self.render_template('login_only')
            return
        # Treat them like a totally logged-out user since they have no user object yet
        self.fb_uid = None
        # Explicitly do not preload anything from facebook for this servlet
        # self.finish_preload()
        self.display['user_message'] = self.get_cookie('User-Message')
        from util import country_dialing_codes
        self.display['suppress_promos'] = True
        self.display['country_codes'] = sorted(country_dialing_codes.mapping.items())
        self.display['android_url'] = mobile.ANDROID_URL
        self.display['ios_url'] = mobile.IOS_URL
        self.display['prefix'] = ''
        self.display['phone'] = '' # Set the default, and then let any errors-and-refilling occur on /mobile_apps
        self.display['mobile_show_smartbanner'] = False
        self.display['next'] = next_url
        logging.info(self.display['next'])
        # presumably 'nd' is a "new design" flag choosing the new homepage
        # template -- confirm with callers.
        if bool(self.request.get('nd', 1)):
            self.render_template('new_homepage')
        else:
            self.render_template('login')
| [
"mlambert@gmail.com"
] | mlambert@gmail.com |
0031184772a691b823929ac81fe865d20d594792 | d5b6b19ab192180ae1e04eff99a37f629e1feb10 | /goods/sellgoods/salesquantity/local_util/sales_util.py | 124a73cd88e88090953229c4709c3d6c998ff4ab | [] | no_license | maxenergy/goodsdl2 | 9e88dd499fa4c6d536e4444839e7fbe549c7070a | 42d0eb797e9710ca85d885e6b4d0ed97cbf88607 | refs/heads/master | 2022-12-05T12:34:18.061604 | 2020-09-03T04:28:29 | 2020-09-03T04:28:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,200 | py | from set_config import config
from goods.sellgoods.salesquantity.utils import mysql_util
from goods.sellgoods.sql import sales_quantity
import time
ai = config.ai
def get_predict_sales(shop_ids):
    """Fetch AI-predicted sales quantities for the given shops.

    Runs the ``sales_ai`` query for today's date and the given shop ids,
    then groups the resulting rows by shop.

    :param shop_ids: non-empty list of shop ids to query
    :return: dict mapping shop_id -> {upc: predicted sales quantity}
    """
    mysql_ins = mysql_util.MysqlUtil(ai)
    sql = sales_quantity.sql_params["sales_ai"]
    # Today's date, quoted for direct interpolation into the SQL text.
    exe_time = "'" + time.strftime('%Y-%m-%d', time.localtime()) + "'"
    # A one-element Python tuple would render as "(42,)", which is not
    # valid SQL, so single ids are formatted by hand.
    if len(shop_ids) == 1:
        ids_clause = "( " + str(shop_ids[0]) + " )"
    else:
        ids_clause = str(tuple(shop_ids))
    # NOTE(review): ids are interpolated straight into the SQL string;
    # callers must only pass trusted, numeric shop ids.
    sql = sql.format(ids_clause, exe_time)
    print(sql)
    results = mysql_ins.selectAll(sql)
    # Group rows by shop in one pass (each row: shop_id, upc, sale);
    # the previous implementation rescanned every row once per shop.
    shop_upc_sales = {}
    for row in results:
        shop_id, upc, predict_sale = row[0], row[1], row[2]
        shop_upc_sales.setdefault(shop_id, {})[upc] = predict_sale
    return shop_upc_sales
| [
"908601417@qq.com"
] | 908601417@qq.com |
2e1d77bc5d60ab64e8e3ec36b17200fe8b53f725 | e9e3169d354c840104595dcd660cd16d7d56f72e | /dz5_asp/task1.py | dbf086d9af3d3fb70c51908923f24d927c786782 | [] | no_license | davendiy/combinatorics_2course | f8d08c9164fa544662c86e254f3a7181928db3a1 | b18618335812c3a185a94be8fbbc8f28fd2dea78 | refs/heads/master | 2020-04-05T09:58:37.904216 | 2019-01-11T21:23:02 | 2019-01-11T21:23:02 | 156,782,776 | 0 | 0 | null | 2018-12-06T22:18:22 | 2018-11-08T23:32:12 | Python | UTF-8 | Python | false | false | 3,977 | py | #!/usr/bin/env python3
# -*-encoding: utf-8-*-
# created: 07.12.18
# by David Zashkolny
# 2 course, comp math
# Taras Shevchenko National University of Kyiv
# email: davendiy@gmail.com
"""The following implementation assumes that the activities
are already sorted according to their finish time
"""
import random
import time
import functools
def cache(func):
    """ Decorator that memoises any function by its call arguments.

    Results are stored in a per-function dictionary, so repeated calls
    with the same arguments return the stored answer instead of
    recomputing it.  Keyword arguments are supported as part of the key
    (the original version raised TypeError on any keyword call).
    """
    results = {}

    @functools.wraps(func)
    def __cache(*args, **kwargs):   # changed function
        nonlocal results
        # Keyword arguments are folded into the key in a deterministic order.
        key = (args, tuple(sorted(kwargs.items())))
        if key in results:
            rez = results[key]
        else:
            rez = func(*args, **kwargs)
            results[key] = rez
        return rez

    return __cache
def recursive(s, f):
    """Top-down solution of the activity-selection problem.

    :param s: start times of all activities (sorted by finish time)
    :param f: finish times of all activities
    :return: maximum number of mutually compatible activities
    """
    # Fictive boundary activities (0, 0) and (+inf, +inf) let the DP treat
    # the whole timeline as one interval; tuples make the arguments hashable.
    s = tuple([0] + s + [1000050000])
    f = tuple([0] + f + [1000050000])
    n = len(f)
    return _recursive(s, f, 0, n-1)


# Memoised with the standard library instead of the hand-rolled `cache`
# decorator above; semantics are the same (unbounded memo keyed on args).
@functools.lru_cache(maxsize=None)
def _recursive(func_s, func_f, i, j):
    """Max activities schedulable strictly between activity i and activity j."""
    _max = 0
    for k in range(i, j+1):
        if func_f[i] <= func_s[k] < func_f[k] <= func_s[j]:
            tmp_max = _recursive(func_s, func_f, i, k) + _recursive(func_s, func_f, k, j) + 1
            if tmp_max > _max:
                _max = tmp_max
    return _max
def dynamic(s, f):
    """ Dynamic solution of the ASP problem, using the recurrent formula
    from Kormen (CLRS 16.1).

    dp[i][j] is the maximum number of activities that fit strictly
    between (fictive) activities i and j.

    NOTE(review): dp[k][j] for k > i is still 0 when it is read, so the
    recurrence degenerates to the longest-chain form; dp[0][n+1] is
    nevertheless the correct count.

    :param s: start times of all activities (sorted by finish time)
    :param f: finish times of all activities
    :return: maximum number of mutually compatible activities
             (the old docstring wrongly promised a sequence of indexes)
    """
    n = len(s)
    # Fictive boundary activities so the whole timeline is one interval.
    start = [0] + s + [10005000]
    finish = [0] + f + [10005000]
    dp = [[0] * (n + 2) for _ in range(n + 2)]
    for i in range(n + 2):
        for j in range(n + 2):
            best = 0
            # Try every activity k that fits between activities i and j.
            for k in range(i, j + 1):
                if finish[i] <= start[k] < finish[k] <= start[j]:
                    best = max(best, dp[i][k] + dp[k][j] + 1)
            dp[i][j] = best
    return dp[0][n + 1]
def printMaxActivities(s, f):
    """Print a maximum set of activity indices, chosen greedily.

    Assumes the activities are already sorted by finish time.  Prints a
    header line followed by the selected indices; returns None (the old
    docstring wrongly promised a returned sequence).

    :param s: start times of all activities
    :param f: finish times of all activities (non-decreasing)
    """
    n = len(f)
    print("The following activities are selected")
    if n == 0:
        # Nothing to select; previously this printed a bogus index 0.
        return
    # The activity with the earliest finish time is always selected.
    i = 0
    print(i, end=' ')
    for j in range(1, n):
        # Select j if it starts no earlier than the finish time of the
        # previously selected activity.
        if s[j] >= f[i]:
            print(j, end=' ')
            i = j
# Driver program to test above functions
if __name__ == '__main__':
    N = 1000

    # Generate N random activities as (finish, start) pairs so that a
    # plain sort orders them by finish time, as the greedy solver needs.
    activities = []
    for _ in range(N):
        start = random.randrange(1, N)
        finish = random.randrange(start + 1, N + 1)
        activities.append((finish, start))
    activities.sort()

    test_s = [start for _, start in activities]
    test_f = [finish for finish, _ in activities]

    print(test_s)
    print(test_f)
    print(f"n == {N}")

    print('\n=====by greedy=====')
    t = time.time()
    print('result:')
    printMaxActivities(test_s, test_f)
    print('\ntime elapsed: {}'.format(time.time() - t))

    print('\n=====by dynamic=====')
    t = time.time()
    print('result:\n{}'.format(dynamic(test_s, test_f)))
    print('time elapsed: {}'.format(time.time() - t))

    # print('\n===by recursive===')
    # print(recursive(test_s, test_f))
| [
"davendiy@gmail.com"
] | davendiy@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.