blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M โ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
06b43121f55062f22988a5b9411f16234b2dd4c9 | a527ab5e3c2cf34e92117d657eda52bcc76e24f8 | /gender.py | d5e7b764b1601b921cb0f6f51492e26b2ef874a0 | [] | no_license | pankajdahilkar/python_codes | 2e4246aa9b725b28f3cccb4c7a4c7094927b8b5a | 56fda0257fb4769da59f2b5c81536f9a5b9ee2db | refs/heads/master | 2021-05-25T17:16:55.765443 | 2020-04-13T07:21:02 | 2020-04-13T07:21:02 | 253,838,761 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | import csv
# Look up a name in Female.csv and report whether it appears.
name = input("Enter your name ")
found = False
with open("Female.csv", 'r', encoding='utf-8') as f:
    reader = csv.reader(f)
    for row in reader:
        for field in row:
            if field == name:
                print(name, "is girl")
                found = True
                break  # stop scanning this row once matched
        if found:
            break  # original `break` only exited the inner loop
# Report "not found" exactly once, after the whole file has been scanned
# (the original printed it for every non-matching cell).
if not found:
    print("not found")
| [
"pankajmdahilkar@gmail.com"
] | pankajmdahilkar@gmail.com |
fbad71338ab509e1dfd81e377efb259dc9287a74 | 4e092f6a59c0aad5cfe4235baac06f4f9823c62a | /0026. Remove Duplicates from Sorted Array.py | 3628990533211b662c2e9da54cc9ee0e44197a56 | [] | no_license | xinmiaoo/leetcode_Aug_3 | 72091fd7d23f98fb646ffb6f0b1a3068c930ea3f | 732d8aa126dd5149013a4ce084f37ee8bc123ac7 | refs/heads/master | 2020-07-12T04:34:24.506629 | 2019-08-10T21:54:50 | 2019-08-10T21:54:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | class Solution(object):
def removeDuplicates(self, nums):
    """Remove duplicates from a sorted list in place.

    Keeps the first occurrence of each value in the leading portion of
    `nums` and returns the count of unique elements (the original body
    documented `:rtype: int` but returned None).

    :type nums: List[int]
    :rtype: int
    """
    if not nums:
        return 0
    # Two-pointer compaction: `write` is the length of the deduplicated
    # prefix. O(n) instead of the original O(n^2) `del nums[i]` loop.
    write = 1
    for read in range(1, len(nums)):
        if nums[read] != nums[write - 1]:
            nums[write] = nums[read]
            write += 1
    # Trim the tail so `nums` itself holds only the unique values.
    del nums[write:]
    return write
| [
"noreply@github.com"
] | xinmiaoo.noreply@github.com |
11f2fe4d1f01ee2b1139b7b7076221a351cbf9e4 | cb26d3f745628cd113f4f954ceac23ce262afbb5 | /Day4/answer_day4_question2.py | 93ae2c3f566d420feb9820a96e4d196c55632b24 | [] | no_license | nama-aman/Qiskit-Challenge-India-2020 | 88e56982012221b4153dc9d73e4f09508c3d7807 | 038956e5b241868637c06cbbf81928f3f9a1d796 | refs/heads/master | 2022-12-16T00:11:15.135572 | 2020-09-06T12:58:47 | 2020-09-06T12:58:47 | 293,277,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py |
### WRITE YOUR CODE BETWEEN THESE LINES - START
# import libraries that are used in the functions below.
from qiskit import QuantumCircuit
import numpy as np
### WRITE YOUR CODE BETWEEN THESE LINES - END
def init_circuit():
    """Build the fixed two-qubit starting state: H on qubit 0, X on qubit 1."""
    # create a quantum circuit on two qubits
    qc = QuantumCircuit(2)
    # initializing the circuit
    qc.h(0)
    qc.x(1)
    return qc
# The initial state has been defined above.
# You'll now have to apply necessary gates in the build_state() function to convert the state as asked in the question.
def build_state():
    """Return the initialized circuit with the answer gate applied."""
    ### WRITE YOUR CODE BETWEEN THESE LINES - START
    # the initialized circuit
    circuit = init_circuit()
    # apply a single cu3 gate: u3(theta=0, phi=-pi/2, lambda=0) on qubit 0,
    # controlled by qubit 1 (qiskit cu3 arg order: theta, phi, lam, ctrl, tgt)
    circuit.cu3(0, -np.pi/2, 0, 1, 0)
    ### WRITE YOUR CODE BETWEEN THESE LINES - END
    return circuit
| [
"noreply@github.com"
] | nama-aman.noreply@github.com |
1080efe864c0987d212ee7dc9f93c2eb3b2aac02 | ced90a261ca425d86581ff301c21ce08e005c067 | /tranx/datasets/django/dataset.py | 6e4e64b95bf293feb1a5c1198b55baed0eb6137f | [] | no_license | gaoliujie2016/tranx-1 | 0a9727d9f5bf96ea0874b2c723045d734fa824d6 | f9e4de91f678284f2935e2f9c4cdf71a32d8deca | refs/heads/master | 2022-03-08T14:49:42.148163 | 2019-08-27T18:15:21 | 2019-08-27T18:15:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,743 | py | # coding=utf-8
from __future__ import print_function
import torch
import re
import pickle
import ast
import astor
import nltk
import sys
import numpy as np
from asdl.lang.py.py_asdl_helper import python_ast_to_asdl_ast, asdl_ast_to_python_ast
from asdl.lang.py.py_transition_system import PythonTransitionSystem
from asdl.hypothesis import *
from asdl.lang.py.py_utils import tokenize_code
from components.action_info import ActionInfo, get_action_infos
# Regexes detecting Python statement fragments (elif/else/try/except/finally
# clauses and decorators) that ast.parse cannot parse standalone; used by
# Django.canonicalize_code to wrap them into parseable snippets.
p_elif = re.compile(r'^elif\s?')
p_else = re.compile(r'^else\s?')
p_try = re.compile(r'^try\s?')
p_except = re.compile(r'^except\s?')
p_finally = re.compile(r'^finally\s?')
p_decorator = re.compile(r'^@.*')
# Matches a single- or double-quoted string literal, ignoring escaped quotes.
QUOTED_STRING_RE = re.compile(r"(?P<quote>['\"])(?P<string>.*?)(?<!\\)(?P=quote)")
def replace_string_ast_nodes(py_ast, str_map):
    """Rewrite every string literal in *py_ast* whose text is a key of *str_map*.

    NOTE: `str.decode('string_escape')` exists only in Python 2; this module
    is Python 2 code.
    """
    for node in ast.walk(py_ast):
        if isinstance(node, ast.Str):
            str_val = node.s
            if str_val in str_map:
                node.s = str_map[str_val]
            else:
                # handle cases like `\n\t` in string literals
                for key, val in str_map.items():
                    str_literal_decoded = key.decode('string_escape')
                    if str_literal_decoded == str_val:
                        node.s = val
class Django(object):
    """Helpers for canonicalizing and loading the Django NL-to-code corpus."""

    @staticmethod
    def canonicalize_code(code):
        """Wrap a code fragment so that `ast.parse` accepts it.

        Statement fragments (elif/else/except/finally bodies, decorators,
        dangling block headers) cannot be parsed standalone; pad them with
        dummy statements.
        """
        if p_elif.match(code):
            code = 'if True: pass\n' + code
        if p_else.match(code):
            code = 'if True: pass\n' + code
        # NOTE(review): the `try` branch appends rather than prepends —
        # looks intentional for snippets that end with `try:`; confirm.
        if p_try.match(code):
            code = code + 'pass\nexcept: pass'
        elif p_except.match(code):
            code = 'try: pass\n' + code
        elif p_finally.match(code):
            code = 'try: pass\n' + code
        if p_decorator.match(code):
            code = code + '\ndef dummy(): pass'
        # A trailing colon means a dangling block header; give it a body.
        if code[-1] == ':':
            code = code + 'pass'
        return code
@staticmethod
def canonicalize_str_nodes(py_ast, str_map):
    """Replace string literals in *py_ast* with their `_STR:i_` slot ids.

    *str_map* maps original literal text -> slot id. Python 2 only
    (uses `str.decode('string_escape')`).
    """
    for node in ast.walk(py_ast):
        if isinstance(node, ast.Str):
            str_val = node.s
            if str_val in str_map:
                node.s = str_map[str_val]
            else:
                # handle cases like `\n\t` in string literals
                for str_literal, slot_id in str_map.items():
                    str_literal_decoded = str_literal.decode('string_escape')
                    if str_literal_decoded == str_val:
                        node.s = slot_id
@staticmethod
def canonicalize_query(query):
    """
    canonicalize the query, replace strings to a special place holder

    Returns (canonical_query, str_map) where str_map maps each quoted
    literal found in the query to its `_STR:i_` placeholder.
    """
    str_count = 0
    str_map = dict()

    matches = QUOTED_STRING_RE.findall(query)
    # de-duplicate
    cur_replaced_strs = set()
    for match in matches:
        # If one or more groups are present in the pattern,
        # it returns a list of groups
        quote = match[0]
        str_literal = match[1]
        quoted_str_literal = quote + str_literal + quote

        if str_literal in cur_replaced_strs:
            # replace the string with new quote with slot id
            query = query.replace(quoted_str_literal, str_map[str_literal])
            continue

        # FIXME: substitute the ' % s ' with
        if str_literal in ['%s']:
            continue

        str_repr = '_STR:%d_' % str_count
        str_map[str_literal] = str_repr

        query = query.replace(quoted_str_literal, str_repr)

        str_count += 1
        cur_replaced_strs.add(str_literal)

    # tokenize
    query_tokens = nltk.word_tokenize(query)

    new_query_tokens = []
    # break up function calls like foo.bar.func
    # each dotted token is also emitted exploded inside [ ... ] brackets
    for token in query_tokens:
        new_query_tokens.append(token)
        i = token.find('.')
        if 0 < i < len(token) - 1:
            new_tokens = ['['] + token.replace('.', ' . ').split(' ') + [']']
            new_query_tokens.extend(new_tokens)

    query = ' '.join(new_query_tokens)
    # undo tokenizer damage around '%s' placeholders
    query = query.replace('\' % s \'', '%s').replace('\" %s \"', '%s')

    return query, str_map
@staticmethod
def canonicalize_example(query, code):
    """Canonicalize one (NL query, code snippet) pair.

    Returns (query_tokens, canonical_code, str_map): the tokenized query
    with string literals slotted, the astor-normalized code with the same
    slots substituted, and the literal->slot mapping.
    """
    canonical_query, str_map = Django.canonicalize_query(query)
    query_tokens = canonical_query.split(' ')

    canonical_code = Django.canonicalize_code(code)
    ast_tree = ast.parse(canonical_code)

    Django.canonicalize_str_nodes(ast_tree, str_map)
    # Round-trip through astor to get a normalized surface form.
    canonical_code = astor.to_source(ast_tree)

    # sanity check
    # decanonical_code = Django.decanonicalize_code(canonical_code, str_map)
    # decanonical_code_tokens = tokenize_code(decanonical_code)
    # raw_code_tokens = tokenize_code(code)
    # if decanonical_code_tokens != raw_code_tokens:
    #     pass

    # try:
    #     ast_tree = ast.parse(canonical_code).body[0]
    # except:
    #     print('error!')
    #     canonical_code = Django.canonicalize_code(code)
    #     gold_ast_tree = ast.parse(canonical_code).body[0]
    #     str_map = {}

    # parse_tree = python_ast_to_asdl_ast(gold_ast_tree, grammar)
    # gold_source = astor.to_source(gold_ast_tree)
    # ast_tree = asdl_ast_to_python_ast(parse_tree, grammar)
    # source = astor.to_source(ast_tree)

    # assert gold_source == source, 'sanity check fails: gold=[%s], actual=[%s]' % (gold_source, source)
    #
    # # action check
    # parser = PythonTransitionSystem(grammar)
    # actions = parser.get_actions(parse_tree)
    #
    # hyp = Hypothesis()
    # for action in actions:
    #     assert action.__class__ in parser.get_valid_continuation_types(hyp)
    #     if isinstance(action, ApplyRuleAction):
    #         assert action in parser.get_valid_continuations(hyp)
    #     hyp.apply_action(action)
    #
    # src_from_hyp = astor.to_source(asdl_ast_to_python_ast(hyp.tree, grammar))
    # assert src_from_hyp == gold_source

    return query_tokens, canonical_code, str_map
@staticmethod
def parse_django_dataset(annot_file, code_file, asdl_file_path, max_query_len=70, vocab_freq_cutoff=10):
    """Load the parallel annotation/code files and build the dataset.

    Returns ((train_examples, dev_examples, test_examples), vocab).
    Each example pairs the tokenized query with the ASDL action sequence
    derived from the canonicalized code. The split is positional:
    [0, 16000) train, [16000, 17000) dev, rest test.
    """
    asdl_text = open(asdl_file_path).read()
    grammar = ASDLGrammar.from_text(asdl_text)
    transition_system = PythonTransitionSystem(grammar)

    loaded_examples = []

    from components.vocab import Vocab, VocabEntry
    from components.dataset import Example

    for idx, (src_query, tgt_code) in enumerate(zip(open(annot_file), open(code_file))):
        src_query = src_query.strip()
        tgt_code = tgt_code.strip()

        src_query_tokens, tgt_canonical_code, str_map = Django.canonicalize_example(src_query, tgt_code)
        python_ast = ast.parse(tgt_canonical_code).body[0]
        gold_source = astor.to_source(python_ast).strip()
        tgt_ast = python_ast_to_asdl_ast(python_ast, grammar)
        tgt_actions = transition_system.get_actions(tgt_ast)

        # print('+' * 60)
        # print('Example: %d' % idx)
        # print('Source: %s' % ' '.join(src_query_tokens))
        # if str_map:
        #     print('Original String Map:')
        #     for str_literal, str_repr in str_map.items():
        #         print('\t%s: %s' % (str_literal, str_repr))
        # print('Code:\n%s' % gold_source)
        # print('Actions:')

        # sanity check: replay the action sequence and verify it rebuilds
        # exactly the gold source.
        hyp = Hypothesis()
        for t, action in enumerate(tgt_actions):
            assert action.__class__ in transition_system.get_valid_continuation_types(hyp)
            if isinstance(action, ApplyRuleAction):
                assert action.production in transition_system.get_valid_continuating_productions(hyp)

            p_t = -1
            f_t = None
            if hyp.frontier_node:
                p_t = hyp.frontier_node.created_time
                f_t = hyp.frontier_field.field.__repr__(plain=True)

            print('\t[%d] %s, frontier field: %s, parent: %d' % (t, action, f_t, p_t))
            hyp = hyp.clone_and_apply_action(action)

        assert hyp.frontier_node is None and hyp.frontier_field is None

        src_from_hyp = astor.to_source(asdl_ast_to_python_ast(hyp.tree, grammar)).strip()
        assert src_from_hyp == gold_source

        print('+' * 60)

        loaded_examples.append({'src_query_tokens': src_query_tokens,
                                'tgt_canonical_code': gold_source,
                                'tgt_ast': tgt_ast,
                                'tgt_actions': tgt_actions,
                                'raw_code': tgt_code, 'str_map': str_map})

        # print('first pass, processed %d' % idx, file=sys.stderr)

    train_examples = []
    dev_examples = []
    test_examples = []

    action_len = []

    for idx, e in enumerate(loaded_examples):
        src_query_tokens = e['src_query_tokens'][:max_query_len]
        tgt_actions = e['tgt_actions']
        tgt_action_infos = get_action_infos(src_query_tokens, tgt_actions)

        example = Example(idx=idx,
                          src_sent=src_query_tokens,
                          tgt_actions=tgt_action_infos,
                          tgt_code=e['tgt_canonical_code'],
                          tgt_ast=e['tgt_ast'],
                          meta={'raw_code': e['raw_code'], 'str_map': e['str_map']})

        # print('second pass, processed %d' % idx, file=sys.stderr)
        action_len.append(len(tgt_action_infos))

        # train, valid, test split
        if 0 <= idx < 16000:
            train_examples.append(example)
        elif 16000 <= idx < 17000:
            dev_examples.append(example)
        else:
            test_examples.append(example)

    print('Max action len: %d' % max(action_len), file=sys.stderr)
    print('Avg action len: %d' % np.average(action_len), file=sys.stderr)
    print('Actions larger than 100: %d' % len(list(filter(lambda x: x > 100, action_len))), file=sys.stderr)

    src_vocab = VocabEntry.from_corpus([e.src_sent for e in train_examples], size=5000, freq_cutoff=vocab_freq_cutoff)

    # NOTE: relies on Python 2 `map`/`filter` returning lists.
    primitive_tokens = [map(lambda a: a.action.token,
                            filter(lambda a: isinstance(a.action, GenTokenAction), e.tgt_actions))
                        for e in train_examples]

    primitive_vocab = VocabEntry.from_corpus(primitive_tokens, size=5000, freq_cutoff=vocab_freq_cutoff)
    assert '_STR:0_' in primitive_vocab

    # generate vocabulary for the code tokens!
    code_tokens = [tokenize_code(e.tgt_code, mode='decoder') for e in train_examples]
    code_vocab = VocabEntry.from_corpus(code_tokens, size=5000, freq_cutoff=vocab_freq_cutoff)

    vocab = Vocab(source=src_vocab, primitive=primitive_vocab, code=code_vocab)
    print('generated vocabulary %s' % repr(vocab), file=sys.stderr)

    return (train_examples, dev_examples, test_examples), vocab
@staticmethod
def process_django_dataset():
    """Parse the raw corpus and pickle the splits and vocab to data/django/."""
    vocab_freq_cutoff = 15  # TODO: found the best cutoff threshold
    annot_file = 'data/django/all.anno'
    code_file = 'data/django/all.code'

    (train, dev, test), vocab = Django.parse_django_dataset(annot_file, code_file,
                                                            'asdl/lang/py/py_asdl.txt',
                                                            vocab_freq_cutoff=vocab_freq_cutoff)

    # NOTE(review): pickling to text-mode 'w' handles — Python 2 era;
    # Python 3 would need 'wb'.
    pickle.dump(train, open('data/django/train.bin', 'w'))
    pickle.dump(dev, open('data/django/dev.bin', 'w'))
    pickle.dump(test, open('data/django/test.bin', 'w'))
    pickle.dump(vocab, open('data/django/vocab.freq%d.bin' % vocab_freq_cutoff, 'w'))
@staticmethod
def run():
    """Sanity-check the full corpus: every example's action sequence must
    rebuild its gold source, and clone/apply must agree with in-place apply."""
    asdl_text = open('asdl/lang/py/py_asdl.txt').read()
    grammar = ASDLGrammar.from_text(asdl_text)

    annot_file = 'data/django/all.anno'
    code_file = 'data/django/all.code'

    transition_system = PythonTransitionSystem(grammar)

    for idx, (src_query, tgt_code) in enumerate(zip(open(annot_file), open(code_file))):
        src_query = src_query.strip()
        tgt_code = tgt_code.strip()

        query_tokens, tgt_canonical_code, str_map = Django.canonicalize_example(src_query, tgt_code)
        python_ast = ast.parse(tgt_canonical_code).body[0]
        gold_source = astor.to_source(python_ast)
        tgt_ast = python_ast_to_asdl_ast(python_ast, grammar)
        tgt_actions = transition_system.get_actions(tgt_ast)

        # sanity check
        hyp = Hypothesis()
        hyp2 = Hypothesis()
        for action in tgt_actions:
            assert action.__class__ in transition_system.get_valid_continuation_types(hyp)
            if isinstance(action, ApplyRuleAction):
                assert action.production in transition_system.get_valid_continuating_productions(hyp)
            hyp = hyp.clone_and_apply_action(action)
            hyp2.apply_action(action)

        src_from_hyp = astor.to_source(asdl_ast_to_python_ast(hyp.tree, grammar))
        assert src_from_hyp == gold_source
        # equal trees, but distinct objects (clone really copied)
        assert hyp.tree == hyp2.tree and hyp.tree is not hyp2.tree

        print(idx)
@staticmethod
def canonicalize_raw_django_oneliner(code):
    """Normalize a single line of raw Django code to astor's surface form."""
    # use the astor-style code
    code = Django.canonicalize_code(code)
    py_ast = ast.parse(code).body[0]
    code = astor.to_source(py_ast).strip()

    return code
def generate_vocab_for_paraphrase_model(vocab_path, save_path):
    """Build a paraphrase-model vocab: ten <unk_i> slots plus all source-side
    and code-side words from an existing pickled Vocab; pickle it to *save_path*."""
    from components.vocab import VocabEntry, Vocab

    vocab = pickle.load(open(vocab_path))
    para_vocab = VocabEntry()
    for i in range(0, 10):
        para_vocab.add('<unk_%d>' % i)
    for word in vocab.source.word2id:
        para_vocab.add(word)
    for word in vocab.code.word2id:
        para_vocab.add(word)

    pickle.dump(para_vocab, open(save_path, 'w'))
if __name__ == '__main__':
    # Entry point: rebuild and pickle the Django dataset splits and vocab.
    # Django.run()
    # f1 = Field('hahah', ASDLPrimitiveType('123'), 'single')
    # rf1 = RealizedField(f1, value=123)
    #
    # # print(f1 == rf1)
    # a = {f1: 1}
    # print(a[rf1])
    Django.process_django_dataset()
    # generate_vocab_for_paraphrase_model('data/django/vocab.freq10.bin', 'data/django/vocab.para.freq10.bin')
    # py_ast = ast.parse("""sorted(asf, reverse='k' 'k', k='re' % sdf)""")
    # canonicalize_py_ast(py_ast)
    # for node in ast.walk(py_ast):
    #     if isinstance(node, ast.Str):
    #         print(node.s)
    # print(astor.to_source(py_ast))
| [
"alex.dinu07@gmail.com"
] | alex.dinu07@gmail.com |
45a17b83e5063af268f032dd425b660dcf74ebf6 | ae67cd669cb733f0ed7b4b02c1320757c91ebd9f | /lut/__init__.py | ff33cffbf0d228820ebf83cb62a25232e9a5b6ea | [] | no_license | anguelos/lut | bef878485a6ce31195593a504f0a819032e878be | 6898fefc748cbbc323415e96e22c1711dfcce1e1 | refs/heads/master | 2016-09-14T01:29:15.626110 | 2016-05-06T16:50:22 | 2016-05-06T16:50:22 | 58,217,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | import core
import train
import models
import view
import loss
import layers
import metric
import ds
# Force-reload each submodule so edits are picked up in interactive sessions.
# NOTE: bare reload() is a Python 2 builtin; Python 3 needs importlib.reload.
core=reload(core)
models=reload(models)
view=reload(view)
loss=reload(loss)
layers=reload(layers)
metric=reload(metric)
ds=reload(ds)
train=reload(train)
from core import *
from train import *
from models import *
from view import *
from ds import *
from loss import *
from layers import *
from metric import *
| [
"anguelos.nicolaou@gmail.com"
] | anguelos.nicolaou@gmail.com |
1afefbe7797fabd4b8f9a7ab4164ea1bf07b73e6 | b13c95cb06434a985f9da51575f1eb3fa484cb0c | /helloworld/main.py | 9320a568b423d22b5eaab013607325ea89c83177 | [] | no_license | shandre-github/minikube | aa34294d6355ade16e0155bd5caef6a946840e69 | 94cfe856bf202e90464c2d85ccabe1ce673dbeb2 | refs/heads/master | 2023-03-13T15:11:39.785525 | 2021-02-25T04:53:19 | 2021-02-25T04:53:19 | 341,305,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | import os
from flask import Flask
app = Flask(__name__)

@app.route('/')
def index():
    """Greet with this pod's name."""
    # MY_POD_NAME is read from the environment — presumably injected by the
    # Kubernetes manifest (Downward API); returns None-based text if unset.
    name = os.environ.get('MY_POD_NAME')
    return 'Hello World from {name}'.format(name=name)

# Bind on all interfaces so the container port mapping works.
app.run(host='0.0.0.0', port=8080)
"shpnaruto@gmail.com"
] | shpnaruto@gmail.com |
6ecde04930e4c4d2d909383a6d9ae3c2c8476230 | c089d51e00ace0e2d2c1c08cc7278ea4a43ff8f9 | /classifiers/keras.py | 4ac55c4f41d6fadb0db78e8d3ac6e6246d879ec9 | [] | no_license | salman-kha3/ActiveLearning | 84f088aac802c59edba94c205b4666bfa8e7b15f | 3d6c6c120788ea2fd338cff5d3f5b88b5b8901ec | refs/heads/master | 2021-01-11T20:35:46.977142 | 2017-04-02T12:58:01 | 2017-04-02T12:58:01 | 79,150,978 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | import keras
from libact.base.interfaces import ContinuousModel
import numpy as np
class KerasClassifier(ContinuousModel):
    """libact ContinuousModel adapter wrapping a keras Sequential model."""

    def __init__(self, *args, **kwargs):
        # All constructor args are forwarded to keras.models.Sequential.
        self.model = keras.models.Sequential(*args, **kwargs)

    def train(self, dataset, *args, **kwargs):
        # dataset.format_sklearn() yields the (X, y) positional args for fit().
        return self.model.fit(*(dataset.format_sklearn() + args), **kwargs)

    def predict(self, feature, *args, **kwargs):
        return self.model.predict(feature, *args, **kwargs)

    def score(self, *args, **kwargs):
        return self.model.evaluate(*args, **kwargs)

    def predict_real(self, feature, *args, **kwargs):
        """Return per-class real-valued scores, shape (n_samples, n_classes)."""
        # Prefer decision_function when the model exposes one; otherwise
        # fall back to class probabilities.
        if hasattr(self.model, "decision_function"):
            dvalue = self.model.decision_function(feature, *args, **kwargs)
        else:
            dvalue = self.model.predict_proba(feature, *args, **kwargs)
            # [:, 1]
        # A 1-D output is the binary case: expand to two columns (-d, d).
        if len(np.shape(dvalue)) == 1:  # n_classes == 2
            return np.vstack((-dvalue, dvalue)).T
        else:
            return dvalue

    def add(self, *args, **kwargs):
        # Pass-through to Sequential.add for layer construction.
        self.model.add(*args, **kwargs)

    def compile(self, *args, **kwargs):
        self.model.compile(*args, **kwargs)
"salman.khatri3@gmail.com"
] | salman.khatri3@gmail.com |
77f78c54499c89158741dcfdec3c99815d78fd7f | 48399403b64b9d8e52ea573c0cbefb941f33e540 | /server.py | 62775f3592e3fea539d03b16d9b96c25a5d4ac78 | [] | no_license | Henry-Aybar/counter | 97c54c9a3060fb0249ee432ec89163ae796c014e | 8bb089134d502bd334d785751352086340561862 | refs/heads/master | 2023-08-03T09:41:43.420948 | 2021-09-14T20:50:30 | 2021-09-14T20:50:30 | 406,517,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | from flask import Flask, render_template, request, redirect, session
app = Flask(__name__)
# NOTE(review): hard-coded secret key — fine for a demo, but should come
# from configuration/environment in anything deployed.
app.secret_key = 'Im just Super Sayin!'

@app.route('/')
def index():
    """Bump the per-browser visit counters stored in the session cookie."""
    if 'visit' in session:
        session['visit'] += 1
    else:
        session['visit'] = 1
    if 'count' in session:
        session['count'] += 1
    else:
        # NOTE(review): 'count' starts at 0 while 'visit' starts at 1 —
        # confirm the asymmetry is intentional.
        session['count'] = 0
    return render_template("index.html")

@app.route('/clear')
def clear_session():
    """Reset all session counters and return to the index page."""
    session.clear()
    return redirect('/')

if __name__=="__main__":
    app.run(debug=True)
"aybar.henry.usmc@gmail.com"
] | aybar.henry.usmc@gmail.com |
f4dbe6287f26b3505bbd41c6c3a493e894db9f0e | 43e36f639e69bb3c0ace7b89eee8571d946da1c8 | /scripts/ball_gripping_test.py | 28d81f812fe81d24a1a294c322afcb8d91a80c66 | [] | no_license | mikolak/test_package | 309a07e78b322a0fe378ac595200b473cba0a391 | c9978a440bcdeed54839aeeb1e80795060cc1b5e | refs/heads/master | 2016-09-03T07:24:23.361858 | 2014-09-08T13:36:32 | 2014-09-08T13:36:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,630 | py | #!/usr/bin/env python
import rospy
import tf
import actionlib
import math
from controller_manager_msgs.srv import *
from std_msgs.msg import *
from diagnostic_msgs.msg import *
from geometry_msgs.msg import *
from trajectory_msgs.msg import *
from control_msgs.msg import *
from cartesian_trajectory_msgs.msg import *
from force_control_msgs.msg import *
from tf.transformations import *
import PyKDL
import tf_conversions.posemath as pm
def getDownOrientedQuaternion():
    """Return a Quaternion for the end-effector pointing straight down.

    Axis-angle form: rotation of pi radians about the (0, -1, 0) axis,
    converted to a quaternion via the half-angle formulas.
    """
    real_angle = math.pi * 1  # 180 degrees means straight down
    v_x = 0.0
    v_y = -1
    v_z = 0
    angle = 0.5 * real_angle  # quaternions use the half angle
    _sin = math.sin(angle)
    x = _sin * v_x
    y = _sin * v_y
    z = _sin * v_z
    w = math.cos(angle)
    return Quaternion(x, y, z, w)
if __name__ == '__main__':
    # Pick-and-place demo for the IRp-6 arm: grab a ball, carry it sideways,
    # release it, and return to the synchronization posture. Python 2 / ROS.
    rospy.init_node('simple_trajectory_test')

    rospy.wait_for_service('/controller_manager/switch_controller')
    conManSwitch = rospy.ServiceProxy('/controller_manager/switch_controller', SwitchController)

    #------------------------------------------------
    # Joints
    #------------------------------------------------

    # Phase 1: joint-space move to the starting posture.
    conManSwitch(['Irp6pmSplineTrajectoryGeneratorJoint'], [], True)

    client = actionlib.SimpleActionClient('/irp6p_arm/spline_trajectory_action_joint', FollowJointTrajectoryAction)
    client.wait_for_server()

    print 'Inicjacja postawy'

    goal = FollowJointTrajectoryGoal()
    goal.trajectory.joint_names = ['joint1', 'joint2', 'joint3', 'joint4', 'joint5', 'joint6']
    goal.trajectory.points.append(JointTrajectoryPoint([0, -0.5 * math.pi, 0, 0, 1.45 * math.pi, -0.5 * math.pi], [], [], [], rospy.Duration(6.0)))
    goal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.2)

    client.send_goal(goal)
    client.wait_for_result()
    command_result = client.get_result()

    #=====================================================

    # Phase 2: switch to Cartesian interpolation and go to the start pose.
    conManSwitch(['Irp6pmPoseInt'], ['Irp6pmSplineTrajectoryGeneratorJoint'], True)

    pose_client = actionlib.SimpleActionClient('/irp6p_arm/pose_trajectory', CartesianTrajectoryAction)
    pose_client.wait_for_server()

    print 'Ustawienie pozycji poczatkowej'

    goal = CartesianTrajectoryGoal()
    quaternion = getDownOrientedQuaternion()
    point = Point(0.85, 0, 1.20)
    goal.trajectory.points.append(CartesianTrajectoryPoint(rospy.Duration(10.0), Pose(point, quaternion), Twist()))
    goal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.2)

    pose_client.send_goal(goal)
    pose_client.wait_for_result()
    command_result = pose_client.get_result()

    #====================================================

    # Phase 3: descend over the pickup position.
    conManSwitch(['Irp6pmPoseInt'], [], True)

    pose_client = actionlib.SimpleActionClient('/irp6p_arm/pose_trajectory', CartesianTrajectoryAction)
    pose_client.wait_for_server()

    print 'Podejscie do podjecia'

    goal = CartesianTrajectoryGoal()
    quaternion = getDownOrientedQuaternion()
    point = Point(0.9, 0, 0.95)
    goal.trajectory.points.append(CartesianTrajectoryPoint(rospy.Duration(10.0), Pose(point, quaternion), Twist()))
    goal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.2)

    pose_client.send_goal(goal)
    pose_client.wait_for_result()
    command_result = pose_client.get_result()

    rospy.sleep(5.0)

    #========================================================

    # Phase 4: close the gripper (two-finger gripper motor command).
    conManSwitch(['Irp6ptfgSplineTrajectoryGeneratorMotor'], ['Irp6pmPoseInt'], True)

    motor_client = actionlib.SimpleActionClient('/irp6p_tfg/spline_trajectory_action_motor', FollowJointTrajectoryAction)
    motor_client.wait_for_server()

    print 'Chwyt'

    goal = FollowJointTrajectoryGoal()
    goal.trajectory.joint_names = ['joint1']
    goal.trajectory.points.append(JointTrajectoryPoint([1000.0], [0.0], [], [], rospy.Duration(3.0)))
    goal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.2)

    motor_client.send_goal(goal)
    motor_client.wait_for_result()
    command_result = motor_client.get_result()

    #=======================================================

    # Phase 5: lift back to the start pose while holding the ball.
    conManSwitch(['Irp6pmPoseInt'], ['Irp6ptfgSplineTrajectoryGeneratorMotor'], True)

    pose_client = actionlib.SimpleActionClient('/irp6p_arm/pose_trajectory', CartesianTrajectoryAction)
    pose_client.wait_for_server()

    print 'Powrot do pozycji poczatkowej'

    goal = CartesianTrajectoryGoal()
    quaternion = getDownOrientedQuaternion()
    point = Point(0.85, 0, 1.20)
    goal.trajectory.points.append(CartesianTrajectoryPoint(rospy.Duration(15.0), Pose(point, quaternion), Twist()))
    goal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.2)

    pose_client.send_goal(goal)
    pose_client.wait_for_result()
    command_result = pose_client.get_result()

    #========================================================

    # Phase 6: move over the drop-off position (shifted +0.3 in y).
    conManSwitch(['Irp6pmPoseInt'], [], True)

    pose_client = actionlib.SimpleActionClient('/irp6p_arm/pose_trajectory', CartesianTrajectoryAction)
    pose_client.wait_for_server()

    print 'Podejscie do odlozenia'

    goal = CartesianTrajectoryGoal()
    quaternion = getDownOrientedQuaternion()
    point = Point(0.9, 0.3, 0.95)
    goal.trajectory.points.append(CartesianTrajectoryPoint(rospy.Duration(10.0), Pose(point, quaternion), Twist()))
    goal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.2)

    pose_client.send_goal(goal)
    pose_client.wait_for_result()
    command_result = pose_client.get_result()

    #===========================================

    # Phase 7: open the gripper to release the ball.
    conManSwitch(['Irp6ptfgSplineTrajectoryGeneratorMotor'], ['Irp6pmPoseInt'], True)

    motor_client = actionlib.SimpleActionClient('/irp6p_tfg/spline_trajectory_action_motor', FollowJointTrajectoryAction)
    motor_client.wait_for_server()

    print 'Wypuszczenie'

    goal = FollowJointTrajectoryGoal()
    goal.trajectory.joint_names = ['joint1']
    goal.trajectory.points.append(JointTrajectoryPoint([-1000.0], [0.0], [], [], rospy.Duration(3.0)))
    goal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.2)

    motor_client.send_goal(goal)
    motor_client.wait_for_result()
    command_result = motor_client.get_result()

    #============================================

    # Phase 8: two-waypoint Cartesian return to the start pose.
    conManSwitch(['Irp6pmPoseInt'], ['Irp6ptfgSplineTrajectoryGeneratorMotor'], True)

    pose_client = actionlib.SimpleActionClient('/irp6p_arm/pose_trajectory', CartesianTrajectoryAction)
    pose_client.wait_for_server()

    print 'Powrot do pozycji poczatkowej'

    goal = CartesianTrajectoryGoal()
    quaternion = getDownOrientedQuaternion()
    point1 = Point(0.9, 0.3, 1.20)
    point2 = Point(0.85, 0, 1.20)
    goal.trajectory.points.append(CartesianTrajectoryPoint(rospy.Duration(8.0), Pose(point1, quaternion), Twist()))
    goal.trajectory.points.append(CartesianTrajectoryPoint(rospy.Duration(16.0), Pose(point2, quaternion), Twist()))
    goal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.2)

    pose_client.send_goal(goal)
    pose_client.wait_for_result()
    command_result = pose_client.get_result()

    #================================================

    # Phase 9: joint-space return to the synchronization posture.
    conManSwitch(['Irp6pmSplineTrajectoryGeneratorJoint'], ['Irp6pmPoseInt'], True)

    client = actionlib.SimpleActionClient('/irp6p_arm/spline_trajectory_action_joint', FollowJointTrajectoryAction)
    client.wait_for_server()

    print 'Powrot do pozycji synchronizacji'

    goal = FollowJointTrajectoryGoal()
    goal.trajectory.joint_names = ['joint1', 'joint2', 'joint3', 'joint4', 'joint5', 'joint6']
    goal.trajectory.points.append(JointTrajectoryPoint([-0.10087151336609543, -1.5417429815634993, 0.019743230015841898, 1.1331041783656084, 3.658011557435151, -2.7351279214366393], [], [], [], rospy.Duration(10.0)))
    goal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.2)

    client.send_goal(goal)
    client.wait_for_result()
    command_result = client.get_result()

    # Deactivate the last generator before exiting.
    conManSwitch([], ['Irp6pmSplineTrajectoryGeneratorJoint'], True)

    print 'Skonczylem!'
| [
"mikolak.k@gmail.com"
] | mikolak.k@gmail.com |
c72344f39a2b47d401f7e46eee77380b114764d6 | 32a3a7522546df5f18effcaec43b66115c1cd2c1 | /alpha/migrations/0001_initial.py | deee8f56a15d69afa30df48f53d2484a0ee593bc | [] | no_license | seian/youask | 4b90804a289d3b7d21c9fb9b7b7736279845b95b | 490c8f730e32462eae6a7f69af25fed10c92aba9 | refs/heads/master | 2016-08-09T08:17:19.168489 | 2016-02-17T05:58:46 | 2016-02-17T05:58:46 | 50,502,910 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,849 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-17 04:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (see header); creates the
    # initial tables for the alpha app. Normally not edited by hand.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Celeb_info',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('owner', models.IntegerField()),
                ('name', models.CharField(max_length=100)),
                ('birth', models.DateTimeField()),
                ('group', models.CharField(max_length=500)),
                ('job', models.CharField(max_length=500)),
                ('school', models.CharField(max_length=500)),
            ],
        ),
        migrations.CreateModel(
            name='Celeb_info_career',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('owner', models.IntegerField()),
                ('_from', models.DateTimeField()),
                ('_to', models.DateTimeField()),
                ('award', models.CharField(max_length=500)),
            ],
        ),
        migrations.CreateModel(
            name='Members',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.CharField(max_length=200)),
                ('nick_name', models.CharField(max_length=50)),
                ('password', models.CharField(max_length=512)),
                ('signup_date', models.DateTimeField(auto_now=True)),
                ('contents', models.TextField(max_length=300)),
            ],
        ),
        migrations.CreateModel(
            name='Replies',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('owner', models.IntegerField()),
                ('thread', models.IntegerField()),
                ('content', models.CharField(max_length=500)),
                ('preference', models.IntegerField()),
                ('post_date', models.DateTimeField(auto_now=True)),
                ('parent_id', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Thread',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('owner', models.IntegerField()),
                ('topic', models.CharField(max_length=500)),
                ('title', models.CharField(max_length=500)),
                ('post_date', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| [
"wnsdud1861@gmail.com"
] | wnsdud1861@gmail.com |
def read_file_list(filename):
    """Print each line of *filename* prefixed with "- ".

    For a file `dogs` containing Fido / Whiskey / Dr. Sniffle, the output is:

        - Fido
        - Whiskey
        - Dr. Sniffle

    Propagates the usual OSError if the file cannot be found.
    """
    with open(filename) as handle:
        for raw_line in handle:
            # Strip the trailing newline (and surrounding whitespace)
            # before re-emitting the line with the bullet prefix.
            print("- " + raw_line.strip())
"hannahsylee@gmail.com"
] | hannahsylee@gmail.com |
595a9e74a588b9a31577ba1c84a3e2bd2e99a3bc | e4c798246339e765f04424d727106e80e810f47c | /Medium/iNo008.py | 7c70fbb6da2f54341b2bef3bbcc9b1e6fae85c2f | [] | no_license | kikihiter/LeetCode | 3a61dc4ee3223d634632e30b97c30a73e5bbe253 | 62b5ae50e3b42ae7a5a002efa98af5ed0740a37f | refs/heads/master | 2021-05-26T08:05:00.126775 | 2019-05-21T09:18:37 | 2019-05-21T09:18:37 | 127,999,978 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | class Solution(object):
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
str = str.lstrip()
posNum = True
if str == "":
return 0
if str[0]=='-':
posNum = False
str = str[1:]
elif str[0]=='+':
str = str[1:]
try:
int(str[0])
except:
return 0
rStr = ""
for i in str:
try:
int(i)
except:
break
rStr = rStr + i
rStr = rStr.lstrip('0')
if rStr == "":
return 0
if posNum == False:
return max(-int(rStr),-2147483648)
print rStr
return min(int(rStr),2147483647)
| [
"noreply@github.com"
] | kikihiter.noreply@github.com |
78196c0216eb2c3169434d91e959b6edf1d91d1a | 44d9cdc0ca026036c314b8530a7eeda1fb3ee21c | /src/JZ/JZ14-II.py | ea9dde6589c4ad917732d8c88169b2ff6c251334 | [] | no_license | Sibyl233/LeetCode | 525a2104a4f96e0e701fa175dfad6d02d6cf40f3 | 3e20e54bb11f7a56d8fa8189a0a1a1461f81eb8a | refs/heads/master | 2021-09-06T14:52:31.718922 | 2021-08-20T15:51:37 | 2021-08-20T15:51:37 | 234,050,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | """่งฃๆณ๏ผๆพ่งๅพ
- ๆถ้ดๅคๆๅบฆ๏ผO(logN)ใไธบไบๅๆฑไฝๆณๅคๆๅบฆใ
- ็ฉบ้ดๅคๆๅบฆ๏ผO(1)
"""
class Solution:
    def cuttingRope(self, n: int) -> int:
        """Cut a rope of length n into pieces maximizing the product (mod 1e9+7).

        Greedy rule: use as many segments of length 3 as possible; a
        remainder of 1 is handled by trading one 3 for a 2*2=4, and a
        remainder of 2 contributes a factor 2 (3*2=6 with the reserved 3).
        """
        if n <= 3:
            return n - 1
        MOD = 1000000007
        threes, remainder = n // 3 - 1, n % 3
        # Built-in three-argument pow performs fast modular exponentiation.
        base = pow(3, threes, MOD)
        if remainder == 0:
            return base * 3 % MOD
        if remainder == 1:
            return base * 4 % MOD
        return base * 6 % MOD
if __name__=="__main__":
    # Smoke test: 10 = 3 + 3 + 4 -> product 36.
    n = 10
    print(Solution().cuttingRope(n)) # 36
| [
"780598113@qq.com"
] | 780598113@qq.com |
5d5fd3a0fc2112964ec1c90c48196dcdc7468bd7 | d93b337a73a9bc6f6c104cc6eea9ca8d60ef3577 | /stripeAPI/cargo.py | 407cca194cdbced169d8ec5a77e78e9b275ed8e4 | [] | no_license | erivera23/market | 0532c5aefe32bd0102fa18fe6b224af289c4388f | 73bead776f233bd1a79d34122daeff74b2b3ce18 | refs/heads/master | 2022-12-13T00:21:38.163936 | 2020-01-18T22:52:34 | 2020-01-18T22:52:34 | 234,808,623 | 1 | 0 | null | 2022-11-22T04:57:29 | 2020-01-18T22:51:03 | Python | UTF-8 | Python | false | false | 478 | py | from . import stripe
def create_cargo(orden):
    """Create a Stripe charge for *orden* and return the charge object.

    Requires the order to have a billing profile and a user with a stored
    Stripe customer id; otherwise nothing is charged and None is returned.
    The amount is converted from whole currency units to cents.
    """
    if orden.billing_profile and orden.user and orden.user.customer_id:
        return stripe.Charge.create(
            amount=int(orden.total) * 100,
            currency='USD',
            description=orden.descripcion,
            customer=orden.user.customer_id,
            source=orden.billing_profile.card_id,
            metadata={'orden_id': orden.id},
        )
"riveraefrain5@gmail.com"
] | riveraefrain5@gmail.com |
b4fde878db7746ff13c0a3dedbfbca0736d7249c | 34c91527966ecf29d8adf46becbf3af5032dc3a6 | /basic/armstrongnumber.py | 9552f47dc63cdff9b0b8cf0745e63ea4f047d5bf | [] | no_license | mjohnkennykumar/csipythonprograms | 0a23040a484812b3176b614bf0e18f685c74a375 | 750cdaeda15fe73328902c3de567d6083dfa7a77 | refs/heads/master | 2021-01-21T14:32:37.913617 | 2017-06-24T12:54:03 | 2017-06-24T12:54:03 | 95,297,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | # -*- coding: utf-8 -*-
# Python program to check if the number provided by the user is an Armstrong number or not
def _is_armstrong(num):
    """Return True if num equals the sum of its digits, each raised to the digit count.

    Using the digit count instead of a fixed cube makes the check correct
    for Armstrong numbers of any length (e.g. 9474 and 54748), not only
    3-digit ones as the original cube-based loop assumed.
    """
    digits = str(num)
    power = len(digits)
    return num == sum(int(d) ** power for d in digits)

# take input from the user
num = int(input("Enter a number: "))
# display the result
if _is_armstrong(num):
    print(num,"is an Armstrong number")
else:
    print(num,"is not an Armstrong number")
| [
"Samuel Henry"
] | Samuel Henry |
acc0e879ddff0fddff0f1c17854d03eba9823789 | be8fe594fae8a4fb66c0cbc1f0e8462891eabb1b | /examsystemapp/api/external.py | ff92d1a6411c523b029c830ee33dfb926c6e41f2 | [] | no_license | ITfyMe/ExamSytemPython | a30cccc1ba4ef832666b23109a772209fcbcea8c | da7506ae9607d69c97744bdc08ac1113fc86237a | refs/heads/master | 2023-06-12T11:48:20.514142 | 2021-07-12T13:16:06 | 2021-07-12T13:16:06 | 369,427,947 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | """
Created By : Nikesh
Created On :
Reviewed By :
Reviewed On :
Version :
"""
from django.http import HttpRequest
from examsystemapp.api.base_controller import BaseController
from examsystemapp.utils.helpers.request_helper import RequestHelper
from django.conf import settings
class External(BaseController):
    """Controller that proxies requests to external (master-data) APIs."""

    def __init__(self, request: HttpRequest):
        # Delegate shared request setup to the base controller.
        BaseController.__init__(self, request)

    def check_session(self, request):
        # Intentionally a no-op for external endpoints.
        pass

    def masters(self, request: HttpRequest):
        """Fetch master data from the configured external service and relay the JSON verbatim."""
        payload = RequestHelper().call_ext_api(request, settings.MASTER_BASE_URL)
        return self.send_response_raw_json(payload)
| [
"nikesh.kedlaya@gmail.com"
] | nikesh.kedlaya@gmail.com |
3bf09fa4f79c4ab4f60f4fdf8d3c23e04214b598 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /F5ycABGyZtghMpYjr_16.py | ce3767d6e7a4448df5ed169a0465448836b9b5c5 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py |
def max_num(n1, n2):
if n1 > n2:
return n1
else:
return n2
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
50157256f9b323f313890c0165fa4fe159337357 | 8cce087dfd5c623c2f763f073c1f390a21838f0e | /projects/the/test.py | 8b5458899ae7d4a6053ff37dca0868ce16e83cdb | [
"Unlicense"
] | permissive | quinn-dougherty/python-on-nix | b2ae42761bccf7b3766999b27a4674310e276fd8 | 910d3f6554acd4a4ef0425ebccd31104dccb283c | refs/heads/main | 2023-08-23T11:57:55.988175 | 2021-09-24T05:55:00 | 2021-09-24T05:55:00 | 414,799,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11 | py | import the
| [
"kamadorueda@gmail.com"
] | kamadorueda@gmail.com |
d0e5a0905a4356ef92ac7aaf23d6954bde4d27c3 | 6a2d708c290d5fc8b4eb6d2b6d52186d6ae57406 | /Chef Race (SRTF) Final/srtf.py | 5bbf4bbf634474a5b9be024dfb09b3ce1e213ed3 | [] | no_license | ac-marlon/ChefRaceUD | 64080c1e575234f03113c16d9c017c4f42f55010 | b3c8312af7b6b8e3e6fc0a457169358e4bd1d62b | refs/heads/master | 2021-08-26T01:09:25.324512 | 2021-08-18T15:01:24 | 2021-08-18T15:01:24 | 108,355,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,609 | py | import cola
import time
from procesos import *
import recursos as rs
import queue
import threading
import numpy as np
import pygame
from pygame.sprite import Sprite
from pygame.locals import *
import util
import sys, pygame, util
from receta import Receta
from recursos import CuchillosIma
from recursos import LicuadoraIma
from recursos import HornoIma
from pizarra import Pizarra
size = width, height = 900, 712  # window dimensions in pixels
screen = pygame.display.set_mode(size)  # shared display surface used by every class below
class Procesador(threading.Thread):
    """Simulated CPU thread running an SRTF (shortest remaining time first) scheduler.

    Each processor owns four process queues: lis (ready), blo (blocked on a
    busy resource), sus (suspended after preemption) and ter (terminated).
    New work arrives through the shared queue.Queue passed via *args.
    """
    def __init__(self,idProcesador,*args):
        threading.Thread.__init__(self)
        self.idProcesador=idProcesador
        self.proceso=None      # process currently on the CPU (None while idle)
        self.lis=cola.Cola()   # ready queue
        self.ter=cola.Cola()   # terminated processes
        self.blo=cola.Cola()   # blocked processes (resource not free)
        self.sus=cola.Cola()   # suspended (preempted) processes
        self._args=args        # forwarded to usarProcesador (the input queue)
        self.uso=True          # run flag for the scheduling loop
        self.minIter=50        # minimum ticks before the processor may shut down
    def __str__(self):
        return str(self.idProcesador)
    def run(self):
        # Thread entry point: keep scheduling until all queues drain.
        while self.uso:
            self.usarProcesador(*self._args)
    def usarProcesador(self,q):
        """One scheduling session: tick every 2s while any work remains in q or the internal queues."""
        while not self.proceso==None or not q.empty() or not self.lis.es_vacia() or not self.sus.es_vacia() or not self.blo.es_vacia() or self.minIter>0:
            time.sleep(2)
            self.minIter-=1
            # Pull newly arrived work into the ready queue.
            if not q.empty(): self.asignar(q.get())
            # Keep the ready queue ordered by remaining time (SRTF order).
            self.lis.ordenar()
            if not self.lis.es_vacia() and self.proceso==None:
                # CPU idle: dispatch the shortest ready job if its resource is free.
                posible=self.lis.desencolar()
                if posible.recurso.libre:
                    # NOTE(review): self.ocupado is set here but never read in this class.
                    self.ocupado=True
                    self.proceso=posible
                    self.proceso.recurso.utilizar()
                    self.proceso.estado=3
                else:
                    posible.bloquear()
                    self.blo.encolar(posible)
            elif not self.lis.es_vacia() and not self.proceso==None:
                # CPU busy: preempt if a strictly shorter job can acquire its resource.
                posible=self.lis.desencolar()
                if self.proceso.t>posible.t and posible.recurso.libre:
                    self.proceso.suspender()
                    self.sus.encolar(self.proceso)
                    self.proceso=posible
                    self.proceso.recurso.utilizar()
                else:
                    self.lis.encolar(posible)
            # Bookkeeping on waiting times, then try to revive suspended/blocked jobs.
            self.contarColaBlo()
            self.contarColaLis()
            self.revisarColaSus()
            self.revisarColaBlo()
            if not self.proceso==None:
                # Advance the running process by one time slice.
                self.proceso.procesar()
                if self.proceso.t==0:
                    # Finished: release its resource and move it to the terminated queue.
                    self.proceso.recurso.liberar()
                    print("\nterminando proceso",self.proceso,"en el procesador",self,",sus",self.proceso.sus,",lis",self.proceso.lis,",blo",self.proceso.blo,",zona critica",self.proceso.zc)
                    self.proceso.estado=4
                    self.ter.encolar(self.proceso)
                    self.proceso=None
                    q.task_done()
        print("termino el procesador",self,"lista de tareas completadas en este procesador:")
        for i in range(self.ter.tam):
            print(self.ter.desencolar())
        self.uso=False
    def revisarColaSus(self):
        """Age every suspended process; re-queue it as ready once its suspension timer hits zero."""
        tam = self.sus.tam
        for i in range(tam):
            n=self.sus.desencolar()
            n.tr-=1
            n.sus+=1
            if n.tr==0:
                self.asignar(n)
                print("\nse saco el proceso",n,"de la cola de suspendidos y entro a la cola de listo")
            else:
                self.sus.encolar(n)
    def revisarColaBlo(self):
        """Move blocked processes back to the ready queue when their resource becomes free."""
        for i in range(self.blo.tam):
            posible=self.blo.desencolar()
            if posible.recurso.libre:
                self.asignar(posible)
                print("\nse saco el proceso",posible," de la cola de bloqueados y entro en la cola de listos")
            else:
                self.blo.encolar(posible)
    def contarColaLis(self):
        """Increment the time-in-ready-queue counter of every ready process."""
        tam = self.lis.tam
        for i in range(tam):
            n=self.lis.desencolar()
            n.lis+=1
            self.lis.encolar(n)
    def contarColaBlo(self):
        """Increment the time-blocked counter of every blocked process."""
        tam = self.blo.tam
        for i in range(self.blo.tam):
            n=self.blo.desencolar()
            n.blo+=1
            self.blo.encolar(n)
    def asignar(self,proceso):
        """Mark *proceso* as ready (estado 0) and enqueue it on the ready queue."""
        proceso.estado=0
        self.lis.encolar(proceso)
class cliente:
    """Game front-end: builds the three Chef processors, loads pygame assets,
    and runs the animation/event threads that drive the SRTF simulation."""
    def __init__(self):
        # Per-recipe counters used as process ids.
        self.numPo=0
        self.numMa=0
        self.numEn=0
        # Shared kitchen resources: [Horno, Cuchillos, Licuadora].
        self.recursos=[rs.Horno(),rs.Cuchillos(),rs.Licuadora()]
        # One input queue per processor.
        self.cola1=queue.Queue()
        self.cola2=queue.Queue()
        self.cola3=queue.Queue()
        self.colaProcesadores=queue.Queue()
        self.procesador1=Chef((width-900,height),1,self.cola1)
        self.procesador2=Chef((width-700,height),2,self.cola2)
        self.procesador3=Chef((width-500,height),3,self.cola3)
        pygame.init()
        pygame.mixer.init()
        # Static images and their rects.
        self.fondo = pygame.image.load("imagenes/cocina.png")
        self.intro = pygame.image.load("imagenes/intro.png")
        self.fondorect = self.fondo.get_rect()
        self.introrect = self.intro.get_rect()
        pygame.display.set_caption( "Chef Race (Universidad Distrital)" )
        self.pizarra = pygame.image.load("imagenes/pizarra.png")
        # Sound effects.
        self.sInicio = util.cargar_sonido('sonidos/inicio.wav')
        self.sHorno = util.cargar_sonido('sonidos/horno.wav')
        self.sCuchillo = util.cargar_sonido('sonidos/cuchillo.wav')
        self.sLicuadora = util.cargar_sonido('sonidos/licuadora.wav')
        self.sPrincipal = util.cargar_sonido('sonidos/principal.wav')
        # One pizarra (board) per processor, placed above each chef.
        self.pizarra1 = Pizarra((width-900,height))
        self.pizarra2 = Pizarra((width-700,height))
        self.pizarra3 = Pizarra((width-500,height))
        self.receta1 = Receta((width,height))
        self.receta2 = Receta((width+200,height))
        self.receta3 = Receta((width+400,height))
        # Sample dish sprites shown next to each recipe.
        self.comida1 = PolloConPapas(000,self.recursos[0],size)
        self.comida2 = Ensalada(111,self.recursos[1],size)
        self.comida3 = Malteada(222,self.recursos[2],size)
        self.listaChefs = [self.procesador1, self.procesador2, self.procesador3]
        self.listaPizarras = [self.pizarra1, self.pizarra2, self.pizarra3]
        self.listaRecetas = [self.receta1, self.receta2, self.receta3]
        self.listaComida = [self.comida1, self.comida2, self.comida3]
        self.cuchillos = CuchillosIma(size)
        self.licuadora = LicuadoraIma(size)
        self.horno = HornoIma(size)
        self.reloj = pygame.time.Clock()
        self.fuente1 = pygame.font.Font(None,70)
        self.fuente2 = pygame.font.Font(None,25)
        self.textoBienvenida = self.fuente1.render("Bienvenido a Chef Race UD", 1, (255,255,255))
        self.textoAutor1 = self.fuente2.render("Marlon Arias", 1, (0,0,0))
        self.textoAutor2 = self.fuente2.render("David Amado", 1, (0,0,0))
        self.textoAutor3 = self.fuente2.render("Realizado por:", 1, (0,0,0))
    def iniciar(self):
        """Show the 3-second intro screen, start the processor threads and the
        animation/event threads, then block until all work queues drain."""
        self.sInicio.play()
        aux = 3
        while aux > 0:
            screen.blit(self.intro, self.introrect)
            screen.blit(self.textoAutor1,(width-170,height-680))
            screen.blit(self.textoAutor2,(width-170,height-660))
            screen.blit(self.textoAutor3,(width-170,height-700))
            screen.blit(self.textoBienvenida,((width-880, (height/2)+30)))
            pygame.display.update()
            time.sleep(1)
            aux=aux-1
        self.sPrincipal.play(1)
        self.procesador1.start()
        self.procesador2.start()
        self.procesador3.start()
        # NOTE(review): both helper threads share the name 'Animacion'.
        self.hiloAnimacion = threading.Thread(name='Animacion', target = self.pintar)
        self.hiloEventos = threading.Thread(name='Animacion', target = self.capturarEventos)
        #self.hiloEventos.daemon=True
        self.hiloEventos.start()
        self.hiloAnimacion.daemon=True
        self.hiloAnimacion.start()
        # Wait for every queued process to be marked done (q.task_done in Procesador).
        self.cola1.join()
        self.cola2.join()
        self.cola3.join()
        self.hiloAnimacion.join()
        self.hiloEventos.join()
    def capturarEventos(self):
        """Event-thread body: map mouse clicks on the three recipe hot-zones to
        new processes; the button (left/middle/right) chooses the processor."""
        while self.procesador1.uso or self.procesador2.uso or self.procesador3.uso:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    sys.exit()
                if event.type == pygame.MOUSEBUTTONDOWN:
                    print("Evento ratonBtn capturado")
                    # NOTE(review): comparing event.pos against every (x, y) in a
                    # 60x60 grid is O(3600) per event; a rect.collidepoint check
                    # would be equivalent and far cheaper.
                    for x in range(700, 760):
                        for y in range(282, 342):
                            proceso = Malteada(self.numMa,self.recursos[2],size)
                            self.numMa+=1
                            estado="trabajandoLicuadora1"
                            if event.button == 1 and event.pos == (x, y):
                                self.cola1.put(proceso)
                                self.procesador1.estado=estado
                                self.pizarra1.arregloRecetas.append(proceso)
                                print("pico el click izq")
                            elif event.button == 2 and event.pos == (x, y):
                                self.cola2.put(proceso)
                                self.procesador2.estado=estado
                                self.pizarra2.arregloRecetas.append(proceso)
                                print("pico el click cent")
                            elif event.button == 3 and event.pos == (x, y):
                                self.cola3.put(proceso)
                                self.procesador3.estado=estado
                                self.pizarra3.arregloRecetas.append(proceso)
                                print("pico el click der")
                    for x in range(700, 760):
                        for y in range(27, 87):
                            proceso=PolloConPapas(self.numPo,self.recursos[0],size)
                            self.numPo+=1
                            estado="trabajandoHorno1"
                            if event.button == 1 and event.pos == (x, y):
                                self.cola1.put(proceso)
                                self.procesador1.estado=estado
                                self.pizarra1.arregloRecetas.append(proceso)
                                print("pico el click izq")
                            elif event.button == 2 and event.pos == (x, y):
                                self.cola2.put(proceso)
                                self.procesador2.estado=estado
                                self.pizarra2.arregloRecetas.append(proceso)
                                print("pico el click cent")
                            elif event.button == 3 and event.pos == (x, y):
                                self.cola3.put(proceso)
                                self.procesador3.estado=estado
                                self.pizarra3.arregloRecetas.append(proceso)
                                print("pico el click der")
                    for x in range(700, 750):
                        for y in range(137, 197):
                            proceso=Ensalada(self.numEn,self.recursos[1],size)
                            self.numEn+=1
                            estado="trabajandoCuchillo1"
                            if event.button == 1 and event.pos == (x, y):
                                self.cola1.put(proceso)
                                self.procesador1.estado=estado
                                self.pizarra1.arregloRecetas.append(proceso)
                                print("pico el click izq")
                            elif event.button == 2 and event.pos == (x, y):
                                self.cola2.put(proceso)
                                self.procesador2.estado=estado
                                self.pizarra2.arregloRecetas.append(proceso)
                                print("pico el click cent")
                            elif event.button == 3 and event.pos == (x, y):
                                self.cola3.put(proceso)
                                self.procesador3.estado=estado
                                self.pizarra3.arregloRecetas.append(proceso)
                                print("pico el click der")
    def pintar(self):
        """Animation-thread body: redraw chefs, boards, recipe states and
        resource sprites while any processor is still in use."""
        while self.procesador1.uso or self.procesador2.uso or self.procesador3.uso:
            self.reloj.tick(3)
            for elemento in self.listaChefs:
                elemento.update()
                time.sleep(0.5)
            screen.blit(self.fondo, self.fondorect)
            for elemento in self.listaChefs:
                screen.blit(elemento.image, elemento.rect)
            for elemento in self.listaPizarras:
                screen.blit(elemento.image, elemento.rect)
                # Draw each queued recipe with the icon matching its estado
                # (0 ready, 1 blocked, 2 suspended, 3 running, 4 finished).
                for i in elemento.arregloRecetas:
                    if elemento.arregloRecetas[elemento.arregloRecetas.index(i)].estado==0:
                        screen.blit(i.iml, (elemento.rect[0]+30,elemento.rect[1]+elemento.arregloRecetas.index(i)*60+10))
                    elif elemento.arregloRecetas[elemento.arregloRecetas.index(i)].estado==1:
                        screen.blit(i.imb, (elemento.rect[0]+30,elemento.rect[1]+elemento.arregloRecetas.index(i)*60+10))
                    elif elemento.arregloRecetas[elemento.arregloRecetas.index(i)].estado==2:
                        screen.blit(i.ims, (elemento.rect[0]+30,elemento.rect[1]+elemento.arregloRecetas.index(i)*60+10))
                    elif elemento.arregloRecetas[elemento.arregloRecetas.index(i)].estado==3:
                        screen.blit(i.ime, (elemento.rect[0]+30,elemento.rect[1]+elemento.arregloRecetas.index(i)*60+10))
                    elif elemento.arregloRecetas[elemento.arregloRecetas.index(i)].estado==4:
                        # NOTE(review): removing from the list while iterating it
                        # can skip the following element.
                        elemento.arregloRecetas.remove(i)
            for elemento in self.listaRecetas:
                screen.blit(elemento.image, elemento.rect)
            for elemento in self.listaComida:
                screen.blit(elemento.iml, elemento.rect)
            screen.blit(self.cuchillos.image, self.cuchillos.rect)
            screen.blit(self.licuadora.image, self.licuadora.rect)
            screen.blit(self.horno.image, self.horno.rect)
            pygame.display.update()
    def crearProceso(self,nProcesos):
        """Enqueue *nProcesos* randomly chosen recipes on random processors."""
        for i in range(nProcesos):
            self.asignar_pedido_aleatorio()
    def asignar_pedido_aleatorio(self):
        """Create one random recipe (aleatorio1) and hand it to a random processor (aleatorio2)."""
        aleatorio1=np.random.randint(3)
        aleatorio2=np.random.randint(3)
        if aleatorio1==0:
            proceso=PolloConPapas(self.numPo,self.recursos[0],size)
            self.numPo+=1
            estado="trabajandoHorno1"
        elif aleatorio1==1:
            proceso=Ensalada(self.numEn,self.recursos[1],size)
            self.numEn+=1
            estado="trabajandoCuchillo1"
        else:
            proceso= Malteada(self.numMa,self.recursos[2],size)
            self.numMa+=1
            estado="trabajandoLicuadora1"
        if aleatorio2==0:
            self.cola1.put(proceso)
            self.procesador1.estado=estado
        elif aleatorio2==1:
            self.cola2.put(proceso)
            self.procesador2.estado=estado
        else:
            self.cola3.put(proceso)
            self.procesador3.estado=estado
class Chef(Sprite, Procesador):
    """Pygame sprite view of a Procesador: alternates between two frames per
    resource (knife/oven/blender) to animate the chef while it works."""
    def __init__(self, cont_size,idProcesador,*args):
        Sprite.__init__(self)
        Procesador.__init__(self,idProcesador,*args)
        self.cont_size = cont_size   # (x, y) used to position the sprite on screen
        # Animation state names: index 0 idle, 1-2 knife, 3-4 oven, 5-6 blender.
        self.estados = ["espera", "trabajandoCuchillo1", "trabajandoCuchillo2",
                        "trabajandoHorno1", "trabajandoHorno2",
                        "trabajandoLicuadora1", "trabajandoLicuadora2"]
        self.estado = self.estados[0]
        # One image per animation state, same indexing as self.estados.
        self.imagenes = [util.cargar_imagen('imagenes/chef.png'),
                         util.cargar_imagen('imagenes/chefCuchi.png'),
                         util.cargar_imagen('imagenes/chefCuchi2.png'),
                         util.cargar_imagen('imagenes/chefHorno.png'),
                         util.cargar_imagen('imagenes/chefHorno2.png'),
                         util.cargar_imagen('imagenes/chefLicu.png'),
                         util.cargar_imagen('imagenes/chefLicu2.png')]
        self.image = self.imagenes[0]
        self.rect = self.image.get_rect()
        self.rect.move_ip(cont_size[0], cont_size[1]-250)
    def update(self):
        # Sprite animation: pick the frame pair for the resource of the
        # current process and toggle between its two frames each call.
        if self.proceso==None:
            self.image = self.imagenes[0]
            print("el procesador",self,"no tiene proceso")
        else:
            if self.proceso.recurso.nombre=="Cuchillos":
                if self.estado == self.estados[1]:
                    self.image = self.imagenes[1]
                    self.estado = self.estados[2]
                else:
                    self.image = self.imagenes[2]
                    self.estado = self.estados[1]
            elif self.proceso.recurso.nombre=="Horno":
                if self.estado == self.estados[3]:
                    self.image = self.imagenes[3]
                    self.estado = self.estados[4]
                else:
                    self.image = self.imagenes[4]
                    self.estado = self.estados[3]
            else:
                if self.estado == self.estados[5]:
                    self.image = self.imagenes[5]
                    self.estado = self.estados[6]
                else:
                    self.image = self.imagenes[6]
                    self.estado = self.estados[5]
# Build and launch the simulation. NOTE: this rebinds the name `cliente`,
# shadowing the class defined above, so no further instances can be created.
cliente = cliente()
cliente.iniciar()
| [
"noreply@github.com"
] | ac-marlon.noreply@github.com |
be4bc8669b12545f0c578c87d72131ebfc8489d0 | 947273c16f8984a20cd002b99b52facd6e63e43b | /server/authentication/urls.py | dacfd5c43349691a7bc454b922558db58c2608aa | [] | no_license | ecuaappgye/App | 8e3b50b4f7a8b9c50876d24343781e8f53a51bbc | 2df7be6fd206d012f6a83acd0aa0cb75cf6d5937 | refs/heads/master | 2023-07-05T00:48:24.341021 | 2021-07-31T17:02:12 | 2021-07-31T17:02:12 | 385,267,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py | from django.urls import include, path
from .apis import (UserEmailChange, UserGetApi, UserLoginApi, UserLogoutApi,
UserPasswordChange, UserPasswordReset,
UserPasswordResetCheck, UserRegisterApi,
UserRegisterVerifyApi, UserRegisterVerifyCheckApi,
UserUpdateApi)
# Account lifecycle endpoints: registration (+ verification), login/logout,
# password reset/change, e-mail change and profile retrieval.
authentication_urls = [
    path('register/', UserRegisterApi.as_view(), name='register'),
    path('register/verify/<int:user_id>/', UserRegisterVerifyApi.as_view(), name='register_verify'),
    path('register/verify_check/<int:user_id>/', UserRegisterVerifyCheckApi.as_view(), name='register_verify_check'),
    path('login/', UserLoginApi.as_view(), name='login'),
    path('logout/', UserLogoutApi.as_view(), name='logout'),
    # NOTE(review): the two password_reset routes have no name=, so they cannot
    # be reversed by name like the others — confirm whether that is intentional.
    path('password_reset/', UserPasswordReset.as_view()),
    path('password_reset_check/', UserPasswordResetCheck.as_view()),
    path('password_change/<int:user_id>/', UserPasswordChange.as_view(), name='password_change'),
    path('email_change/<int:user_id>/', UserEmailChange.as_view(), name='email_change'),
    path('get/<int:user_id>/', UserGetApi.as_view(), name='get'),
]
# Driver-specific endpoints.
drivers_urls =[
    path('update/<int:user_id>/', UserUpdateApi.as_view(), name='update')
]
# Mount both groups under their namespaces: /auth/... and /driver/...
urlpatterns =[
    path('auth/', include((authentication_urls, 'auth'))),
    path('driver/', include((drivers_urls, 'driver')))
]
| [
"italobarzola18@gmail.com"
] | italobarzola18@gmail.com |
0c8795e3c608547bd7a3663df3bd69eeabac19fa | 1978ed0ffd9264e0b598e82966acd53add379fce | /BK_Scripts/shapes.py | cdc2e922bed34aca1b93307f0e70aaebe4d1941e | [
"MIT"
] | permissive | bkvignesh/manim | 70d30c22d601eccf67108bca52317bfe3e2b4a7b | a4dace8f85113b1605235531f5de8f5d408ac823 | refs/heads/master | 2023-01-22T23:39:57.324803 | 2020-10-12T13:11:21 | 2020-10-12T13:11:21 | 295,242,598 | 0 | 0 | NOASSERTION | 2020-09-13T21:41:33 | 2020-09-13T21:41:32 | null | UTF-8 | Python | false | false | 1,112 | py | from manimlib.imports import *
from math import cos, sin, pi
import numpy as np
class Shapes(Scene):
    """Manim scene demonstrating basic shapes, transforms, fades and grouping."""
    def construct(self):
        circle = Circle(color = YELLOW)
        square = Square(color = DARK_BLUE)
        # Resize the square so it tightly encloses the circle.
        square.surround(circle)
        rectangle = Rectangle(height=2, width=3, color=RED)
        ring = Annulus(inner_radius=.2, outer_radius=1, color=BLUE)
        ring2 = Annulus(inner_radius=.6, outer_radius=1, color=BLUE)
        ring3 = Annulus(inner_radius=.2, outer_radius=1, color=BLUE)
        ellipse = Ellipse(width=5, height=3, color=DARK_BLUE)
        # Eight unit-length lines fanned out from the origin every 45 degrees.
        pointers = []
        for i in range(8):
            pointers.append(Line(ORIGIN, np.array([cos(pi/180*360/8*i),sin(pi/180*360/8*i), 0]), color = YELLOW))
        self.add(circle)
        self.play(FadeIn(square))
        self.play(Transform(square, rectangle))
        self.play(FadeOut(circle), FadeIn(ring))
        self.play(Transform(ring, ring2))
        self.play(Transform(ring2, ring3))
        self.play(FadeOut(square), GrowFromCenter(ellipse), Transform(ring2, ring))
        self.add(*pointers)
        self.wait(2)
| [
"vigneshbk42@gmail.com"
] | vigneshbk42@gmail.com |
92b948fe97b26eb0b10d0f37347f317f1618052f | 47aada5f50fdb6dfe0c8b34ff30de93a32797dca | /tpfa/boundary_conditions.py | 760274d702f7f588864f85db98cb8d975fd5a9bf | [
"MIT"
] | permissive | Filipe-Cumaru/hello-world-1 | 17a7e11ac9f3efa0026c3471538cf4f28323e8ef | b12137beebbf1a4bd87d02583b609d8c68307474 | refs/heads/master | 2020-04-24T23:59:01.077919 | 2019-02-24T02:40:58 | 2019-02-24T02:40:58 | 172,363,231 | 0 | 0 | MIT | 2019-02-24T16:47:05 | 2019-02-24T16:47:05 | null | UTF-8 | Python | false | false | 779 | py | import numpy as np
from scipy.sparse import csr_matrix, lil_matrix
class BoundaryConditions():
    """Apply prescribed-pressure (Dirichlet) boundary conditions to a TPFA system.

    The first and last nx*ny rows of the coefficient matrix — presumably the
    two opposite boundary planes of an nx*ny*nz structured grid (TODO confirm
    against the mesh builder) — are replaced by identity rows; the right-hand
    side q is 500 on the first plane and 0 on the last.
    """

    def __init__(self, num_elements, nx, ny, coef):
        self.coef = coef
        self.num_elements = num_elements
        self.nx = nx
        self.ny = ny
        # Overwrite the boundary rows in place and build the RHS vector.
        self.coef, self.q = self.pressao_prescrita()

    def pressao_prescrita(self):
        """Return (coef, q) with identity rows on both boundary planes and q=500 on the first."""
        plane = self.nx * self.ny  # number of cells in one boundary plane
        # np.float_ was removed in NumPy 2.0; np.float64 is the identical dtype.
        self.q = lil_matrix((self.num_elements, 1), dtype=np.float64)
        # First plane: zero the rows, prescribe pressure 500 on the RHS.
        self.coef[0:plane] = 0
        self.q[0:plane] = 500
        # Last plane: zero the rows (its q entries stay 0, i.e. pressure 0).
        self.coef[self.num_elements - plane:self.num_elements] = 0
        # Put 1 on the diagonal of every boundary row so each reads p_i = q_i.
        for r in range(plane):
            self.coef[r, r] = 1
            self.coef[r + self.num_elements - plane, r + self.num_elements - plane] = 1
        return self.coef, self.q
| [
"renatattavares@hotmail.com"
] | renatattavares@hotmail.com |
9b0b34eb1b3a8edde6ec56f298ac3c0a5bbe79f7 | cb0bad5bd717fef43674ffd2f76a5fe922cf0896 | /eurito_daps/flaskblog.py | 74bf352313b346746653461bbfeb148f1c63290a | [] | no_license | porter22/porter22.github.io | 2b3dbe6f8c6ffcfb3f4ff374e12f769f1ef06b51 | ad15c20fa3f8490642cd831da82eb448c694c746 | refs/heads/master | 2021-06-05T20:46:47.862065 | 2020-03-15T17:27:09 | 2020-03-15T17:27:09 | 122,872,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,061 | py | from flask import Flask, render_template, url_for
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app = Flask(__name__) #instantiated flask variable
# SQLite file next to the app; the triple slash means a relative path.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db' #/// relative path from the current dir
db = SQLAlchemy(app) #create database instance
class User(db.Model):
    """Account table; one user can author many posts (one-to-many via `posts`)."""
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): username is unique but, unlike email, not nullable=False — confirm intent.
    username = db.Column(db.String(20), unique=True)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # Avatar filename; a shared default is used until the user uploads one.
    image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
    # Stored credential (60 chars — presumably a bcrypt hash; TODO confirm).
    password = db.Column(db.String(60), nullable=False)
    posts = db.relationship('Post', backref='author', lazy=True) # one author can have multiple posts, but a post can have only one author: one to many relationship
    def __repr__(self): #how object is printed
        return f"User('{self.username}', '{self.email}', '{self.image_file}')"
class Post(db.Model):
    """Blog post table; each post belongs to exactly one User via user_id."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False)
    # NOTE(review): field name "data_posted" looks like a typo for "date_posted".
    # Defaults to the creation time in UTC (datetime.utcnow is passed uncalled).
    data_posted = db.Column(db.DateTime, nullable=False, default = datetime.utcnow )
    content = db.Column(db.Text, nullable=False)
    # Foreign key to User.id ('user' is SQLAlchemy's default table name for User).
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    def __repr__(self): #how object is printed
        return f"Post('{self.title}', '{self.data_posted}')"
# In-memory stand-in for real Post rows; rendered by the home view below.
posts = [
    {
        'author': 'Corey Schafer',
        'title': 'Blog Post 1',
        'content': 'First post content',
        'date_posted': 'April 20, 2018'
    },
    {
        'author': 'Jane Doe',
        'title': 'Blog Post 2',
        'content': 'Second post content',
        'date_posted': 'April 21, 2018'
    }
]
@app.route("/")
@app.route("/home")
def home():
    """Render the landing page with the in-memory demo posts."""
    return render_template('home.html', posts = posts)
@app.route("/about")
def about():
    """Render the static about page."""
    return render_template('about.html', title = 'About')
#this is so that we will not have to restart the server whenever new changes are introduced
if __name__ == '__main__':
    # Debug mode enables the auto-reloader; do not use in production.
    app.run(debug=True)
| [
"idrissov22@gmail.com"
] | idrissov22@gmail.com |
c009051241a83019b6b7b8d33d001752d3ebe5bb | 428dd6e4ab9ee4916664f13a25f04ae424769d47 | /pwd.py | 162233077798636c35d2030fa2e855c4d0cb7694 | [] | no_license | licmnn/pwd | 16e8ae1113f8a7bed8d16bdb4e989983c3e41f50 | 82921059804619e28f777691cf4d9c46e528deaa | refs/heads/master | 2022-07-10T12:50:05.914564 | 2020-05-19T15:20:21 | 2020-05-19T15:20:21 | 265,280,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | #password = 'a123456'
#x = 3
#while x > 0:
# pw = input('่ฏท่พๅ
ฅๅฏ็ ๏ผ ')
# if pw == password:
# print('็ปๅ
ฅๆๅ') # quit
# break
# elif x != 0:
# x = x - 1
# print('ๅฏ็ ้่ฏฏ๏ผ ่ฟๆ%dๆฌกๆบไผ๏ผ' % x)
# else:
# pirnt('')
'''
pw = input('่ฏท่พๅ
ฅๆจ็ๅฏ็ ๏ผ ')
x = 2
print('ๅฏ็ ้่ฏฏ๏ผ ่ฟๆ%dๆฌกๆบไผ ' % x)
while x > 0 :
if pw != 'a123456' and x > 0:
pw = input('่ฏท่พๅ
ฅๆจ็ๅฏ็ ๏ผ ')
x = x - 1
print('ๅฏ็ ้่ฏฏ๏ผ ่ฟๆ%dๆฌกๆบไผ ' % x)
elif x == 0:
print('ๅฏ็ ้่ฏฏ๏ผ็ญๆๆบไผไบ')
elif pw == 'a123456':
print('็ปๅ
ฅๆๅ')
'''
# NOTE(review): the original string literals were mojibake split across
# physical lines (the file was saved with a broken encoding), which made the
# script syntactically invalid. The Chinese prompts below are reconstructed
# from the garbled fragments — verify against the original source history.
password = 'a123456'
attempts = 3
while attempts > 0:
    attempts = attempts - 1
    pw = input('请输入密码: ')
    if pw == password:
        print('登入成功')
        break
    else:
        print('密码错误!')
        if attempts > 0:
            print('还有%d次机会!' % attempts)
        else:
            print('没有机会了, 账号锁定!')
"licmn@live.com"
] | licmn@live.com |
e944a493701484b85f0930ed5c5c716253ed6a9b | 8227d4cf270ffc45ed9c0c5f94b04f51187751c4 | /srezy.py | eb59c4eaa583d3111c0c3b1040beb6c6948da6cd | [] | no_license | NosevichOleksandr/firstrepository | ced760563a039af88dd1a5588ba0553e5e0cdef6 | 70652118bcdac48f7b638fe2d896d587f79115d1 | refs/heads/master | 2023-04-02T02:46:50.723173 | 2021-04-17T13:34:37 | 2021-04-17T13:34:37 | 356,614,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | print('hello world')
# Read a string; if its length is even, print the first half followed by the
# reversed second half (a half-mirror), otherwise report invalid input.
a = input('write your ... something: ')
if len(a) % 2 == 0:
    half = len(a) // 2
    print(a[:half] + a[half:][::-1])
else:
    print('ัั ะฝะตะฟัะฐะฒะธะปัะฝะพ ะฒะฒะตะป')
| [
"bpxnastalgia@gmail.com"
] | bpxnastalgia@gmail.com |
abbef862cd5933de0ed6f118c4196a4adeb7ccc2 | 376dbd781ff32bb5c1fa64b8b2fc2cb7bfeb62bb | /main.py | 46a4a3510a8ea612c0975a28bd3c2053cae6dceb | [] | no_license | ahaggart/image-shatter | db00662c85c6f8b698008757fc38e9b42d733d91 | cca1a392bc3445b117e413bcdb8fd473de82ebfe | refs/heads/master | 2020-05-26T21:55:49.783242 | 2019-05-24T11:03:48 | 2019-05-24T11:03:48 | 188,388,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | import argparse
import cv2 as cv
from blobs import caluculate_colorspace_distances, grow_blobs
from polygons import find_edges, order_edges
from colorize import color_blobs, color_edges, color_ordered
def main(config):
    """Run the shatter pipeline on img/<file> and write the result to out/<file>.

    config is the argparse.Namespace produced below; its single attribute
    `file` names a file inside the img/ directory.
    """
    image = cv.imread('img/' + config.file)
    # NOTE(review): cv2 arrays are indexed (rows, cols), so shape[:2] is
    # (height, width); these names look swapped — verify what grow_blobs
    # and color_ordered actually expect.
    width, height = image.shape[:2]
    print("Calculating colorspace distances...")
    xdist, ydist = caluculate_colorspace_distances(image)
    print("Growing blobs...")
    blobs = grow_blobs(width, height, xdist, ydist)
    print("> Num blobs: {}".format(blobs.index))
    edges = find_edges(blobs)
    ordered = order_edges(edges)
    print("Coloring blobs...")
    # colors = color_blobs(image, blobs)
    # colors = color_edges(edges, width, height)
    colors = color_ordered(ordered, width, height)
    print("Writing image...")
    cv.imwrite('out/' + config.file, colors)
if __name__ == "__main__":
    # CLI entry point: the single positional argument is the image filename.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "file",
        help="the path to the file within the img/ directory",
    )
    main(parser.parse_args())
| [
"alex.haggart@gmail.com"
] | alex.haggart@gmail.com |
f1b79ea5db5f487fdf2cfc2259c01ffb58418ed7 | 39c032082a86120d06c6daa33abe81fd228077bb | /tsne-test.py | c86c780cdfe556efdb8c869957eca1457810f9a9 | [] | no_license | briantimar/nn-test1 | 4b3365743f31b6bcd9aa3565a55c1239ccb81312 | 980b29b3909c6b9539a06e771f3fafbc413a2a14 | refs/heads/master | 2020-03-19T15:20:28.541618 | 2018-06-14T18:28:12 | 2018-06-14T18:28:12 | 136,667,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,180 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 14 00:13:18 2018
@author: btimar
Visualizing ground states?
"""
import sys
import numpy as np
import tensorflow as tf
sys.path.append("/Users/btimar/Documents/ryd-theory-code/python_code")
from ryd_base import make_1d_TFI_spin
from tools import get_sent
from quspin.operators import hamiltonian
from tsne import tsne
def z(i, basis):
    """Single-site sigma^z operator on site *i*, built as a quspin hamiltonian."""
    static = [['z', [[1.0, i]]]]
    return hamiltonian(static, [], basis=basis)
def zz(i, j, basis):
    """Two-site sigma^z_i sigma^z_j correlation operator, built as a quspin hamiltonian."""
    static = [['zz', [[1.0, i, j]]]]
    return hamiltonian(static, [], basis=basis, dtype=np.float64)
def make_labels(gvals):
    """Binary labels over the field values: 1 where g < 1, else 0."""
    return np.where(gvals < 1, 1, 0)
def get_features(gvals, basis):
    """Return ground states of the 1D TFI model, one row per field value in *gvals*.

    Each row is the lowest-energy eigenvector (length basis.Ns) of the
    transverse-field Ising Hamiltonian at that g.
    """
    N=len(gvals)
    s = np.empty((N, basis.Ns))
    for i in range(N):
        h = make_1d_TFI_spin(1, gvals[i], basis, dtype=np.float64)
        # 'SA' = smallest algebraic eigenvalue, i.e. the ground state.
        _, psi0 = h.eigsh(k=1, which='SA')
        s[i, :] = psi0.reshape(basis.Ns)
    return s
from quspin.basis import spin_basis_1d
# System size and a symmetry-reduced basis (kblock=0, pblock=1 sectors),
# plus the full basis for building observables.
L=8
basis = spin_basis_1d(L, kblock=0, pblock=1)
basis_full = spin_basis_1d(L)
# Projector from the symmetry-reduced basis back to the full Hilbert space.
proj=basis.get_proj(np.float64)
N=500
gvals = np.linspace(0, 2.0, N)
states = get_features(gvals, basis)
labels = make_labels(gvals)
# NOTE(review): `features` is used here but is only assigned two lines below,
# so this line raises NameError as written; it presumably should use `states`.
psi_full = np.asarray(proj.todense().dot( features.transpose()))
zzops = [zz(0, i, basis_full) for i in range(1,L//2)]
features = np.empty((N, len(zzops)))
for i in range(len(zzops)):
    features[:, i] = zzops[i].expt_value(psi_full)
from tools import overlap
overlaps = [np.abs(overlap(features[i, :], features[-1, :]))**2 for i in range(N-1)]
# NOTE(review): `zz1op` is never defined anywhere in this script — likely one
# of the `zzops` entries was intended.
zz1 = zz1op.expt_value(psi_full)
#tSNE params
no_dims = 2
#dimensionality of raw data
d = features.shape[1]
initial_dims = d
perplexity = 30.0
print("Passing to tsne")
y = tsne(features, no_dims=no_dims, initial_dims=initial_dims, perplexity=perplexity)
import matplotlib.pyplot as plt
from EDIO import save
fig, ax=plt.subplots()
# Color the 2-D embedding by the phase label derived from g.
plt.scatter(y[:, 0], y[:, 1], c=labels)
#save(fig, "20180613/tsne-tfi-symm-wfs-L={0}".format(L),which='mac')
# Split the embedded points into two clusters by hand-picked thresholds.
blockA = (y[:, 1]>0)*(y[:, 0]<15)
blockB = np.logical_not(blockA)
plt.plot(gvals[blockA], zz1[blockA], label='A')
plt.plot(gvals[blockB], zz1[blockB], 'rx',label='B')
| [
"timarbrian@gmail.com"
] | timarbrian@gmail.com |
a6d1b05a1ca185859368e58727850feec6b840f5 | 209f0d778a673884cf56b83e9bde392f712f84aa | /.venv/bin/chardetect | aa41d066f5dfbc7d9dc51d3bdcff15c0287360f8 | [] | no_license | obiorbitalstar/chess-board | f65e5706405e4a420dfc843b5f18c39a1f16cc7d | 0bb61022fd49a6361813d894f28bcd99c07766aa | refs/heads/Master | 2022-12-18T01:56:50.652476 | 2020-08-30T13:43:38 | 2020-08-30T13:43:38 | 291,475,486 | 0 | 0 | null | 2020-08-30T13:43:39 | 2020-08-30T13:27:49 | Python | UTF-8 | Python | false | false | 264 | #!/home/orphues/codefellows/401/chess-board/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
# Console entry point: strip setuptools' script-wrapper suffix
# ('-script.pyw' / '.exe') from argv[0], then delegate to chardet's CLI.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"obiorbitalstar@gmail.com"
] | obiorbitalstar@gmail.com | |
ab6f49788e9c9b703b8119182f349d2b181ec92c | f907f8ce3b8c3b203e5bb9d3be012bea51efd85f | /kaki.py | 2c2c28e9db71f7ea3b53c39e8cf861cadb925d35 | [] | no_license | KohsukeKubota/Atcoder-practice | 3b4b986395551443f957d1818d6f9a0bf6132e90 | 52554a2649445c2760fc3982e722854fed5b8ab1 | refs/heads/master | 2020-08-26T15:17:29.344402 | 2019-10-26T11:14:24 | 2019-10-26T11:14:24 | 217,052,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | S = [input() for _ in range(12)]
# Count how many of the input lines in S contain the letter 'r'.
cnt = 0
for s in S:
    # Substring membership on the string itself; building a throwaway
    # set per line (as before) was unnecessary for a single-char test.
    if 'r' in s:
        cnt += 1
print(cnt)
| [
"kohsuke@KohsukeKubotas-MacBook-Air.local"
] | kohsuke@KohsukeKubotas-MacBook-Air.local |
7329c993e5cfe2cf131a107a9c946a0937892cb4 | 098ac9ecdaa67b717182c2aeca2a9d60833e88e7 | /opentcweb/settings/prod.py | fd788fb20c554254729032aeabf64156243e772a | [
"MIT"
] | permissive | cahya-wirawan/opentc-web | c8e758835d129cf7edb6f9dbf640632c2aa9ff2f | fa74c49f3f2b1a74624deca912f7da87afdc7e1b | refs/heads/master | 2021-01-19T19:13:44.629858 | 2018-01-21T13:21:32 | 2018-01-21T13:21:32 | 88,406,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | from __future__ import absolute_import
from .base import *
# Production overrides
# Everything else is inherited from settings.base via the star import
# above; only debug output is forced off for production deployments.
DEBUG = False
#...
| [
"cahya.wirawan@gmail.com"
] | cahya.wirawan@gmail.com |
46b52fe8e5c60205d2161d38dc9193d19d105f9e | cba90cdd06eced813be6ad80e6295587223c4600 | /betfairlightweight/endpoints/navigation.py | 8795b7d2b4a2e08e79350a3a78ae3dd5e1c20f13 | [
"MIT"
] | permissive | mberk/betfair | 1a22528b881e02567626dbe7e8c4f0197809c38e | 6b064a68c8d2afceda81b70d74b6a0ee9601f228 | refs/heads/master | 2023-03-07T02:33:06.443407 | 2022-08-16T08:06:10 | 2022-08-16T08:06:10 | 192,976,576 | 0 | 1 | MIT | 2023-03-01T12:03:37 | 2019-06-20T19:28:23 | Python | UTF-8 | Python | false | false | 1,510 | py | import requests
from ..exceptions import APIError, InvalidResponse
from ..utils import check_status_code
from .baseendpoint import BaseEndpoint
from ..compat import json
class Navigation(BaseEndpoint):
    """
    Navigation operations.
    """

    def list_navigation(self, session: requests.Session = None) -> dict:
        """
        Retrieve the full Betfair market navigation menu (served as a
        compressed file by the Navigation Data for Applications service).

        :param requests.session session: Requests session object
        :rtype: json
        """
        return self.request(session=session)

    def request(
        self, method: str = None, params: dict = None, session: requests.Session = None
    ) -> (dict, float):
        # Fall back to the client's shared session when none is supplied.
        http = session or self.client.session
        try:
            response = http.get(
                self.url,
                headers=self.client.request_headers,
                timeout=(self.connect_timeout, self.read_timeout),
            )
        except Exception as err:
            # Any transport-level failure (connection errors included) is
            # surfaced as an APIError carrying the call context.
            raise APIError(None, method, params, err)

        # Raise for non-OK HTTP statuses before attempting to parse.
        check_status_code(response)

        try:
            return json.loads(response.content.decode("utf-8"))
        except ValueError:
            # Body was not valid JSON.
            raise InvalidResponse(response.text)

    @property
    def url(self) -> str:
        # Endpoint URL comes from the owning client's configuration.
        return self.client.navigation_uri
| [
"paulingliam@gmail.com"
] | paulingliam@gmail.com |
42cd98f60f8637e2f8b57280dee6eeb14f3eac98 | bb4dc40ec0b62e5d2fc3ce1234013aebd4e648d5 | /src/modules/customised/payroll/hra/__init__.py | 708a454f4468ac2e8c826538ed0f9f59fab6f7cf | [] | no_license | kakamble-aiims/work | ba6cbaf4c525ff7bc28d0a407f16c829d0c35983 | cd392bf0e80d71c4742568e9c1dd5e5211da56a9 | refs/heads/master | 2022-04-02T14:45:58.515014 | 2019-12-31T14:00:51 | 2019-12-31T14:00:51 | 199,015,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | from trytond.pool import Pool
from .hra import *
def register():
Pool.register(
HRA_Allowance,
module='hra', type_='model') | [
"kakamble.aiims@gmail.com"
] | kakamble.aiims@gmail.com |
fb12aa0f0a717a88fe55aac5ace8b0cda17fdc54 | ce39286b958c01cbbd68ecffc33d3724c8c130f1 | /club/app/migrations/0026_paquete_inscrito_horas_consumidas.py | 166d62f8b0c29460f78bc02d7a841840c518c74e | [] | no_license | Rob866/club | eb26c3085bb37908226bfed306d98538eed64dff | f2e58ca773a4e461a9905c0898c1f11bd5e94099 | refs/heads/master | 2022-12-16T00:07:32.423945 | 2020-01-09T04:38:09 | 2020-01-09T04:38:09 | 216,317,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | # Generated by Django 2.2.6 on 2019-10-15 20:29
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add `horas_consumidas` to paquete_inscrito."""

    dependencies = [
        ('app', '0025_auto_20191015_1240'),
    ]

    operations = [
        # New duration field, defaulting to zero consumed hours.
        migrations.AddField(
            model_name='paquete_inscrito',
            name='horas_consumidas',
            field=models.DurationField(default=datetime.timedelta(0)),
        ),
    ]
| [
"juanrob_10@hotmail.com"
] | juanrob_10@hotmail.com |
6722bcef452c085f75a486160d1e49a88934b6c6 | d92a1eb61863aa0dba7df6a8e787f243715effb2 | /urls.py | 0da45212c15d635f432c6b8adbd8c6e3b4f2a0a7 | [] | no_license | Engaginglab/scoreit | 91a7cc4b610556b45355bdd32c4c608dc2993edd | a54a073e6aded904b14738867c6b9dfa31e744f0 | refs/heads/master | 2021-01-10T19:58:05.162232 | 2012-09-22T14:34:48 | 2012-09-22T14:34:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | from django.contrib import admin
from django.conf.urls.defaults import *
# Auto-discover ModelAdmin registrations from installed apps.
admin.autodiscover()
# Legacy (pre-Django 1.6) URLconf style: patterns('', ...) with bare
# (regex, include) tuples; `patterns` is presumably provided by the star
# import of django.conf.urls.defaults above.
urlpatterns = patterns('',
    (r'^auth/', include('auth.urls')),
    (r'^handball/', include('handball.urls')),
    (r'^admin/', include(admin.site.urls))
)
| [
"martin@maklesoft.com"
] | martin@maklesoft.com |
dd8ff876cdff51683095b93c5c1e9985b5a29584 | 9732da539d940904cf09b4164a307cb1a58fbb35 | /superhero/ability_and_armor.py | bb0e7c0ea30847095581385d460942d5d2e5ad75 | [] | no_license | makhmudislamov/fun_python_exercises | f3c7557fa6ed400ee196252a84ad7b6b23b913f1 | 21ab89540fb5f4f04dbdb80f361bf4febd694c11 | refs/heads/master | 2020-05-26T05:42:20.115833 | 2019-10-17T03:28:57 | 2019-10-17T03:28:57 | 188,125,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | from random import randint
class Ability:
    """A named attack with a depletable maximum damage value."""

    def __init__(self, name, max_damage):
        """Store the ability's name and its maximum damage."""
        self.name = name
        self.max_damage = max_damage

    def __str__(self):
        """Human-readable description of the ability."""
        return f'This ability is {self.name}'

    def ability_attack(self):
        """Roll a random attack value in [0, max_damage].

        The rolled amount is subtracted from max_damage, so every use
        weakens the ability; the roll itself is returned to the caller.
        """
        dealt = randint(0, self.max_damage)
        self.max_damage = self.max_damage - dealt
        return dealt


class Weapon(Ability):
    """An Ability variant whose attacks never drop below half power."""

    def ability_attack(self):
        """Roll a random attack value in [max_damage // 2, max_damage].

        Unlike Ability.ability_attack, this does not deplete max_damage.
        """
        half_power = self.max_damage // 2
        return randint(half_power, self.max_damage)
class Armor:
    """A named shield that blocks a random amount of damage."""

    def __init__(self, name, max_block):
        """Store the armor's name and its maximum block strength."""
        self.name = name
        self.max_block = max_block

    def block(self):
        """Return a random block value in [0, max_block]; state unchanged."""
        return randint(0, self.max_block)
# if __name__ == "__main__":
# pass
| [
"sunnatovichvv@gmail.com"
] | sunnatovichvv@gmail.com |
0cd44d57984873ff2dd3d1e25064b5bc50b880c1 | 23fdc97552b7e55b8c5e5eca3046ee95eb29d43c | /leetcode/leetcode_111.py | db8f1ff94b73fffc72e5a91a64d84064cc74e9ef | [] | no_license | qiqimaochiyu/tutorial-python | b999ef63026d05045a33225b1fbe2a5919422961 | a0d50946859798a642aaacdc31fa97cc015ae615 | refs/heads/master | 2018-10-04T18:12:18.017065 | 2018-06-26T09:23:50 | 2018-06-26T09:23:50 | 90,226,235 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def minDepth(self, root):
        """Return the number of nodes on the shortest root-to-leaf path.

        :type root: TreeNode
        :rtype: int
        """
        # An empty tree contributes no depth.
        if root is None:
            return 0
        left_depth = self.minDepth(root.left)
        right_depth = self.minDepth(root.right)
        # With at most one child, the missing side has depth 0 and must not
        # count as a leaf path, so the two depths are summed instead.
        if root.left is None or root.right is None:
            return left_depth + right_depth + 1
        return min(left_depth, right_depth) + 1
| [
"noreply@github.com"
] | qiqimaochiyu.noreply@github.com |
90208cdc6c60917016382d39760b8a3bb14ff4d3 | 8d34c0d29b69028fb1da01499fdac19f9762a8e1 | /lib/python2.7/sunlight/service.py | cf069d357b54bd46632e3a1b8417e69f90c7f8b1 | [] | no_license | politicrowd/politicrowd | 87a98689724c288b5400acd0de6fb7fae7e058d4 | e62c89d234a59f6c7d56fae2c8af6074f426c265 | refs/heads/master | 2021-01-20T00:55:48.083054 | 2013-07-30T00:14:08 | 2013-07-30T00:14:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,259 | py | # Copyright (c) Sunlight Labs, 2012 under the terms and conditions
# of the LICENSE file.
"""
.. module:: sunlight.service
:synopsis: Sunlight API Superclass
Base service class. All API classes (such as say -
:class:`sunlight.services.openstates.OpenStates`) inherit from this.
"""
import sys
import sunlight.config
import sunlight.errors
if sys.version_info[0] >= 3:
from urllib.parse import urlencode
from urllib.request import urlopen
from urllib.error import HTTPError
else:
from urllib import urlencode
from urllib2 import urlopen
from urllib2 import HTTPError
class Service:
    """
    Base class for all the API implementations, as well as a bunch of common
    code on how to actually fetch text over the network.
    """

    def get(self, top_level_object, **kwargs):
        """
        Get some data from the network - this is where we actually fetch
        something and make a request.

        .. warning:: Be sure that API_KEY was set before calling this method.
            This will throw a :class:`sunlight.errors.NoAPIKeyException` if
            the API_KEY is not set.

        args:
            ``top_level_object`` (str): Thing to query for (such as say,
                "bills" for OpenStates )

        kwargs:
            These arguments will be passed to the underlying API implementation
            to help create a query. Validation will happen down below, and
            on a per-API level.
        """
        # Fail fast with a helpful message when no API key is configured.
        if not sunlight.config.API_KEY:
            raise sunlight.errors.NoAPIKeyException(
                "Warning: Missing API Key. please visit " + sunlight.config.API_SIGNUP_PAGE +
                " to register for a key.")

        # Subclasses build the concrete query URL via _get_url().
        url = self._get_url(top_level_object, sunlight.config.API_KEY,
                            **kwargs)
        try:
            r = urlopen(url)
            return_data = r.read().decode('utf8')
            # Subclasses decode the raw payload (e.g. JSON) in _decode_response().
            return self._decode_response(return_data)
        except HTTPError as e:
            # Wrap HTTP failures in a BadRequestException carrying the
            # status code, body and URL of the failed call.
            # NOTE(review): under Python 3, e.read() returns bytes, so the
            # "%s" interpolation below renders a b'...' repr -- confirm
            # whether decoding the body was intended here.
            message = e.read()
            code = e.getcode()
            ex = sunlight.errors.BadRequestException("Error (%s) -- %s" % (
                code, message
            ))
            ex.url = e.geturl()
            ex.message = message
            ex.code = code
            raise ex
| [
"paul@politicrowd.com"
] | paul@politicrowd.com |
7be265dac32863a3cb746a50679f132f3cfc6705 | 7d17375998378125fa63b1cf8673b8387d99324f | /core/migrations/0006_auto_20200517_0929.py | 07187c5d3c0bad2459578712f836c9dcc72447db | [] | no_license | priyanka1698/Late-checker | c2f96993340f09c55bd9eac2261cc760c409eae9 | ff1c4615b3abefd65fa789fcd1e684322ddfc9de | refs/heads/master | 2022-08-19T15:22:00.692770 | 2020-05-20T11:45:35 | 2020-05-20T11:45:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,921 | py | # Generated by Django 3.0.5 on 2020-05-17 09:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add the Timing model, rename Station to
    Gate, and replace Log's image/fare fields with entry/exit status
    fields."""

    dependencies = [
        ('core', '0005_delete_image'),
    ]

    operations = [
        # New Timing model holding a start/end time window.
        migrations.CreateModel(
            name='Timing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start', models.TimeField()),
                ('end', models.TimeField()),
            ],
        ),
        # Station becomes Gate; its station_no field becomes plain `no`.
        migrations.RenameModel(
            old_name='Station',
            new_name='Gate',
        ),
        migrations.RenameField(
            model_name='gate',
            old_name='station_no',
            new_name='no',
        ),
        # Log now refers to gates rather than stations.
        migrations.RenameField(
            model_name='log',
            old_name='entry_station',
            new_name='entry_gate',
        ),
        migrations.RenameField(
            model_name='log',
            old_name='exit_station',
            new_name='exit_gate',
        ),
        # Drop the entry/exit image captures and the fare column.
        migrations.RemoveField(
            model_name='log',
            name='entry_image',
        ),
        migrations.RemoveField(
            model_name='log',
            name='exit_image',
        ),
        migrations.RemoveField(
            model_name='log',
            name='fare',
        ),
        # Punctuality status for entry and exit, defaulting to 'on time'.
        migrations.AddField(
            model_name='log',
            name='entry_status',
            field=models.CharField(choices=[('late', 'late'), ('early', 'early'), ('on time', 'on time'), ('in office', 'in office')], default='on time', max_length=100),
        ),
        migrations.AddField(
            model_name='log',
            name='exit_status',
            field=models.CharField(choices=[('late', 'late'), ('early', 'early'), ('on time', 'on time'), ('in office', 'in office')], default='on time', max_length=100),
        ),
    ]
| [
"sanyam19092000@gmail.com"
] | sanyam19092000@gmail.com |
eaed7c034ea788beec33f6c9b31938383407cb09 | 7c98bee73fdd64a8fa08dc3a94e520b175ac190e | /bin/rst2latex.py | 311af3f8a994b04f7ad5f9f586bc0e1fc7cd2fcd | [] | no_license | Barry-Chen-Intersective/Proj-Api-Test | 97015eccb4b6d1652ca8809cbb8ddbe1132008c0 | cc707770578bd35a155f506198ad765d91926daa | refs/heads/master | 2021-07-21T04:45:09.329331 | 2018-10-23T01:25:02 | 2018-10-23T01:25:02 | 135,973,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 867 | py | #!/Library/Frameworks/Python.framework/Versions/2.7/Resources/Python.app/Contents/MacOS/Python
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
# Best-effort locale initialisation; failure is deliberately ignored
# (hence the bare except, as shipped by docutils) so the converter still
# runs on systems with a broken locale configuration.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_cmdline

# Command-line help text shown by the docutils publisher front end.
description = ('Generates LaTeX documents from standalone reStructuredText '
               'sources. '
               'Reads from <source> (default is stdin) and writes to '
               '<destination> (default is stdout).  See '
               '<http://docutils.sourceforge.net/docs/user/latex.html> for '
               'the full reference.')

# Hand the whole CLI (argument parsing, I/O, writer selection) to docutils.
publish_cmdline(writer_name='latex', description=description)
| [
"barry@intersective.com"
] | barry@intersective.com |
d5e72b406d12cc64d1490e7ba89e8b441f0c7d4e | bf511b123d05788e45b17270dc3e651a82a7ee58 | /taskmate/todolist_app/migrations/0001_initial.py | 2f3cc31b47612b0ee866de917ddd0d5a8fd0076f | [] | no_license | joseluis-gc/Django-TodoApp | 36a7350064a66dd917c1383be1eee923f3ffd20e | 2e3f82ba0fe08f76fb0d98bec2bde2717684818e | refs/heads/master | 2023-06-09T18:42:46.580395 | 2021-06-24T05:31:50 | 2021-06-24T05:31:50 | 377,983,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # Generated by Django 3.1.5 on 2021-06-18 00:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: create the TaskList model."""

    initial = True

    dependencies = [
    ]

    operations = [
        # TaskList: a task description plus a done flag (default False).
        migrations.CreateModel(
            name='TaskList',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('task', models.CharField(max_length=300)),
                ('done', models.BooleanField(default=False)),
            ],
        ),
    ]
| [
"joseluisgomezcecegna@gmail.com"
] | joseluisgomezcecegna@gmail.com |
8b579bbef3904192079b88f2eac49f003e9d4eb1 | 7ad7eb81d1dc26edd3b958a5dbbc7d61f19b0640 | /test/test_mode_type.py | 4c28945ba899c6fcde6a31a07364acf1687527cc | [
"MIT"
] | permissive | camptocamp/quickpac-client | a1b6e7164e2ad362c0c6d819a31ace9f6774f926 | 761c08bdc3846c724adbc99b589d2db460a6bcdc | refs/heads/master | 2023-07-16T18:46:48.167193 | 2021-09-02T08:56:46 | 2021-09-02T09:12:46 | 401,749,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | # coding: utf-8
"""
Quickpac API
Here you will find all public interfaces to the Quickpac system. # noqa: E501
OpenAPI spec version: v1.00
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import quickpac
from quickpac.models.mode_type import ModeType # noqa: E501
from quickpac.rest import ApiException
class TestModeType(unittest.TestCase):
    """ModeType unit test stubs (swagger-codegen generated placeholders)."""

    def setUp(self):
        # No fixtures required for this stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testModeType(self):
        """Test ModeType"""
        # FIXME: construct object with mandatory attributes with example values
        # model = quickpac.models.mode_type.ModeType()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"stephane.mangin@camptocamp.com"
] | stephane.mangin@camptocamp.com |
edb363be7d18412f48d26946d0a265a266919f9e | 9d43b8a3b53001f25a347fd96e5c49538b0c509a | /mxshop/apps/trade/views.py | 30e854b8ad252b98ccda10e6bfe8ca3d67cb173a | [] | no_license | w8833531/mxfresh | b81b7e4223536c6bedb049009386015935d33987 | 46b83fafdae8450491344c531de81a45ab5d8aae | refs/heads/master | 2021-04-09T15:53:50.829921 | 2018-08-08T01:41:14 | 2018-08-08T01:41:14 | 125,793,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,746 | py | import random, time
from datetime import datetime
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework import status
from rest_framework import permissions
from rest_framework import authentication
from rest_framework import mixins
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from utils.permissions import IsOwnerOrReadOnly
from utils.alipay import AliPay
from .serializers import ShopCartSerializer,ShopCartDetailSerializer, OrderSerializer, OrderDetailSerializer
from .models import ShoppingCart, OrderInfo, OrderGoods
from mxshop.settings import appid, private_key_path, alipay_pub_key_path, alipay_notify_url, alipay_return_url
# Create your views here.
class ShoppingCartViewset(viewsets.ModelViewSet):
    """
    Shopping-cart endpoints.

    list:
        Return the items in the current user's cart.
    create:
        Add an item to the cart.
    delete:
        Remove an item from the cart.
    update:
        Update a cart item.
    """
    authentication_classes = (JSONWebTokenAuthentication, authentication.SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated, IsOwnerOrReadOnly)
    # Cart rows are addressed by the related goods id rather than their pk.
    lookup_field = "goods_id"

    # override get_serializer_class method, if list return DetailSerializer
    def get_serializer_class(self, *args, **kwargs):
        if self.action == 'list':
            return ShopCartDetailSerializer
        else:
            return ShopCartSerializer

    def get_queryset(self):
        # Users only ever see their own cart items.
        return ShoppingCart.objects.filter(user=self.request.user)
class OrderViewset(mixins.ListModelMixin, mixins.CreateModelMixin, mixins.RetrieveModelMixin, mixins.DestroyModelMixin, viewsets.GenericViewSet):
    """
    Order management.

    List:
        Return the current user's orders.
    Delete:
        Delete an order (restocking its goods when it was never paid).
    Create:
        Create a new order from the user's shopping cart.
    Retrieve:
        Return the details of one order.
    """
    authentication_classes = (JSONWebTokenAuthentication, authentication.SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated, IsOwnerOrReadOnly)
    serializer_class = OrderSerializer

    def get_queryset(self):
        # Users only ever see their own orders.
        return OrderInfo.objects.filter(user=self.request.user)

    def get_serializer_class(self):
        if self.action == 'retrieve':
            return OrderDetailSerializer
        else:
            return OrderSerializer

    # Build an order number as <timestamp><user id><2-digit random suffix>.
    def generate_order_sn(self):
        random_int = random.Random()
        order_sn = "{time_str}{userid}{random_str}".format(time_str=time.strftime('%Y%m%d%H%M%S'),
                userid=self.request.user.id, random_str=random_int.randint(10, 99))
        return order_sn

    # Overrides perform_create: stamps the order_sn onto the new order.
    def perform_create(self, serializer):
        """
        On order creation: attach the cart's goods to the order, decrement
        their stock, and empty the shopping cart.
        """
        # Save the current user's order with a freshly generated number.
        order = serializer.save(order_sn=self.generate_order_sn())
        # All cart entries belonging to the current user.
        shop_carts = ShoppingCart.objects.filter(user=self.request.user)
        # Move each cart entry into the order, decrement stock, clear cart.
        # NOTE(review): the read-modify-write on goods_num below is not
        # atomic; concurrent orders could oversell -- confirm whether an
        # F() expression / transaction is needed.
        for shop_cart in shop_carts:
            # Create the order-line object.
            order_goods = OrderGoods()
            # Copy the goods and quantity from the cart entry.
            order_goods.goods = shop_cart.goods
            order_goods.goods_num = shop_cart.nums
            # Decrement the goods' stock accordingly.
            order_goods.goods.goods_num -= order_goods.goods_num
            order_goods.goods.save()
            # Attach the line to the order and persist it.
            order_goods.order = order
            order_goods.save()
            # Clear the cart entry.
            shop_cart.delete()
        return order

    # Overrides perform_destroy: restock goods of orders that were not paid.
    def perform_destroy(self, instance):
        if instance.pay_status != "TRADE_SUCCESS":
            # Unpaid order: return each line's quantity to stock, then delete.
            order_goods = OrderGoods.objects.filter(order=instance.id)
            for order_good in order_goods:
                order_good.goods.goods_num += order_good.goods_num
                order_good.goods.save()
            instance.delete()
            # NOTE(review): paid orders fall through without being deleted;
            # confirm this is intentional.
class AliPayViewset(APIView):
    """Alipay payment callbacks (return_url GET / notify_url POST)."""

    def get(self, request):
        """
        Handle Alipay's return_url redirect (browser returns after paying).

        :param request:
        :return:
        """
        # Copy the query parameters and detach the signature for verification.
        processed_dict = {}
        for key, value in request.GET.items():
            processed_dict[key] = value
        sign = processed_dict.pop("sign", None)
        alipay = AliPay(
            appid=appid,
            app_notify_url=alipay_notify_url,
            app_private_key_path=private_key_path,
            # Alipay's public key -- used to verify messages coming *from*
            # Alipay, not our own public key.
            alipay_public_key_path=alipay_pub_key_path,
            debug=True,  # defaults to False
            return_url=alipay_return_url,
        )
        verify_re = alipay.verify(processed_dict, sign)
        if verify_re is True:
            # Order state changes are handled by the notify_url POST handler
            # below; the return_url only acknowledges the redirect.
            # order_sn = processed_dict.get('out_trade_no', None)
            # trade_no = processed_dict.get('trade_no', None)
            # trade_status = processed_dict.get('trade_status', None)
            # existed_orders = OrderInfo.objects.filter(order_sn=order_sn)
            # for existed_order in existed_orders:
            #     existed_order.pay_status = trade_status
            #     existed_order.trade_no = trade_no
            #     existed_order.pay_time = datetime.now()
            #     existed_order.save()
            return Response("success")

    def post(self, request):
        """
        Handle Alipay's asynchronous notify_url callback.

        :param request:
        :return:
        """
        # Copy the POST parameters and detach the signature for verification.
        processed_dict = {}
        for key, value in request.POST.items():
            processed_dict[key] = value
            print(key, value)
        sign = processed_dict.pop("sign", None)
        alipay = AliPay(
            appid=appid,
            app_notify_url=alipay_notify_url,
            app_private_key_path=private_key_path,
            # Alipay's public key -- used to verify messages coming *from*
            # Alipay, not our own public key.
            alipay_public_key_path=alipay_pub_key_path,
            debug=True,  # defaults to False
            return_url=alipay_return_url,
        )
        verify_re = alipay.verify(processed_dict, sign)
        if verify_re is True:
            order_sn = processed_dict.get('out_trade_no', None)
            trade_no = processed_dict.get('trade_no', None)
            trade_status = processed_dict.get('trade_status', None)
            existed_orders = OrderInfo.objects.filter(order_sn=order_sn)
            for existed_order in existed_orders:
                existed_order.pay_status = trade_status
                # On successful payment, bump the sold counter of every
                # goods in the order. (Note: better done by a background
                # task than inline in the request.)
                if existed_order.pay_status == "TRADE_SUCCESS":
                    order_goods = existed_order.goods.all()
                    for order_good in order_goods:
                        order_good.goods.sold_num += order_good.goods_num
                        order_good.goods.save()
                existed_order.trade_no = trade_no
                existed_order.pay_time = datetime.now()
                existed_order.save()
return Response("success") | [
"w8833531@hotmail.com"
] | w8833531@hotmail.com |
7d6c817fe544b5cc80a68b8c685ce92faf0c9ef5 | a9d6a3b0fe418e4e5cc131ebc05f9b56c0e4543e | /chapter11-django/site02/site02/settings.py | 1ba07484b03cf34c8252583125bc6c301d4cb224 | [] | no_license | Kianqunki/Python_CorePythonApplicationsProgramming | 34a36ba64bdc303814de507c4fcfc3c81ff88b5f | 77263c1fde0d02aade180f7e73d2cdee1d170d58 | refs/heads/master | 2021-05-07T02:41:44.567088 | 2014-10-27T17:43:51 | 2014-10-27T17:43:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,596 | py | """
Django settings for site02 project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y97upk5xk__c@j95sw4v-pf&#i45ir$cm6-ya)byzikor7+2sv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'approver',
'poster'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'site02.urls'
WSGI_APPLICATION = 'site02.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'TweetApprover.db'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# from this point on MY consts
TWEET_APPROVER_EMAIL = 'georstef@gmail.com'
EMAIL_HOST = 'smtp.mydomain.com'
EMAIL_HOST_USER = 'username'
EMAIL_HOST_PASSWORD = 'password'
DEFAULT_FROM_EMAIL = 'username@mydomain.com'
SERVER_EMAIL = 'username@mydomain.com'
TWITTER_CONSUMER_KEY = 'DeH9TfrfeV7UeRgK3OSGA'
TWITTER_CONSUMER_SECRET = 'sZGBB28VZcrRfcZvexYydj2Pc2uWW307kP8l7T7yiQo'
TWITTER_OAUTH_TOKEN = '2334856880-zYwvSu8kS7cGfH67lQ64vulTUbY7zxhc39bpnlG'
TWITTER_OAUTH_TOKEN_SECRET = 'RTQ7pzSytCIPsASCkA0Z5rubpHSWbvjvYR3c3hb9QhC3M'
| [
"georstef@gmail.com"
] | georstef@gmail.com |
890ebb99722d62fb6f06cab56b621f579b3449d8 | 6044e804dc994cb342dee73124e9216d552f6096 | /first_test.py | 4b5fe2ac95c876216633f6dacbfe65f4347e5645 | [] | no_license | jjpikoov/jjblog | 8fc02a84fa45e2d92dcdc2def55b49969b361b2f | fb89e2c6b737075c14ba25f3c3331717ae565b2c | refs/heads/master | 2021-01-10T09:10:51.089560 | 2016-02-17T16:40:53 | 2016-02-17T16:40:53 | 49,321,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | import os
import main
import unittest
import tempfile
class JJblogTestCase(unittest.TestCase):
    """Integration tests for the jjblog app's admin login/logout flow."""

    def setUp(self):
        # Point the app at a throwaway temporary database file and create
        # a test client before each test.
        self.db_fd, main.app.config['DATABASE'] = tempfile.mkstemp()
        self.app = main.app.test_client()
        main.database.init_db()

    def tearDown(self):
        # Close and remove the temporary database file.
        os.close(self.db_fd)
        os.unlink(main.app.config['DATABASE'])

    def login(self, username, password):
        # Helper: POST credentials to the admin login endpoint.
        return self.app.post('/admin', data=dict(
            username=username,
            password=password), follow_redirects=True)

    def logout(self):
        # Helper: hit the admin logout endpoint.
        return self.app.get('/admin/logout', follow_redirects=True)

    def test_login_logout(self):
        # NOTE(review): no assertions yet -- the response is only printed.
        # 'aadmin' looks like a deliberately wrong username; confirm intent.
        rv = self.login('aadmin', 'admin')
        # assert 'Failed' in rv.data
        print(rv.data)
        rv = self.logout()
if __name__ == '__main__':
unittest.main()
| [
"jjpikoov@gmail.com"
] | jjpikoov@gmail.com |
1c61b1085a41baf65935cafa23107b1498cffadb | 56c23dfabac2b6cdcecfc20f6cc1e53d0041fd7a | /stanCode projects/find_DNA_complement/complement.py | 555df5d7a6b415aa24d181d33ca43b2eb429b0f0 | [
"MIT"
] | permissive | rogerchang910/stanCode-projects | 174144bcb67b5e141f7acc320e5a14027134bc2a | 92beb09b23a40c09a093dfad80837d97a90c8e3a | refs/heads/main | 2022-12-30T10:11:23.402634 | 2020-10-19T07:57:46 | 2020-10-19T07:57:46 | 303,656,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,687 | py | """
File: complement.py
Name: Roger(Yu-Ming) Chang
----------------------------
This program uses string manipulation to
tackle a real world problem - finding the
complement strand of a DNA sequence.
The program asks users for a DNA sequence as
a python string that is case-insensitive.
Your job is to output the complement of it.
"""
def main():
    """
    The program will output the complement of a DNA sequence users input.
    """
    sequence = input_dna()
    paired = build_complement(sequence)
    print('The complement of ' + str(sequence) + ' is ' + str(paired))
def input_dna():
    """
    Repeatedly prompt the user for a DNA sequence until a valid one is given.

    A valid strand contains only the bases A, T, C and G (case-insensitive);
    the returned value is upper-cased.

    :return: str, the validated, upper-cased DNA sequence.
    """
    valid_bases = set('ATCG')
    while True:
        dna = input('Please give me a DNA strand and I\'ll find the complement: ').upper()
        # The subset test is True for the empty string as well, matching
        # the original behaviour of accepting empty input.
        if set(dna) <= valid_bases:
            return dna
        print('The input format is not correct.')
def build_complement(base):
    """
    Build the complementary strand of a DNA sequence.

    Each base is mapped to its Watson-Crick partner (A<->T, C<->G); any
    character outside A/T/C/G is silently dropped, matching the original
    behaviour (input is validated upstream by input_dna()).

    :param base: str, the DNA sequence users input.
    :return: str, the complement of the entered DNA sequence.
    """
    pairs = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return ''.join(pairs.get(ch, '') for ch in base)
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | rogerchang910.noreply@github.com |
9c762d2633df105988229e84fc9d96bc46b0cd65 | e5b72785a1a191ca8ed62dee6048f865caa61fe3 | /gestao/urls.py | c9f849a26b749eccbcb0f922bcf1c04f55871d3d | [] | no_license | MarToxAk/v5 | 344409f98ea0098b272c4e3bc2d21e7c9bbcfe65 | daa76fe9f2346805cf99b62d8f687d8f751defbc | refs/heads/master | 2020-11-27T01:54:06.684511 | 2019-12-20T12:45:43 | 2019-12-20T12:45:43 | 229,263,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | """gestao URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls import url
# URL routing table for the project.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('comparador.urls')),
    # NOTE(review): this second ''-prefixed include is consulted only for
    # paths comparador.urls does not match; any path defined in both apps
    # resolves to comparador -- confirm the intended order.
    path('', include('teste.urls')),
    path('pousada/', include('pousada.urls')),
    path('cotacao/', include('cotacao.urls')),
    path('chat2/', include('chatbot.urls'), name='chatbot'),
]
| [
"junior.ilha@hotmail.com.br"
] | junior.ilha@hotmail.com.br |
b642ce9125bc51b5a9f9d0ae69199d2d0bd1bf63 | 2e8ff2eb86f34ce2fc330766906b48ffc8df0dab | /tensorflow_probability/python/experimental/inference_gym/targets/__init__.py | a5ba67a6a9b68bf31372bf5990405fe49fbdf663 | [
"Apache-2.0"
] | permissive | wataruhashimoto52/probability | 9613f9a3cc685ff1a20643c4a05a48f9cf0fe1ae | 12e3f256544eadea6e863868da825614f4423eb0 | refs/heads/master | 2021-07-16T18:44:25.970036 | 2020-06-14T02:48:29 | 2020-06-14T02:51:59 | 146,873,495 | 0 | 0 | Apache-2.0 | 2018-08-31T09:51:20 | 2018-08-31T09:51:20 | null | UTF-8 | Python | false | false | 2,223 | py | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Targets package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_probability.python.experimental.inference_gym.targets.banana import Banana
from tensorflow_probability.python.experimental.inference_gym.targets.bayesian_model import BayesianModel
from tensorflow_probability.python.experimental.inference_gym.targets.ill_conditioned_gaussian import IllConditionedGaussian
from tensorflow_probability.python.experimental.inference_gym.targets.item_response_theory import ItemResponseTheory
from tensorflow_probability.python.experimental.inference_gym.targets.item_response_theory import SyntheticItemResponseTheory
from tensorflow_probability.python.experimental.inference_gym.targets.logistic_regression import GermanCreditNumericLogisticRegression
from tensorflow_probability.python.experimental.inference_gym.targets.logistic_regression import LogisticRegression
from tensorflow_probability.python.experimental.inference_gym.targets.model import Model
from tensorflow_probability.python.experimental.inference_gym.targets.sparse_logistic_regression import GermanCreditNumericSparseLogisticRegression
from tensorflow_probability.python.experimental.inference_gym.targets.sparse_logistic_regression import SparseLogisticRegression
__all__ = [
'Banana',
'BayesianModel',
'GermanCreditNumericLogisticRegression',
'GermanCreditNumericSparseLogisticRegression',
'IllConditionedGaussian',
'ItemResponseTheory',
'LogisticRegression',
'Model',
'SparseLogisticRegression',
'SyntheticItemResponseTheory',
]
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
414f254965e2e32371576293af109dfc8fe4d3a5 | a88f90d3aa9eb9fa7bd88458d3b78e1a7a6c3477 | /svplot/jointgrids.py | 3654cfa66dafd821320fc16c36b9de591c2ff3c0 | [
"MIT"
] | permissive | msto/svplot | a43c13ae2ae66b21b8b3176d1b722c9d88118769 | 2e16a7936328079d444bdd1edd8ab93fbbf49dde | refs/heads/master | 2021-01-13T08:14:58.545980 | 2017-03-08T17:14:17 | 2017-03-08T17:14:17 | 72,221,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,851 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright ยฉ 2016 Matthew Stone <mstone5@mgh.harvard.edu>
# Distributed under terms of the MIT license.
"""
Modification of Michael Waskom's JointGrid implementation in Seaborn.
Supports multiple JointGrids in single figure
"""
import numpy as np
import pandas as pd
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import seaborn as sns
class JointGrid(sns.JointGrid):
"""Grid for drawing a bivariate plot with marginal univariate plots."""
def __init__(self, x, y, data=None, gs=None, ratio=5, space=.2,
dropna=True, xlim=None, ylim=None):
"""Set up the grid of subplots.
Parameters
----------
x, y : strings or vectors
Data or names of variables in ``data``.
data : DataFrame, optional
DataFrame when ``x`` and ``y`` are variable names.
size : numeric
Size of each side of the figure in inches (it will be square).
ratio : numeric
Ratio of joint axes size to marginal axes height.
space : numeric, optional
Space between the joint and marginal axes
dropna : bool, optional
If True, remove observations that are missing from `x` and `y`.
{x, y}lim : two-tuples, optional
Axis limits to set before plotting.
See Also
--------
jointplot : High-level interface for drawing bivariate plots with
several different default plot kinds.
"""
# Set up the subplot grid
if gs is None:
gs = gridspec.GridSpec(ratio + 1, ratio + 1,
hspace=space, wspace=space)
ax_joint = plt.subplot(gs[1:, :-1])
ax_marg_x = plt.subplot(gs[0, :-1], sharex=ax_joint)
ax_marg_y = plt.subplot(gs[1:, -1], sharey=ax_joint)
self.ax_joint = ax_joint
self.ax_marg_x = ax_marg_x
self.ax_marg_y = ax_marg_y
# Turn off tick visibility for the measure axis on the marginal plots
plt.setp(ax_marg_x.get_xticklabels(), visible=False)
plt.setp(ax_marg_y.get_yticklabels(), visible=False)
# Turn off the ticks on the density axis for the marginal plots
plt.setp(ax_marg_x.yaxis.get_majorticklines(), visible=False)
plt.setp(ax_marg_x.yaxis.get_minorticklines(), visible=False)
plt.setp(ax_marg_y.xaxis.get_majorticklines(), visible=False)
plt.setp(ax_marg_y.xaxis.get_minorticklines(), visible=False)
plt.setp(ax_marg_x.get_yticklabels(), visible=False)
plt.setp(ax_marg_y.get_xticklabels(), visible=False)
ax_marg_x.yaxis.grid(False)
ax_marg_y.xaxis.grid(False)
# Possibly extract the variables from a DataFrame
if data is not None:
if x in data:
x = data[x]
if y in data:
y = data[y]
# Possibly drop NA
if dropna:
not_na = pd.notnull(x) & pd.notnull(y)
x = x[not_na]
y = y[not_na]
# Find the names of the variables
if hasattr(x, "name"):
xlabel = x.name
ax_joint.set_xlabel(xlabel)
if hasattr(y, "name"):
ylabel = y.name
ax_joint.set_ylabel(ylabel)
# Convert the x and y data to arrays for plotting
self.x = np.asarray(x)
self.y = np.asarray(y)
if xlim is not None:
ax_joint.set_xlim(xlim)
if ylim is not None:
ax_joint.set_ylim(ylim)
class JointGrids:
def __init__(self, data, x, y,
col=None, col_order=None,
row=None, row_order=None,
panel_size=8, ratio=5):
# row=None, row_order=None,
# col=None, col_order=None,
# hue=None, hue_order=None):
"""
Borrowed heavily from seaborn FacetGrid
Arguments
---------
panel_size : int, optional
Height/width of each constituent JointGrid
ratio : int, optional
Ratio of joint to marginal axis size
"""
if row is None:
row_names = []
else:
row_names = sns.utils.categorical_order(data[row], row_order)
if col is None:
col_names = []
else:
col_names = sns.utils.categorical_order(data[col], col_order)
# if col is not None and col_order is None:
# col_order = data[col].drop_duplicates().sort_values()
# n_cols = len(col_order)
n_cols = 1 if col is None else len(col_names)
n_rows = 1 if row is None else len(row_names)
self.fig = plt.figure(figsize=(n_cols * panel_size,
n_rows * panel_size))
self.gs = gridspec.GridSpec(n_rows, n_cols)
self.grids = np.empty((n_rows, n_cols), dtype=object)
if len(row_names) > 0 and len(col_names) > 0:
for i, row_val in enumerate(row_names):
for j, col_val in enumerate(col_names):
subdata = data.loc[(data[col] == col_val) &
(data[row] == row_val)]
ss = self.gs[i, j]
gs = gridspec.GridSpecFromSubplotSpec(ratio + 1, ratio + 1,
subplot_spec=ss)
grid = JointGrid(x, y, data=subdata, gs=gs)
self.grids[i, j] = grid
else:
if len(row_names) > 0:
facets = row_names
facet = row
grids = self.grids[:, 0]
else:
facets = col_names
facet = col
grids = self.grids[0]
for i, val in enumerate(facets):
subdata = data.loc[data[facet] == val]
gs = gridspec.GridSpecFromSubplotSpec(ratio + 1, ratio + 1,
subplot_spec=self.gs[i])
grid = JointGrid(x, y, data=subdata, gs=gs)
grids[i] = grid
def set_xlims(self, xmin, xmax):
for grid in self.grids.flat:
grid.ax_joint.set_xlim(xmin, xmax)
def set_ylims(self, ymin, ymax):
for grid in self.grids.flat:
grid.ax_joint.set_ylim(ymin, ymax)
def set_lims(self, xmin, xmax):
for grid in self.grids.flat:
grid.ax_joint.set_xlim(xmin, xmax)
grid.ax_joint.set_ylim(xmin, xmax)
def plot_joint(self, func, **kwargs):
for grid in self.grids.flat:
grid.plot_joint(func, **kwargs)
def plot_marginals(self, func, **kwargs):
for grid in self.grids.flat:
grid.plot_marginals(func, **kwargs)
| [
"matthew.stone12@gmail.com"
] | matthew.stone12@gmail.com |
748a3810da0b0659890ef170abef1ea0d6d32b5f | 5961726d2e0d84c4ced32e5cd072c3c0c07153cb | /smart_schedule/line/handlers/__init__.py | 48b1b8553fed5e192692650955bf0185450019e4 | [] | no_license | macinjoke/smart_schedule | 46bc68d712646ffb45dcf1e8bd9d140d7a9fb84f | 605c39f2d465cb8e56bedc941109f3b716608efa | refs/heads/master | 2021-03-19T15:53:35.886128 | 2018-01-13T08:22:50 | 2018-01-13T08:22:50 | 76,947,986 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,214 | py | from datetime import datetime
import flask
import urllib
import hashlib
import re
from linebot.models import TextSendMessage
from linebot import LineBotApi
from smart_schedule.settings import (
line_env, web_env, hash_env
)
line_bot_api = LineBotApi(line_env['channel_access_token'])
# TODO ไปฅ้ใฎ้ขๆฐใใกใฏใฉใใซใใในใใใใชใใกใฏใฟใชใณใฐใฎไฝๅฐใ็กใใ่ใใ
def reply_google_auth_message(event):
    """Reply to a LINE event with the Google OAuth URL for this talk.

    The talk id (user / group / room id, depending on where the event
    came from) is paired with an MD5 digest of the id plus a server-side
    seed, so the OAuth callback endpoint can verify which talk the
    authorization belongs to.
    """
    auth_url = flask.url_for('oauth2')
    # A LINE event can originate from a 1:1 chat, a group, or a room;
    # each source type exposes its identifier under a different attribute.
    if event.source.type == 'user':
        talk_id = event.source.user_id
    elif event.source.type == 'group':
        talk_id = event.source.group_id
    elif event.source.type == 'room':
        talk_id = event.source.room_id
    else:
        raise Exception('invalid `event.source`')
    # Lightweight signature: md5(talk_id + secret seed). The callback
    # recomputes this to reject tampered talk_id query parameters.
    m = hashlib.md5()
    m.update(talk_id.encode('utf-8'))
    m.update(hash_env['seed'].encode('utf-8'))
    params = urllib.parse.urlencode({'talk_id': talk_id, 'hash': m.hexdigest()})
    url = '{}{}?{}'.format(web_env['host'], auth_url, params)
    # Send the auth link back into the originating talk.
    line_bot_api.reply_message(
        event.reply_token,
        TextSendMessage(text='ใใฎใชใณใฏใใ่ช่จผใ่กใฃใฆใใ ใใ\n{}'.format(url))
    )
def reply_refresh_error_message(event):
reply_text = '''่ช่จผๆ
ๅ ฑใฎๆดๆฐใจใฉใผใ็บ็ใใพใใใๅใGoogleใขใซใฆใณใใง่คๆฐใฎ\
่ช่จผใ่กใฃใฆใใๅ ดๅใซใใฎไธๅ
ทๅใ็บ็ใใพใใใใฎใใผใฏใงSmart Scheduleใไฝฟ็จใใใๅ ดๅ\
ใฏไปฅไธใฎใใใใใ่กใฃใๅพใง่ช่จผใใชใใใฆใใ ใใใ
1. ๅใใขใซใฆใณใใง่ช่จผใใฆใใใใผใฏใงlogoutใณใใณใใ่กใ(ใชในในใก)
2. ไธ่จURLใใๆๅใงSmart Scheduleใฎ่ช่จผใ่งฃ้คใใ\
https://myaccount.google.com/u/1/permissions'''
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=reply_text)
)
def reply_invalid_credential_error_message(event):
reply_text = '''็กๅนใช่ช่จผๆ
ๅ ฑใงใใๅใGoogleใขใซใฆใณใใง่คๆฐใฎ่ช่จผใ่กใฃใฆใใ\
ๅ ดๅใซใใฎไธๅ
ทๅใ็บ็ใใพใใ่ช่จผใใใใชใใใฆใใ ใใใ'''
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=reply_text)
)
def generate_message_from_events(events, reply_text):
    """Append a human-readable summary of calendar events to *reply_text*.

    Each event dict follows the Google Calendar API shape: timed events
    carry an RFC3339 ``dateTime`` (with the fixed +09:00 / JST offset)
    under ``start``/``end``; all-day events carry a plain ``date``.
    Timed events are rendered as a start/end range; all-day events get
    an "all day" suffix instead of an end time.

    Returns the accumulated text.

    Fixed: both strftime patterns used %S (seconds) after the hour where
    the minute value was clearly intended, so every event displayed its
    seconds field (normally "00") in the minutes position. Now %H/%M.
    """
    day_of_week_strs = ["ๆ", "็ซ", "ๆฐด", "ๆจ", "้", "ๅ", "ๆฅ"]
    for e in events:
        summary = e['summary']
        # Timed events have 'dateTime'; all-day events only have 'date'.
        start = e['start'].get('dateTime', e['start'].get('date'))
        if re.match('\d+[-]\d+[-]\d+[T]\d+[:]\d+[:]\d+[+]\d+[:]\d+', start):
            # Timed event: format both endpoints with weekday + hh:mm.
            start_datetime = datetime.strptime(start, '%Y-%m-%dT%H:%M:%S+09:00')
            day_of_week = day_of_week_strs[start_datetime.weekday()]
            start = start_datetime.strftime(
                '%Yๅนด%mๆ%dๆฅ({}) %Hๆ%Mๅ'.format(day_of_week)
            )
            end = e['end'].get('dateTime', e['end'].get('date'))
            end_datetime = datetime.strptime(end, '%Y-%m-%dT%H:%M:%S+09:00')
            day_of_week = day_of_week_strs[end_datetime.weekday()]
            end = end_datetime.strftime(
                '%Yๅนด%mๆ%dๆฅ({}) %Hๆ%Mๅ'.format(day_of_week)
            )
            reply_text += '\n\n{}\n{}\n |\n{}\n\n---------------------------'.format(summary,
                                                                                    start,
                                                                                    end)
        else:
            # All-day event: date only, fixed "all day" marker for the end.
            start_datetime = datetime.strptime(start, '%Y-%m-%d')
            start = start_datetime.strftime('%Yๅนด%mๆ%dๆฅ')
            end = '็ตๆฅ'
            reply_text += '\n\n{}\n{} {}\n\n---------------------------'.format(summary,
                                                                                start,
                                                                                end)
    return reply_text
from .join_event_handler import JoinEventHandler
from .leave_event_handler import LeaveEventHandler
from .message_event_handler import MessageEventHandler
from .postback_event_handler import PostBackEventHandler
from .unfollow_event_handler import UnfollowEventHandler
| [
"shunji.makino@gmail.com"
] | shunji.makino@gmail.com |
a4354d06907b766c2c8e2f23546b79efe0959e4f | 06322e962c80f4c25838318e7d805ae88f0299e5 | /lengths.py | f6546177e6a717d960717d0a920b2e6122347ee7 | [
"BSD-2-Clause"
] | permissive | unixpickle/uno-ai | 6d4ec187e0c158c15cd4240ccf7e894cb599e071 | 3124afc8fa6b0cbcced95ef03ed9672cdb4f35a7 | refs/heads/master | 2020-04-21T10:20:07.310885 | 2019-08-06T15:27:45 | 2019-08-06T15:27:45 | 169,482,953 | 22 | 4 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | """
Measure the lengths of random games.
"""
import random
from uno_ai.game import Game
def main():
    """Play random 4-player games forever, printing each game's length.

    Every move is drawn uniformly from the legal options returned by the
    game, so the printed move counts sample the distribution of game
    lengths under random play. Runs until interrupted (Ctrl-C).
    """
    while True:
        g = Game(4)
        num_moves = 0
        # Play one full game to completion with uniformly random moves.
        while g.winner() is None:
            action = random.choice(g.options())
            g.act(action)
            num_moves += 1
        print(num_moves)
if __name__ == '__main__':
main()
| [
"unixpickle@gmail.com"
] | unixpickle@gmail.com |
9bd919b284a2108b62fb412c5d961bcb422c8d89 | a66460a46611483dfbdc94c7996893f427e60d97 | /ansible/my_env/lib/python2.7/site-packages/ansible/modules/network/f5/bigip_iapp_template.py | 4437352d228d92f1318fbf343532623181c1e425 | [
"GPL-3.0-only",
"MIT"
] | permissive | otus-devops-2019-02/yyashkin_infra | 06b57807dde26f94f501828c07503d6bf1d70816 | 0cd0c003884155ac922e3e301305ac202de7028c | refs/heads/master | 2020-04-29T02:42:22.056724 | 2019-05-15T16:24:35 | 2019-05-15T16:24:35 | 175,780,718 | 0 | 0 | MIT | 2019-05-15T16:24:36 | 2019-03-15T08:37:35 | HCL | UTF-8 | Python | false | false | 15,691 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_iapp_template
short_description: Manages TCL iApp templates on a BIG-IP
description:
- Manages TCL iApp templates on a BIG-IP. This module will allow you to
deploy iApp templates to the BIG-IP and manage their lifecycle. The
conventional way to use this module is to import new iApps as needed
or by extracting the contents of the iApp archive that is provided at
downloads.f5.com and then importing all the iApps with this module.
This module can also update existing iApps provided that the source
of the iApp changed while the name stayed the same. Note however that
this module will not reconfigure any services that may have been
created using the C(bigip_iapp_service) module. iApps are normally
not updated in production. Instead, new versions are deployed and then
existing services are changed to consume that new template. As such,
the ability to update templates in-place requires the C(force) option
to be used.
version_added: 2.4
options:
force:
description:
- Specifies whether or not to force the uploading of an iApp. When
C(yes), will force update the iApp even if there are iApp services
using it. This will not update the running service though. Use
C(bigip_iapp_service) to do that. When C(no), will update the iApp
only if there are no iApp services using the template.
type: bool
name:
description:
- The name of the iApp template that you want to delete. This option
is only available when specifying a C(state) of C(absent) and is
provided as a way to delete templates that you may no longer have
the source of.
content:
description:
- Sets the contents of an iApp template directly to the specified
value. This is for simple values, but can be used with lookup
plugins for anything complex or with formatting. C(content) must
be provided when creating new templates.
state:
description:
- Whether the iApp template should exist or not.
default: present
choices:
- present
- absent
partition:
description:
- Device partition to manage resources on.
default: Common
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Add the iApp contained in template iapp.tmpl
bigip_iapp_template:
content: "{{ lookup('template', 'iapp.tmpl') }}"
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Update a template in place
bigip_iapp_template:
content: "{{ lookup('template', 'iapp-new.tmpl') }}"
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Update a template in place that has existing services created from it.
bigip_iapp_template:
content: "{{ lookup('template', 'iapp-new.tmpl') }}"
force: yes
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
import re
import uuid
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import fq_name
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from f5.utils.iapp_parser import NonextantTemplateNameException
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import fq_name
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from f5.utils.iapp_parser import NonextantTemplateNameException
except ImportError:
HAS_F5SDK = False
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class Parameters(AnsibleF5Parameters):
api_attributes = []
returnables = []
@property
def name(self):
if self._values['name']:
return self._values['name']
if self._values['content']:
try:
name = self._get_template_name()
return name
except NonextantTemplateNameException:
raise F5ModuleError(
"No template name was found in the template"
)
return None
@property
def content(self):
if self._values['content'] is None:
return None
result = self._squash_template_name_prefix()
result = self._replace_template_name(result)
return result
@property
def checksum(self):
return self._values['tmplChecksum']
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
def _squash_template_name_prefix(self):
"""Removes the template name prefix
The IappParser in the SDK treats the partition prefix as part of
the iApp's name. This method removes that partition from the name
in the iApp so that comparisons can be done properly and entries
can be created properly when using REST.
:return string
"""
pattern = r'sys\s+application\s+template\s+/Common/'
replace = 'sys application template '
return re.sub(pattern, replace, self._values['content'])
def _replace_template_name(self, template):
"""Replaces template name at runtime
To allow us to do the switch-a-roo with temporary templates and
checksum comparisons, we need to take the template provided to us
and change its name to a temporary value so that BIG-IP will create
a clone for us.
:return string
"""
pattern = r'sys\s+application\s+template\s+[^ ]+'
if self._values['name']:
name = self._values['name']
else:
name = self._get_template_name()
replace = 'sys application template {0}'.format(fq_name(self.partition, name))
return re.sub(pattern, replace, template)
def _get_template_name(self):
# There is a bug in the iApp parser in the F5 SDK that prevents us from
# using it in all cases to get the name of an iApp. So we'll use this
# pattern for now and file a bug with the F5 SDK
pattern = r'sys\s+application\s+template\s+(?P<path>\/[^\{}"\'*?|#]+\/)?(?P<name>[^\{}"\'*?|#]+)'
matches = re.search(pattern, self._values['content'])
try:
result = matches.group('name').strip()
except IndexError:
result = None
if result:
return result
raise NonextantTemplateNameException
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = None
self.want = Parameters(params=self.module.params)
self.changes = Parameters()
def exec_module(self):
result = dict()
changed = False
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def update(self):
self.have = self.read_current_from_device()
if not self.templates_differ():
return False
if not self.want.force and self.template_in_use():
return False
if self.module.check_mode:
return True
self._remove_iapp_checksum()
# The same process used for creating (load) can be used for updating
self.create_on_device()
self._generate_template_checksum_on_device()
return True
def template_in_use(self):
collection = self.client.api.tm.sys.application.services.get_collection()
fullname = '/{0}/{1}'.format(self.want.partition, self.want.name)
for resource in collection:
if resource.template == fullname:
return True
return False
def read_current_from_device(self):
self._generate_template_checksum_on_device()
resource = self.client.api.tm.sys.application.templates.template.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return Parameters(params=result)
def absent(self):
changed = False
if self.exists():
changed = self.remove()
return changed
def exists(self):
result = self.client.api.tm.sys.application.templates.template.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def _remove_iapp_checksum(self):
"""Removes the iApp tmplChecksum
This is required for updating in place or else the load command will
fail with a "AppTemplate ... content does not match the checksum"
error.
:return:
"""
resource = self.client.api.tm.sys.application.templates.template.load(
name=self.want.name,
partition=self.want.partition
)
resource.modify(tmplChecksum=None)
def templates_differ(self):
# BIG-IP can generate checksums of iApps, but the iApp needs to be
# on the box to do this. Additionally, the checksum is MD5, but it
# is not an MD5 of the entire content of the template. Instead, it
# is a hash of some portion of the template that is unknown to me.
#
# The code below is responsible for uploading the provided template
# under a unique name and creating a checksum for it so that that
# checksum can be compared to the one of the existing template.
#
# Using this method we can compare the checksums of the existing
# iApp and the iApp that the user is providing to the module.
backup = self.want.name
# Override whatever name may have been provided so that we can
# temporarily create a new template to test checksums with
self.want.update({
'name': 'ansible-{0}'.format(str(uuid.uuid4()))
})
# Create and remove temporary template
temp = self._get_temporary_template()
# Set the template name back to what it was originally so that
# any future operations only happen on the real template.
self.want.update({
'name': backup
})
if temp.checksum != self.have.checksum:
return True
return False
def _get_temporary_template(self):
self.create_on_device()
temp = self.read_current_from_device()
self.remove_from_device()
return temp
def _generate_template_checksum_on_device(self):
generate = 'tmsh generate sys application template {0} checksum'.format(
self.want.name
)
self.client.api.tm.util.bash.exec_cmd(
'run',
utilCmdArgs='-c "{0}"'.format(generate)
)
def create(self):
if self.module.check_mode:
return True
self.create_on_device()
if self.exists():
return True
else:
raise F5ModuleError("Failed to create the iApp template")
def create_on_device(self):
remote_path = "/var/config/rest/downloads/{0}".format(self.want.name)
load_command = 'tmsh load sys application template {0}'.format(remote_path)
template = StringIO(self.want.content)
upload = self.client.api.shared.file_transfer.uploads
upload.upload_stringio(template, self.want.name)
output = self.client.api.tm.util.bash.exec_cmd(
'run',
utilCmdArgs='-c "{0}"'.format(load_command)
)
if hasattr(output, 'commandResult'):
result = output.commandResult
if 'Syntax Error' in result:
raise F5ModuleError(output.commandResult)
if 'ERROR' in result:
raise F5ModuleError(output.commandResult)
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the iApp template")
return True
def remove_from_device(self):
resource = self.client.api.tm.sys.application.templates.template.load(
name=self.want.name,
partition=self.want.partition
)
resource.delete()
class ArgumentSpec(object):
    """Ansible argument spec for the bigip_iapp_template module.

    Combines the shared F5 connection arguments (f5_argument_spec) with
    the module-specific options documented in DOCUMENTATION above.
    """
    def __init__(self):
        self.supports_check_mode = True
        # Module-specific options; types/choices mirror DOCUMENTATION.
        argument_spec = dict(
            name=dict(),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            force=dict(
                type='bool'
            ),
            content=dict(),
            partition=dict(
                default='Common',
                # Allow the partition to come from the environment as well.
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        # Shared F5 args first, then module-specific ones on top.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the AnsibleModule, run the manager, exit.

    Exits via module.exit_json() on success or module.fail_json() when an
    F5ModuleError is raised during execution.
    """
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    if not HAS_F5SDK:
        module.fail_json(msg="The python f5-sdk module is required")

    # Bug fix: `client` was only bound inside the try block, so if
    # F5Client(**module.params) itself raised F5ModuleError the except
    # path crashed with a NameError at cleanup_tokens(client), masking
    # the real error. Bind it up front and only clean up a client that
    # was actually created.
    client = None
    try:
        client = F5Client(**module.params)
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        cleanup_tokens(client)
        module.exit_json(**results)
    except F5ModuleError as e:
        if client is not None:
            cleanup_tokens(client)
        module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| [
"theyashkins@gmail.com"
] | theyashkins@gmail.com |
bcb86e89e8d220c443eda53ff9c0bc4cdc174724 | 1f2342cb4bc357aa6af572a1d705d045e31dd173 | /WORKSHOPS/Workshop 9/factorial.py | 0605102f61fc4fea11ddf30452fca5c02f30e6e3 | [] | no_license | Ogaday/Programming-for-Science | 3702c8362d6a60f106ffa67b5b2a066519c91de3 | 806a9c7849f455777a6ec226fd1918f980191f78 | refs/heads/master | 2021-01-10T15:57:12.831839 | 2015-06-01T23:05:46 | 2015-06-01T23:05:46 | 36,625,188 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | def factorial(n):
"""
return the recursive factorial of n
"""
if n == 1:
return 1
else:
return n*factorial(n-1)
if __name__ == "__main__":
print """Welcome to the FACTORIAL CALCULATOR\n \nTo use the calculator, enter the number for which you want the factorial of, then press enter. In order to quit, type 'q' or 'quit'"""
while True:
x = raw_input("==> ")
if x.lower() == "q" or x.lower() == "quit":
print "Thank you for using this program"
break
else:
try:
print factorial(int(x))
except:
print "Please enter input again" | [
"w.ogaday@gmail.com"
] | w.ogaday@gmail.com |
60d320dea98839269dab10b44bf3d83b288fe2b7 | 42b38dd5fe75148a5727760847fcea5597f9d52f | /user_auth/vendors/top/api/rest/__init__.py | 503bd7d8a0e83539629ab85ca8dad1ed8382e3e8 | [] | no_license | naitianliu/hwserver | 9d24c2ea405a6dcfafe7aa38e42a768e496608e6 | 06ddcb114cd4c1b4d8b647998b4b4637789d6b43 | refs/heads/master | 2022-12-14T10:59:43.509971 | 2016-12-18T15:38:29 | 2016-12-18T15:38:29 | 61,244,155 | 0 | 0 | null | 2022-12-07T23:39:07 | 2016-06-15T22:03:04 | Python | UTF-8 | Python | false | false | 1,968 | py | from user_auth.vendors.top.api.rest.TopIpoutGetRequest import TopIpoutGetRequest
from user_auth.vendors.top.api.rest.AlibabaAliqinFcFlowChargeProvinceRequest import AlibabaAliqinFcFlowChargeProvinceRequest
from user_auth.vendors.top.api.rest.HttpdnsGetRequest import HttpdnsGetRequest
from user_auth.vendors.top.api.rest.TopSecretGetRequest import TopSecretGetRequest
from user_auth.vendors.top.api.rest.AlibabaAliqinFcFlowQueryRequest import AlibabaAliqinFcFlowQueryRequest
from user_auth.vendors.top.api.rest.KfcKeywordSearchRequest import KfcKeywordSearchRequest
from user_auth.vendors.top.api.rest.TopatsTaskDeleteRequest import TopatsTaskDeleteRequest
from user_auth.vendors.top.api.rest.TimeGetRequest import TimeGetRequest
from user_auth.vendors.top.api.rest.AlibabaAliqinFcSmsNumSendRequest import AlibabaAliqinFcSmsNumSendRequest
from user_auth.vendors.top.api.rest.AlibabaAliqinFcTtsNumSinglecallRequest import AlibabaAliqinFcTtsNumSinglecallRequest
from user_auth.vendors.top.api.rest.AlibabaAliqinFcSmsNumQueryRequest import AlibabaAliqinFcSmsNumQueryRequest
from user_auth.vendors.top.api.rest.AlibabaAliqinFcFlowChargeRequest import AlibabaAliqinFcFlowChargeRequest
from user_auth.vendors.top.api.rest.TopatsResultGetRequest import TopatsResultGetRequest
from user_auth.vendors.top.api.rest.AreasGetRequest import AreasGetRequest
from user_auth.vendors.top.api.rest.TopAuthTokenCreateRequest import TopAuthTokenCreateRequest
from user_auth.vendors.top.api.rest.AlibabaAliqinFcFlowGradeRequest import AlibabaAliqinFcFlowGradeRequest
from user_auth.vendors.top.api.rest.AlibabaAliqinFcVoiceNumDoublecallRequest import AlibabaAliqinFcVoiceNumDoublecallRequest
from user_auth.vendors.top.api.rest.AlibabaAliqinFcVoiceNumSinglecallRequest import AlibabaAliqinFcVoiceNumSinglecallRequest
from user_auth.vendors.top.api.rest.TopAuthTokenRefreshRequest import TopAuthTokenRefreshRequest
from user_auth.vendors.top.api.rest.AppipGetRequest import AppipGetRequest
| [
"naitianliu@gmail.com"
] | naitianliu@gmail.com |
571b5e21a17bb0386eb30bd81b021035a58c3802 | 5b56d0ec345d19c3e9c17764cdfa4ef8180f25e0 | /2020-01-python/api.py | fd5f9add8cd66d0c4436d45b28fc09d9b3c73da0 | [] | no_license | suzuki-hoge/warikan | 6e6d5f814fe4a9130b61a416f495326c316e2a8c | d47c32338421d4c6c88022a7d64a478e79708835 | refs/heads/master | 2020-12-04T08:54:07.960635 | 2020-02-07T03:29:52 | 2020-02-07T10:09:56 | 231,702,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,406 | py | from bottle import route, get, post, put, request, response, hook, run
import json
import db, party
def handle(f):
    """Decorator for API handlers: wrap results in a JSON status envelope.

    On success returns {'status': 'ok'} (plus a 'result' key when the
    handler returned a non-None value); on any exception returns
    {'status': 'ng', 'error': <message>} instead of propagating.
    """
    import functools

    @functools.wraps(f)  # keep the handler's __name__ for bottle/debugging
    def wrapper(*args, **kwargs):
        try:
            result = f(*args, **kwargs)
            if result is None:
                return {'status': 'ok'}
            return {'status': 'ok', 'result': result}
        except BaseException as e:
            # Bug fix: the original used e.message, which does not exist on
            # Python 3 (and was deprecated since 2.6), so the error branch
            # itself raised AttributeError. str(e) yields the same text.
            return {'status': 'ng', 'error': str(e)}
    return wrapper
@hook('after_request')
def allow_cors():
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'Content-Type'
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'
@route('<any:path>', method = 'OPTIONS')
def options(**kwargs):
return {}
@get('/party/<partyName>')
@handle
def find(partyName):
    """GET a party by name as a JSON-serialisable dict, participants included."""
    # Local serialisers: domain objects -> plain dicts for the envelope.
    def party_dict(p):
        return {'partyName': p.partyName, 'partyHoldAt': p.partyHoldAt, 'participants': map(participant_dict, p.participants), 'billingAmount': p.billingAmount, 'adjustingUnitAmount': p.adjustingUnitAmount}
    def participant_dict(p):
        return {'participantName': p.participantName, 'participantType': p.participantType, 'paymentSection': p.paymentSection}
    # NOTE(review): map(...) returns a list only on Python 2; on Python 3 it
    # is a non-serialisable map object — confirm the intended runtime.
    return party_dict(db.read(partyName))
@post('/party/plan')
@handle
def plan():
p = request.json
new = party.Party.plan(p.get('partyName'), p.get('partyHoldAt'), p.get('secretaryName'), p.get('paymentSection'), p.get('billingAmount'), p.get('adjustingUnitAmount'))
db.write(new)
@put('/party/<partyName>/add')
@handle
def add(partyName):
p = request.json
found = db.read(partyName)
updated = found.add(party.Participant(p.get('participantName'), 'NotSec', p.get('paymentSection')))
db.write(updated)
@put('/party/<partyName>/remove')
@handle
def remove(partyName):
p = request.params
found = db.read(partyName)
updated = found.remove(p.participantName)
db.write(updated)
@put('/party/<partyName>/change')
@handle
def change(partyName):
p = request.json
found = db.read(partyName)
updated = found.change(p.get('adjustingUnitAmount'))
db.write(updated)
@get('/party/<partyName>/demand')
@handle
def demand(partyName):
    """GET each participant's computed payment amount for the party."""
    found = db.read(partyName)
    # found.demand() yields (participantName, paymentAmount) pairs.
    # NOTE(review): the tuple-unpacking lambda below is Python-2-only
    # syntax (removed in Python 3) — this module targets Python 2.
    return map(lambda (participantName, paymentAmount): {'participantName': participantName, 'paymentAmount': str(paymentAmount)}, found.demand())
run(host = 'localhost', port = 9000)
| [
"user.ryo@gmail.com"
] | user.ryo@gmail.com |
01df404873ee9e3bba62ab69c2e05d7863ae98c4 | 2ce0c770b6ebf1122cfe2cc02b943101172920f4 | /wwt_data_formats/tests/test_wtml_tree.py | 56668db83d32b8c0c1913e626cf661c4e392067c | [
"MIT"
] | permissive | WorldWideTelescope/wwt_data_formats | 48269945ab835706f75fbf56801c5f19c38c1930 | 8f3a977b87d36c5a903e3bf63ff2ea89547447bb | refs/heads/master | 2022-10-31T02:02:51.003406 | 2022-10-25T19:49:38 | 2022-10-25T19:49:38 | 225,955,212 | 2 | 4 | MIT | 2023-08-18T00:18:54 | 2019-12-04T20:54:27 | Python | UTF-8 | Python | false | false | 833 | py | # -*- mode: python; coding: utf-8 -*-
# Copyright 2019-2020 the .NET Foundation
# Licensed under the MIT License.
from __future__ import absolute_import, division, print_function
import os.path
from .. import cli
from . import tempdir
def test_cli(tempdir):
    """Smoke-test the `tree` CLI subcommands end-to-end.

    Fetches the live builtin-image-sets WTML index into a temporary
    directory (requires network access), then runs the read-only
    subcommands against the fetched tree to make sure none of them crash.
    No output is asserted — this only checks the commands run at all.
    """
    prev_dir = os.getcwd()
    try:
        os.chdir(tempdir)
        # `tree fetch` downloads the WTML index tree into the cwd.
        cli.entrypoint(
            [
                "tree",
                "fetch",
                "https://web.wwtassets.org/engine/assets/builtin-image-sets.wtml",
            ]
        )
        # Read-only commands over the freshly fetched tree.
        cli.entrypoint(["tree", "summarize"])
        cli.entrypoint(["tree", "print-image-urls"])
        cli.entrypoint(["tree", "print-dem-urls"])
    finally:
        # Windows can't remove the temp tree unless we chdir out of it.
        os.chdir(prev_dir)
| [
"peter@newton.cx"
] | peter@newton.cx |
15c2472e0bb613f8974faf6aebc33081848cd35c | f493d8c49fa2c34cb7bb6bc055ae478a6bfb068c | /bike_sharing.py | bd0f8aad95de5553ec75ef08b46807d338f1cdef | [] | no_license | yangshiyu89/bike_sharing | e69e64bd8c2f7982bfb0d84eadb777a553d96699 | bfdd57fb2eb525297901c49879e926e2bb338760 | refs/heads/master | 2021-01-21T11:30:09.827920 | 2017-03-01T16:14:44 | 2017-03-01T16:14:44 | 83,567,528 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,126 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 1 17:37:44 2017
@author: yangshiyu89
"""
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
# Define dl net
def Neural_Net(train_features, train_targets, val_features, val_targets, test_features):
    """Train a one-hidden-layer regression network and predict on the test set.

    Uses the TensorFlow 1.x graph API (placeholders + Session); trains
    full-batch Adam for 1001 epochs, logging losses every 10 epochs.

    :param train_features: 2-D array-like, shape (n_train, n_features)
    :param train_targets: 2-D array-like, shape (n_train, n_outputs)
    :param val_features: validation features (used for progress logging only)
    :param val_targets: validation targets (used for progress logging only)
    :param test_features: features to predict for
    :return: numpy array of predictions for ``test_features``
    """
    features = tf.placeholder(tf.float32, shape=[None, train_features.shape[1]])
    targets = tf.placeholder(tf.float32, shape=[None, train_targets.shape[1]])
    # One hidden layer of 25 ReLU units, linear output layer.
    W_1 = tf.Variable(tf.truncated_normal(shape=[train_features.shape[1], 25], dtype=tf.float32, stddev=0.001))
    b_1 = tf.Variable(tf.zeros(shape=[25], dtype=tf.float32))
    W_2 = tf.Variable(tf.truncated_normal(shape=[25, train_targets.shape[1]], dtype=tf.float32, stddev=0.001))
    b_2 = tf.Variable(tf.zeros(shape=[train_targets.shape[1]], dtype=tf.float32))
    layer = tf.add(tf.matmul(features, W_1), b_1)
    layer = tf.nn.relu(layer)
    predict = tf.add(tf.matmul(layer, W_2), b_2)
    # Mean squared error.
    loss = tf.reduce_mean(tf.pow(targets - predict, 2))
    optimizer = tf.train.AdamOptimizer(learning_rate = 0.1).minimize(loss)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(1001):
            sess.run(optimizer, feed_dict={features:train_features, targets:train_targets})
            if epoch%10 == 0:
                # Bug fix: this quantity is the *training* loss (it is fed the
                # training data) but was previously named and printed "cost_test".
                cost_train = sess.run(loss, feed_dict={features:train_features, targets:train_targets})
                cost_val = sess.run(loss, feed_dict={features:val_features, targets:val_targets})
                print("epoch {:4d}; cost_train: {:.4f}; cost_val: {:.4f}".format(epoch, cost_train, cost_val))
        predict_targets = sess.run(predict, feed_dict={features:test_features})
    return predict_targets
if __name__ == "__main__":
    # Load and prepare the data
    data_path = "Bike-Sharing-Dataset/hour.csv"
    rides = pd.read_csv(data_path)
    # One-hot encode the categorical columns (dummy variables)
    dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
    for each in dummy_fields:
        dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
        rides = pd.concat([rides, dummies], axis=1)
    # Drop the original categorical columns plus fields not used as inputs
    fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
                      'weekday', 'atemp', 'mnth', 'workingday', 'hr']
    data = rides.drop(fields_to_drop, axis=1)
    # Standardize (z-score) the continuous variables and the targets
    quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
    # Store scalings in a dictionary so we can convert back later
    scaled_features = {}
    for each in quant_features:
        mean, std = data[each].mean(), data[each].std()
        scaled_features[each] = [mean, std]
        data.loc[:, each] = (data[each] - mean)/std
    # Splitting the data into training, testing, and validation sets
    # Save the last 21 days (hourly data, hence the *24)
    test_data = data[-21*24:]
    data = data[:-21*24]
    # Separate the data into features and targets
    target_fields = ['cnt', 'casual', 'registered']
    features, targets = data.drop(target_fields, axis=1), data[target_fields]
    test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
    # Hold out the last 60 days of the remaining data as a validation set
    train_features, train_targets = features[:-60*24], targets[:-60*24]
    val_features, val_targets = features[-60*24:], targets[-60*24:]
    # NOTE(review): `series[:, np.newaxis]` relies on old pandas 2-D Series
    # indexing; on modern pandas use `.values[:, None]` instead — confirm.
    predict_targets = Neural_Net(train_features, train_targets['cnt'][:, np.newaxis], val_features, val_targets['cnt'][:, np.newaxis], test_features)
    # Check the prediction: un-scale and plot predictions vs. actual counts
    fig, ax = plt.subplots(figsize=(8,4))
    mean, std = scaled_features['cnt']
    predictions = predict_targets*std + mean
    ax.plot(predictions[:], label='Prediction')
    ax.plot((test_targets['cnt']*std + mean).values, label='Data')
    ax.set_xlim(right=len(predictions))
    ax.legend()
    # Label the x axis with dates every 24 hours, starting at noon
    # NOTE(review): DataFrame.ix was removed in pandas 1.0; use .loc here.
    dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
    dates = dates.apply(lambda d: d.strftime('%b %d'))
    ax.set_xticks(np.arange(len(dates))[12::24])
    _ = ax.set_xticklabels(dates[12::24], rotation=45)
| [
"noreply@github.com"
] | yangshiyu89.noreply@github.com |
505e01d16c4946a2cc61a71edd7d0ee2504ca6d6 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/network/v20171001/get_virtual_network_gateway_bgp_peer_status.py | ce971110c0cb3c1a127751e2520bf66c4337635f | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,690 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayBgpPeerStatusResult',
'AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult',
'get_virtual_network_gateway_bgp_peer_status',
]
# Auto-generated Pulumi output type; attribute storage is delegated to the SDK
# via pulumi.set/pulumi.get rather than ordinary instance attributes.
@pulumi.output_type
class GetVirtualNetworkGatewayBgpPeerStatusResult:
    """
    Response for list BGP peer status API service call
    """
    def __init__(__self__, value=None):
        # Reject truthy non-list values; None (no data) is accepted as-is.
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.BgpPeerStatusResponseResult']]:
        """
        List of BGP peers
        """
        return pulumi.get(self, "value")
class AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult(GetVirtualNetworkGatewayBgpPeerStatusResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Awaitable shim generated by the Pulumi SDK: the unreachable `yield`
        # marks this as a generator so the object can be awaited; awaiting it
        # completes immediately with a plain result object.
        if False:
            yield self
        return GetVirtualNetworkGatewayBgpPeerStatusResult(
            value=self.value)
def get_virtual_network_gateway_bgp_peer_status(peer: Optional[str] = None,
                                                resource_group_name: Optional[str] = None,
                                                virtual_network_gateway_name: Optional[str] = None,
                                                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult:
    """
    Response for list BGP peer status API service call


    :param str peer: The IP address of the peer to retrieve the status of.
    :param str resource_group_name: The name of the resource group.
    :param str virtual_network_gateway_name: The name of the virtual network gateway.
    :param pulumi.InvokeOptions opts: Options controlling the invoke (version is
        filled in from the SDK when not provided).
    """
    # Map the Python snake_case arguments onto the camelCase invoke arguments
    # expected by the Azure provider.
    __args__ = dict()
    __args__['peer'] = peer
    __args__['resourceGroupName'] = resource_group_name
    __args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; the awaitable wrapper below lets callers
    # use the result with or without `await`.
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20171001:getVirtualNetworkGatewayBgpPeerStatus', __args__, opts=opts, typ=GetVirtualNetworkGatewayBgpPeerStatusResult).value

    return AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult(
        value=__ret__.value)
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
ae137b1b1b702ea94707b85faf4024ec372f1832 | 83d36e8795b19d537fab32c4ced52359561a6b3b | /ingredients/apps.py | b0fea569eed9b6ec9258bfa94c37a231c4b4fcd0 | [] | no_license | vubon/django-graphql | b1325ebc31136d19b5ca5b5fd85c6fea98972e6c | 9586b5b5098dfeb25aa26521b24bc6c3beb333bc | refs/heads/master | 2020-04-14T08:02:04.957523 | 2019-12-05T05:02:46 | 2019-12-05T05:02:46 | 163,727,935 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | from django.apps import AppConfig
class IngrdientsConfig(AppConfig):
    """Django app configuration for the ``ingredients`` app."""
    # NOTE(review): the class name is missing an "e" ("IngredientsConfig");
    # renaming it would require updating any reference in settings/apps config.
    name = 'ingredients'
| [
"vubon.roy@gmail.com"
] | vubon.roy@gmail.com |
c7a3468c7cae4eb4836690dd475d98f13f9a6ac2 | f854ef28002a3931a8d8b8d0b9cc691b8a449db3 | /home-assistant/custom_components/hacs/helpers/classes/manifest.py | c0e43b9ba3f570e1740dbe3c9e52024391ae5891 | [
"MIT"
] | permissive | Burningstone91/smart-home-setup | 030cdaa13d05fb19a82b28ea455614d3276522ab | c2f34cc8b8243bc6ce620b3f03e3e44ff28150ca | refs/heads/master | 2023-02-23T06:25:04.476657 | 2022-02-26T16:05:02 | 2022-02-26T16:05:02 | 239,319,680 | 421 | 36 | MIT | 2023-02-08T01:16:54 | 2020-02-09T14:39:06 | JavaScript | UTF-8 | Python | false | false | 1,156 | py | """
Manifest handling of a repository.
https://hacs.xyz/docs/publish/start#hacsjson
"""
from typing import List
import attr
from custom_components.hacs.exceptions import HacsException
@attr.s(auto_attribs=True)
class HacsManifest:
    """In-memory representation of a repository's hacs.json manifest.

    Defaults mirror the documented hacs.json schema; ``from_dict`` is the
    only supported constructor for external data.
    """

    name: str = None
    content_in_root: bool = False
    zip_release: bool = False
    filename: str = None
    # Bug fix: mutable defaults must be Factory instances. A plain ``{}``/``[]``
    # class-level default is evaluated once and shared by every instance.
    manifest: dict = attr.Factory(dict)
    hacs: str = None
    hide_default_branch: bool = False
    domains: List[str] = attr.Factory(list)
    country: List[str] = attr.Factory(list)
    homeassistant: str = None
    persistent_directory: str = None
    iot_class: str = None
    render_readme: bool = False

    @staticmethod
    def from_dict(manifest: dict):
        """Build a HacsManifest from a parsed hacs.json dict.

        :param manifest: parsed hacs.json contents
        :raises HacsException: if ``manifest`` is None
        """
        if manifest is None:
            raise HacsException("Missing manifest data")

        manifest_data = HacsManifest()
        manifest_data.manifest = manifest

        # Normalize a single country string into a one-element list.
        if country := manifest.get("country"):
            if isinstance(country, str):
                manifest["country"] = [country]

        # Copy every key verbatim onto the instance (unknown keys included).
        for key in manifest:
            setattr(manifest_data, key, manifest[key])

        return manifest_data
| [
"dimitri.steiner.gl@gmail.com"
] | dimitri.steiner.gl@gmail.com |
7343fb8defbea9a314d6f3be0e874c35f13e8940 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_1/dlmmin002/question3.py | 7a33ac4f48f3eddf6202f2094e5bd3b2da9e4fde | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | #personal spam message
#nolwazi dlamini
#3 march 2014
# Gather the personal details used to fill in the scam-letter template.
name = input("Enter first name: \n")
surname = input("Enter last name: \n")
# NOTE(review): eval() on raw user input is dangerous; kept only because the
# original accepts arbitrary numeric expressions here.
money = eval(input("Enter sum of money in USD: \n"))
country = input("Enter country name: \n")

# Print the personalized letter, line by line.
print(f"\nDearest {name}")
print("It is with a heavy heart that I inform you of the death of my father,")
print(f"General Fayk {surname}, your long lost relative from Mapsfostol.")
print(f"My father left the sum of {money}USD for us, your distant cousins. ")
print(f"Unfortunately, we cannot access the money as it is in a bank in {country}.")
print("I desperately need your assistance to access this money.")
print(f"I will even pay you generously, 30% of the amount - {money*0.3}USD,")
print("for your help. Please get in touch with me at this email address asap.")
print("Yours sincerely")
print(f"Frank {surname}")
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
d24f43978d7bc3dc1bd471970dceef5f2bbfb976 | 8d402df39c18eba7e1c86c762f205c944357c5df | /setup/brython/make_file_system.py | b94bae446d191d813018f82085a1dbda517fd43c | [
"BSD-3-Clause"
] | permissive | brython-dev/brython | 87cc023e25550dec9ce459ba68774189f33712b6 | b33958bff0e8c7a280babc30232dc389a2500a7a | refs/heads/master | 2023-09-04T04:49:29.156209 | 2023-09-01T06:36:08 | 2023-09-01T06:36:08 | 24,046,239 | 6,569 | 625 | BSD-3-Clause | 2023-07-05T06:13:32 | 2014-09-15T06:58:21 | Python | UTF-8 | Python | false | false | 2,113 | py | import json
import os
import stat
import sys
import binascii
def make(vfs_name, prefix=None):
    """Called by

        python -m brython --make_file_system <vfs_name> <prefix>

    Build a Virtual File System: a Javascript file embedding every file found
    under the current directory and its subdirectories.

    The result is written to "<vfs_name>.vfs.js" in the current directory. It
    holds a dictionary keyed by file paths relative to the current directory
    (a file "data.txt" here gets key "data.txt"; "names.txt" inside the
    subdirectory "address" gets key "address/names.txt").

    If <prefix> is given it is prepended, followed by "/", to every key: with
    prefix "info" the keys above become "info/data.txt" and
    "info/address/names.txt".

    Python files can be embedded this way, but *programs will not be able to
    import them*; use --modules for that.
    """
    root = os.getcwd()
    out_name = f"{vfs_name}.vfs.js"
    prefix_parts = prefix.split("/") if prefix else []
    print("virtual dir", prefix_parts)

    file_map = {}
    for dirpath, _dirnames, filenames in os.walk(root):
        # Path segments of this directory relative to the walk root.
        rel = os.path.relpath(dirpath, root)
        segments = [] if rel == os.curdir else rel.split(os.sep)
        for name in filenames:
            # Never embed previously generated VFS files.
            if name.endswith(".vfs.js"):
                continue
            key = "/".join(prefix_parts + segments + [name])
            with open(os.path.join(dirpath, name), "rb") as src:
                # File content is base64-encoded
                payload = binascii.b2a_base64(src.read()).decode('ascii')
                info = os.fstat(src.fileno())
            file_map[key] = {
                "content": payload,
                "ctime": info.st_ctime,
                "mtime": info.st_mtime
            }

    print(list(file_map))
    with open(out_name, "w", encoding="utf-8") as out:
        out.write("__BRYTHON__.add_files(")
        json.dump(file_map, out, indent=4)
        out.write(")")
"quentel.pierre@orange.fr"
] | quentel.pierre@orange.fr |
eee5f7823e7fce0bab38226e4c40fa15cbb05802 | 7c8fe9cf38de89dba5ed7afa9558739f037f9cc7 | /01-webtron/webtron/webtron.py | efce6211b9a1a257a1922e78b58141c9b4c10396 | [] | no_license | inigokintana/automating-AWS-python | 94b373dba4902414b37da00d4afe97f97007c1d6 | 4ee5ce245d03f4c339feee5c745b6b1fac982cf7 | refs/heads/master | 2022-02-17T02:34:17.097839 | 2019-08-01T18:16:55 | 2019-08-01T18:16:55 | 162,336,994 | 0 | 0 | null | 2022-01-21T19:45:27 | 2018-12-18T19:37:31 | Python | UTF-8 | Python | false | false | 1,511 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Webotron: Deploy websites with aws.
Webotron automates the process of deploying static websites to AWS.
- Configure AWS S3 buckets
- Create them
- Set them up for static website hosting
- Deploy local files to them
- Configure DNS with AWS Route 53
- Configure a Content Delivery Network and SSL with AWS CloudFront
"""
import boto3
import click
from bucket import BucketManager
# Module-level AWS session shared by every CLI command.
# NOTE(review): the profile name is hard-coded; a --profile option would be
# friendlier for other environments.
session = boto3.Session(profile_name='pythonAutomation')
bucket_manager = BucketManager(session)


@click.group()
def cli():
    """Webotron deploys websites to AWS."""
    pass


@cli.command('list-buckets')
def list_buckets():
    """List all s3 buckets."""
    for bucket in bucket_manager.all_buckets():
        print(bucket)


@cli.command('list-bucket-objects')
@click.argument('bucket')
def list_bucket_objects(bucket):
    """List objects in an s3 bucket."""
    for obj in bucket_manager.all_objects(bucket):
        print(obj)


@cli.command('setup-bucket')
@click.argument('bucket')
def setup_bucket(bucket):
    """Create and configure S3 bucket."""
    # Create (or reuse) the bucket, then make it publicly readable and
    # enable static-website hosting on it.
    s3_bucket = bucket_manager.init_bucket(bucket)
    bucket_manager.set_policy(s3_bucket)
    bucket_manager.configure_website(s3_bucket)
    return


@cli.command('sync')
@click.argument('pathname', type=click.Path(exists=True))
@click.argument('bucket')
def sync(pathname, bucket):
    """Sync contents of PATHNAME to BUCKET."""
    bucket_manager.sync(pathname, bucket)


if __name__ == '__main__':
    cli()
| [
"inigokintana@gmail.com"
] | inigokintana@gmail.com |
d0bcf451fb15a4a5e6e3ce2d28b88e1a6043437e | 3bbaf5a5dd1d19f207c01f14e06a58f6faf9a1f2 | /helpers.py | 8735343af6dcaf46167a733d832f002d37073425 | [
"MIT"
] | permissive | kelly4strength/hmchallenge | f06e1a5288a73ba523e39e5cfbf0d36c33148478 | 2195d8fa2d9acdad7a088b1422a7fb5aa3b06b28 | refs/heads/master | 2021-01-19T10:45:55.459291 | 2017-02-25T05:42:16 | 2017-02-25T05:42:16 | 82,221,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | """helper functions"""
# from model import word, user_guess, partial_word
# import unnecessary
# def show_correct_guess_letter(word, user_guess, partial_word):
# """function to show current guess letter if it is in the word"""
# for i in range(len(word)):
# if user_guess != word[i]:
# partial_word = partial_word + "_ "
# else:
# partial_word = partial_word + word[i]
# return partial_word
def generate_partial_word(word, correct_guess_list):
    """Return *word* with every letter not yet guessed replaced by "_".

    :param word: the secret word being guessed
    :param correct_guess_list: letters the player has guessed correctly
    :return: a string the same length as *word* with unguessed letters masked

    Set membership also fixes a bug in the original inner loop: a letter
    appearing more than once in ``correct_guess_list`` was appended once per
    occurrence, producing a result longer than the word.
    """
    guessed = set(correct_guess_list)
    return "".join(letter if letter in guessed else "_" for letter in word)
#if there is no match to word[i] then add a underscore for that index
# only append underscore after all matches are determined
| [
"kellyhoffer@Kellys-MacBook-Pro-2.local"
] | kellyhoffer@Kellys-MacBook-Pro-2.local |
6dbdb7e147eb6c53ea0f4d1c2a060d11bbd6dfd2 | 3f6c3ac0800f5915ba1d0a26e7534ac5771145db | /src/software/simulated_tests/er_force_simulator.py | 0553ffb6a5f185f43d24937bb939534a77f40e1a | [
"LGPL-3.0-only"
] | permissive | LiCody/Software | 10b02612ab93b90b423cb0be94a5aa721f059095 | 8105c5f90d5d3b3d4ffa275a10b6fd4e81a4520b | refs/heads/master | 2023-07-09T09:04:30.463000 | 2022-04-13T04:32:30 | 2022-04-13T04:32:30 | 211,555,649 | 0 | 0 | MIT | 2019-10-09T03:30:30 | 2019-09-28T20:07:17 | C | UTF-8 | Python | false | false | 7,447 | py | from subprocess import Popen
from proto.import_all_protos import *
from software.networking.threaded_unix_listener import ThreadedUnixListener
from software.networking.threaded_unix_sender import ThreadedUnixSender
from software.py_constants import *
class ErForceSimulator(object):
    def __init__(self, runtime_dir="/tmp/tbots"):
        """Runs our standalone er-force simulator binary and sets up the unix
        sockets to communicate with it

        :param runtime_dir: The unix path to run everything

        """
        # inputs to er_force_simulator_main
        self.sim_tick_sender = ThreadedUnixSender(runtime_dir + SIMULATION_TICK_PATH)
        self.world_state_sender = ThreadedUnixSender(runtime_dir + WORLD_STATE_PATH)
        self.blue_world_sender = ThreadedUnixSender(runtime_dir + BLUE_WORLD_PATH)
        self.yellow_world_sender = ThreadedUnixSender(runtime_dir + YELLOW_WORLD_PATH)
        self.blue_primitive_set_sender = ThreadedUnixSender(
            runtime_dir + BLUE_PRIMITIVE_SET
        )
        self.yellow_primitive_set_sender = ThreadedUnixSender(
            runtime_dir + YELLOW_PRIMITIVE_SET
        )

        # outputs from er_force_sim_main
        self.ssl_wrapper_listener = ThreadedUnixListener(
            runtime_dir + SSL_WRAPPER_PACKET_PATH, SSL_WrapperPacket
        )
        self.blue_robot_status_listener = ThreadedUnixListener(
            runtime_dir + BLUE_ROBOT_STATUS_PATH, RobotStatus
        )
        self.yellow_robot_status_listener = ThreadedUnixListener(
            runtime_dir + YELLOW_ROBOT_STATUS_PATH, RobotStatus,
        )

        # Cached WorldState proto, mutated by the setup_* helpers and re-sent
        # to the simulator on every change.
        self.world_state = WorldState()
        self.simulator_process = Popen(["software/er_force_simulator_main"])

    def __setup_robots(self, robot_locations, team_colour):
        """Initializes the world from a list of robot locations

        :param robot_locations: A list of robot locations (index is robot id)
        :param team_colour: The color (either "blue" or "yellow")

        """
        if "blue" in team_colour:
            robot_map = self.world_state.blue_robots
        else:
            robot_map = self.world_state.yellow_robots

        # Robots are placed at the given positions with zero orientation and
        # zero linear/angular velocity.
        for robot_id, robot_location in enumerate(robot_locations):
            robot_map[robot_id].CopyFrom(
                RobotState(
                    global_position=Point(
                        x_meters=robot_location.x(), y_meters=robot_location.y()
                    ),
                    global_orientation=Angle(radians=0),
                    global_velocity=Vector(x_component_meters=0, y_component_meters=0),
                    global_angular_velocity=AngularVelocity(radians_per_second=0),
                )
            )

        self.setup_world(self.world_state)

    def setup_blue_robots(self, robot_locations):
        """Initializes the world from a list of robot locations

        :param robot_locations: A list of robot locations (index is robot id)

        """
        self.__setup_robots(robot_locations, "blue")

    def setup_yellow_robots(self, robot_locations):
        """Initializes the world from a list of robot locations

        :param robot_locations: A list of robot locations (index is robot id)

        """
        self.__setup_robots(robot_locations, "yellow")

    def setup_ball(self, ball_position, ball_velocity, distance_from_ground=0):
        """Setup the ball with the x, y coordinates in meters

        :param ball_position: A tuple with the x,y coordinates
        :param ball_velocity: A tuple with the x,y velocity components
        :param distance_from_ground: How high up to start the ball

        """
        self.world_state.ball_state.CopyFrom(
            BallState(
                global_position=Point(
                    x_meters=ball_position.x(), y_meters=ball_position.y(),
                ),
                global_velocity=Vector(
                    x_component_meters=ball_velocity.x(),
                    y_component_meters=ball_velocity.y(),
                ),
                distance_from_ground=distance_from_ground,
            )
        )
        self.setup_world(self.world_state)

    def setup_world(self, world_state):
        """Pass in a world_state proto directly to setup the simulator

        :param world_state: The world state to initialize with

        """
        self.world_state_sender.send(world_state)

    def __get_sensor_proto(self, ssl_wrapper, robot_status_listener):
        """Helper function to create a sensor proto

        :param ssl_wrapper: The ssl_wrapper packet to put in the sensor proto
        :param robot_status_listener: The robot status listener (blue or yellow)
        :returns: A sensor proto with the robot status from the listener

        """
        sensor_proto = SensorProto()

        if ssl_wrapper:
            sensor_proto.ssl_vision_msg.CopyFrom(ssl_wrapper)

        # Drain the listener: collect every queued RobotStatus message.
        robot_status = robot_status_listener.get_most_recent_message()
        packets = []

        while robot_status is not None:
            packets.append(robot_status)
            robot_status = robot_status_listener.get_most_recent_message()

        sensor_proto.robot_status_msgs.extend(packets)
        return sensor_proto

    def get_blue_sensor_proto(self, ssl_wrapper):
        """Returns the blue sensor proto

        :param ssl_wrapper: The wrapper to pack in the sensor proto

        """
        return self.__get_sensor_proto(ssl_wrapper, self.blue_robot_status_listener)

    def get_yellow_sensor_proto(self, ssl_wrapper):
        """Returns the yellow sensor proto

        :param ssl_wrapper: The wrapper to pack in the sensor proto

        """
        return self.__get_sensor_proto(ssl_wrapper, self.yellow_robot_status_listener)

    def get_ssl_wrapper_packet(self, block=False):
        """Get wrapper packet

        :param block: If true, block until we receive a packet
        :return: SSL_WrapperPacket

        """
        return self.ssl_wrapper_listener.get_most_recent_message(block)

    def tick(self, duration_ms):
        """Tick the simulator with the given duration

        :param duration_ms: The duration to step the sim

        """
        tick = SimulatorTick()
        tick.milliseconds = duration_ms
        self.sim_tick_sender.send(tick)

    def send_blue_primitive_set_and_world(self, world, primitive_set):
        """Blue primitive set and world

        :param world: The world msg to send
        :param primitive_set: The primitive set to send

        """
        self.blue_world_sender.send(world)
        self.blue_primitive_set_sender.send(primitive_set)

    def send_yellow_primitive_set_and_world(self, world, primitive_set):
        """Yellow primitive set and world

        :param world: The world msg to send
        :param primitive_set: The primitive set to send

        """
        self.yellow_world_sender.send(world)
        self.yellow_primitive_set_sender.send(primitive_set)

    def stop(self):
        """Stop all listeners and senders.

        Bug fix: this method was declared without ``self``, so any
        ``simulator.stop()`` call raised a TypeError (and the body's ``self``
        references would have raised NameError).

        NOTE(review): the simulator subprocess started in __init__ is not
        terminated here — confirm whether callers are expected to kill it.
        """
        for unix_socket in [
            self.sim_tick_sender,
            self.world_state_sender,
            self.blue_world_sender,
            self.yellow_world_sender,
            self.blue_primitive_set_sender,
            self.yellow_primitive_set_sender,
            self.ssl_wrapper_listener,
            self.blue_robot_status_listener,
            self.yellow_robot_status_listener,
        ]:
            unix_socket.force_stop()
| [
"noreply@github.com"
] | LiCody.noreply@github.com |
c8ef83211988cefbe18916ab9fd7f4531c57ab0d | f9a4e1c39d722daab7de1f7a5ce6c2634fa53845 | /xorGame.py | efd55f4fec19c6b1a3bb13926c7f6b0e05d94837 | [] | no_license | EugenenZhou/leetcode | b26e4198729dd9c42dccb4cdbaa952d9c50086e1 | 03a0316ac317ae48adf2d05be62d536e1b5f2620 | refs/heads/master | 2020-06-17T16:29:54.746645 | 2019-09-21T05:36:23 | 2019-09-21T05:36:23 | 195,977,242 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | py | # ไธไธช้ปๆฟไธๅ็ไธไธช้่ดๆดๆฐๆฐ็ป nums[i] ใ
# ๅฐ็บขๅๅฐๆ่ฝฎๆตไป้ปๆฟไธๆฆๆไธไธชๆฐๅญ๏ผๅฐ็บขๅ
ๆใ
# ๅฆๆๆฆ้คไธไธชๆฐๅญๅ๏ผๅฉไฝ็ๆๆๆฐๅญๆไฝๅผๆ่ฟ็ฎๅพๅบ็็ปๆ็ญไบ 0 ็่ฏ๏ผๅฝๅ็ฉๅฎถๆธธๆๅคฑ่ดฅใย
# (ๅฆๅค๏ผๅฆๆๅชๅฉไธไธชๆฐๅญ๏ผๆไฝๅผๆ่ฟ็ฎๅพๅฐๅฎๆฌ่บซ๏ผๅฆๆๆ ๆฐๅญๅฉไฝ๏ผๆไฝๅผๆ่ฟ็ฎ็ปๆไธบ0ใ๏ผ
# ๆข็ง่ฏดๆณๅฐฑๆฏ๏ผ่ฝฎๅฐๆไธช็ฉๅฎถๆถ๏ผๅฆๆๅฝๅ้ปๆฟไธๆๆๆฐๅญๆไฝๅผๆ่ฟ็ฎ็ปๆ็ญไบ 0๏ผ่ฟไธช็ฉๅฎถ่ท่ใ
# ๅ่ฎพไธคไธช็ฉๅฎถๆฏๆญฅ้ฝไฝฟ็จๆไผ่งฃ๏ผๅฝไธไป
ๅฝๅฐ็บข่ท่ๆถ่ฟๅ trueใ
######################################################################
def xorGame(nums):
    """Return True iff the first player (Alice) wins the chalkboard XOR game.

    :param nums: list of non-negative integers on the chalkboard
    :return: True when Alice wins with optimal play

    Alice wins when the XOR of all numbers is already 0 at her turn, or when
    the board holds an even count of numbers. The original if/else ladder
    (plus an unreachable ``pass``) is collapsed into one boolean expression.
    """
    board_xor = 0
    for value in nums:
        board_xor ^= value
    return board_xor == 0 or len(nums) % 2 == 0
######################################################################
# Alice is certain to win when the initial XOR of nums is already 0, or when
# nums contains an even number of elements; in every other case Bob can
# force a win.
# Demo: XOR of [1,1,2,3] is 1 (non-zero) but the length is even, so
# xorGame returns True (Alice wins). `result` is left for inspection.
nums = [1,1,2,3]
result = xorGame(nums)
| [
"735159373@qq.com"
] | 735159373@qq.com |
59d3d0e0e07d6c1d41095d2ceff9107905522820 | 32fafd0c16351743360f00b3ca2ecac7488acef2 | /example3/populate_orders.py | fb42ae27516644797345c0949f942f4ff8a509c8 | [] | no_license | unix-way-project/postgresql | 128de649100d0c068cf2cef4a1f0abf48abeed88 | 631ddf194f0f799e97abb37f0b3c9b4f5f4b99a9 | refs/heads/master | 2023-03-17T03:59:40.660690 | 2021-03-14T20:19:39 | 2021-03-14T20:19:39 | 338,542,910 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,093 | py |
import psycopg2
import psycopg2.extras
import faker
import random
def add_order(connection):
    """Insert one order for a random user buying a random item, then debit the
    item's price from the user's balance — all in one transaction.

    On any database error the transaction is rolled back and the failure is
    logged; nothing is raised to the caller.

    :param connection: an open psycopg2 connection
    """
    # Bug fix: defined up front so the except block can log/close them even
    # when the failure happens before they are assigned (the original code
    # raised NameError in that case, masking the real error).
    user_pid = item_pid = item_price = None
    cursor = None
    try:
        cursor = connection.cursor(cursor_factory = psycopg2.extras.DictCursor)

        # Choose random user
        cursor.execute('''
            SELECT user_pid
            FROM users
            ORDER BY random() DESC
            LIMIT 1;
        ''')
        user = dict(cursor.fetchone())
        user_pid = user['user_pid']

        # Choose random item
        cursor.execute('''
            SELECT item_pid, item_price
            FROM items
            ORDER BY random() DESC
            LIMIT 1;
        ''')
        item = dict(cursor.fetchone())
        item_pid = item['item_pid']
        item_price = item['item_price']

        # Record the order and fetch its generated id.
        cursor.execute('''
            INSERT INTO orders(user_pid, item_pid, order_price)
            VALUES(%s, %s, %s)
            RETURNING order_pid
        ''', (user_pid, item_pid, item_price))
        order = dict(cursor.fetchone())
        order_pid = order['order_pid']

        # Debit the buyer's balance.
        cursor.execute('''
            UPDATE users
            SET user_balance = user_balance - %s
            WHERE user_pid = %s
        ''', (item_price, user_pid))
    except (Exception, psycopg2.DatabaseError) as error:
        connection.rollback()
        if cursor is not None:
            cursor.close()
        print("[ ERROR ] User: %s failed to order item %s for price: %s" % (
            user_pid,
            item_pid,
            item_price
        ))
        print(str(error))
        return
    connection.commit()
    cursor.close()
    print("[ ORDER ] User: %s ordered item %s for price: %s, order number: %s" % (
        user_pid,
        item_pid,
        item_price,
        order_pid
    ))
def connect():
    """Connect to PostgreSQL and populate the orders table with 100000 random
    orders, closing the connection when finished.
    """
    print('Connecting to the PostgreSQL database...')
    # NOTE(review): credentials are hard-coded; fine for a demo, but move them
    # to environment variables or a config file for anything real.
    connection = psycopg2.connect(
        host = "192.168.122.51",
        database = "unixway1",
        user = "unixway1user",
        password = "password1"
    )
    try:
        for index in range(0, 100000):
            add_order(
                connection = connection
            )
    finally:
        # Bug fix: the connection was previously never closed.
        connection.close()
if __name__ == '__main__':
connect()
| [
"jackalsh@gmail.com"
] | jackalsh@gmail.com |
f71d7ca732dbb65b50967732e07d0777f557e075 | b0a5efbd01e9614392be8eaea595f7f8efda9bfb | /lung.py | d499312a7bcfc052b19f3dd28bfb66694d3bce76 | [] | no_license | Ismail-w/cov19_imgprocess | 4ffc117a26063d3114077e703381653a5db13116 | e707f4574f9c41ab4e58cfa8bcecc5e8d1274f65 | refs/heads/main | 2023-03-31T09:17:09.974067 | 2021-04-08T10:23:12 | 2021-04-08T10:23:12 | 355,859,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py |
import cv2
import numpy as np
# Load the chest X-ray image.
img = cv2.imread('Covid1.png')
if img is None:
    # Bug fix: cv2.imread returns None (instead of raising) when the file is
    # missing or unreadable; fail with a clear error instead of a confusing
    # AttributeError below.
    raise FileNotFoundError("Could not read 'Covid1.png'")

print("Image Properties")
print("- Number of Pixels: " + str(img.size))
print("- Shape/Dimensions: " + str(img.shape))
cv2.imshow('org',img)
cv2.waitKey(0)

# (Removed an unused cv2.split(img) call — its channel results were never used.)
# Downscale to a fixed 200x200 working size.
resized_image = cv2.resize(img, (200, 200))
print("Image Properties")
print("- Number of Pixels: " + str(resized_image.size))
print("- Shape/Dimensions: " + str(resized_image.shape))
cv2.imshow('res',resized_image)
cv2.waitKey(0)

# Grayscale -> binary threshold -> Canny edges -> external contours.
img_gray = cv2.cvtColor(resized_image, cv2.COLOR_BGR2GRAY)
cv2.imshow('grayscale',img_gray)
cv2.waitKey(0)
r, threshold = cv2.threshold(img_gray, 125, 255, cv2.THRESH_BINARY)
cv2.imshow('threshold',threshold)
cv2.waitKey(0)
edged = cv2.Canny(img_gray, 100,200)
cv2.imshow('Edge',edged)
cv2.waitKey(0)
# NOTE(review): findContours returns 3 values on OpenCV 3.x and 2 on 4.x;
# this two-value unpack assumes OpenCV 4 — confirm the installed version.
contours, hierarchy = cv2.findContours(edged,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
print("Number of Contours found = " + str(len(contours)))
cv2.drawContours(resized_image, contours, -1, (0, 255, 0), 3)
cv2.imshow('Contours', resized_image)
cv2.waitKey(0)
| [
"noreply@github.com"
] | Ismail-w.noreply@github.com |
626a8f3efe3c44a285bd894dcf720fe9a98984aa | 8ccc0846442ea595015ec772c62d14695d745859 | /Backtesting/strategy/base.py | 7daa9847b42b20ed695cc39f44941ccb586a9169 | [] | no_license | linkenghong/Backtesting | 67c804446b3687a75064c8dc20713f69c11c56d7 | 29509e9e7262410275a92e42407cd8df334ecdad | refs/heads/master | 2020-06-05T02:57:30.582940 | 2019-09-08T05:20:43 | 2019-09-08T05:20:43 | 192,289,777 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,122 | py | from abc import ABCMeta, abstractmethod
class AbstractStrategy(object):
    """
    AbstractStrategy is an abstract base class providing an interface for
    all subsequent (inherited) strategy handling objects.

    The goal of a (derived) Strategy object is to generate Signal
    objects for particular symbols based on the inputs of ticks
    generated from a PriceHandler (derived) object.

    This is designed to work both with historic and live data as
    the Strategy object is agnostic to data location.
    """
    # NOTE(review): `__metaclass__` is Python-2 syntax and has no effect on
    # Python 3; kept as-is because making the class genuinely abstract would
    # change when missing-method errors surface for existing subclasses.
    __metaclass__ = ABCMeta

    @abstractmethod
    def calculate_signals(self, event):
        """
        Provides the mechanisms to calculate the list of signals.
        """
        raise NotImplementedError("Should implement calculate_signals()")

    def set_portfolio(self, portfolio_handler):
        """Attach the portfolio handler used by get_symbol_position()."""
        self.portfolio_handler = portfolio_handler

    def get_symbol_position(self, symbol):
        """Return a dict describing the current position held in *symbol*.

        All numeric fields default to 0 when no portfolio handler is attached
        or the symbol has no open position.

        :param symbol: the instrument symbol to look up
        :return: dict with keys symbol, quantity, unavailable_quantity,
            available_quantity, price, total_commission, avg_price,
            market_value
        """
        fields = ["symbol", "quantity", "unavailable_quantity",
                  "available_quantity", "price", "total_commission",
                  "avg_price", "market_value"]
        position_dict = {k: 0 for k in fields}
        position_dict["symbol"] = symbol
        try:
            position = self.portfolio_handler.portfolio.positions[symbol]
        # Bug fix: the original bare `except:` swallowed *every* exception
        # (including KeyboardInterrupt). Only the two expected failures are
        # caught: no portfolio handler attached, or symbol not held.
        except (AttributeError, KeyError):
            pass
        else:
            position_dict["quantity"] = position.quantity
            position_dict["unavailable_quantity"] = position.unavailable_quantity
            position_dict["available_quantity"] = position.available_quantity
            position_dict["price"] = position.price
            position_dict["total_commission"] = position.total_commission
            position_dict["avg_price"] = position.avg_price
            position_dict["market_value"] = position.market_value
        return position_dict
class Strategies(AbstractStrategy):
    """Composite strategy: fans each event out to every wrapped strategy."""

    def __init__(self, *strategies):
        """Store the strategies to delegate to, in the order given."""
        self._lst_strategies = strategies

    def calculate_signals(self, event):
        """Forward *event* to each contained strategy, in registration order."""
        for sub_strategy in self._lst_strategies:
            sub_strategy.calculate_signals(event)
| [
"345852974@qq.com"
] | 345852974@qq.com |
53d2e5d291801ab5cf03ead215d5c4ba7b43273e | 947fa6a4a6155ffce0038b11f4d743603418ad68 | /.c9/metadata/environment/fb_post_learning/fb_post_learning/settings/base_aws_s3.py | 50a8801a8acf4d0f51a64b61ae58285d2bc56de6 | [] | no_license | bharathi151/bharathi_diyyala | bd75e10639d7d22b332d5ce677e7799402dc4984 | 99f8657d010c790a0e4e4c9d6b57f81814784eb0 | refs/heads/master | 2022-11-21T12:43:48.401239 | 2020-07-23T09:05:52 | 2020-07-23T09:05:52 | 281,903,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | {"filter":false,"title":"base_aws_s3.py","tooltip":"/fb_post_learning/fb_post_learning/settings/base_aws_s3.py","undoManager":{"mark":-1,"position":-1,"stack":[]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":0,"column":0},"end":{"row":0,"column":0},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1589610124498,"hash":"94324fee64bfb279ca1b0e507e1414c07b06fab6"} | [
"bharathi151273@gmail.com"
] | bharathi151273@gmail.com |
73b124d0407d683b320b426ec9edb5b9f2c86f27 | 351687b2f40e8fe063c546993fb0eaefb58604f6 | /cov/example_cov_estimator_l1_nolam.py | 0118785b04c13dac6581bc07c7c93a7aebc68d0f | [] | no_license | eduff/NI_code | 79cdb22b154070d4b0e8873df810ac535d18c600 | 7b5a984e6931b8fb586f50ab390285bf6b36e785 | refs/heads/master | 2021-01-01T19:10:39.753985 | 2015-02-09T22:33:03 | 2015-02-09T22:33:03 | 9,242,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,517 | py | # Author: Gael Varoquaux
# Copyright: INRIA
import pylab as pl
import numpy as np
from scipy import linalg
from covariance.generate_data import generate_standard_sparse_mvn
from covariance.cov_estimator_l1 import CovEstimatorL1CV
################################################################################
N_SAMPLES = 30
DIM = 20
prng = np.random.RandomState(10)
x, true_prec = generate_standard_sparse_mvn(N_SAMPLES, DIM, prng=prng)
emp_cov = np.dot(x.T, x)/N_SAMPLES
true_cov = linalg.inv(true_prec)
model = CovEstimatorL1CV()
model.fit(x)
l1 = model.best_model.l1
if 1:
prec_ = model.precision
cov_ = linalg.inv(prec_)
#gap, pobj, dobj = model.dual_gap(emp_cov, with_obj=True)
#print "Dual gap : %s" % gap
#print "Criterion : %s" % pobj
#print "Dual criterion : %s" % dobj
###############################################################################
# Visualize
vmin = min(true_cov.min(), emp_cov.min(), cov_.min())
vmax = max(true_cov.max(), emp_cov.max(), cov_.max())
vmax = max(-vmin, vmax)
pl.figure()
pl.subplot(2, 3, 1)
pl.imshow(true_cov, interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=pl.cm.RdBu_r)
pl.axis('off')
pl.title('True (simulated) covariance', fontsize=10)
pl.subplot(2, 3, 2)
pl.imshow(emp_cov, interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=pl.cm.RdBu_r)
pl.axis('off')
pl.title('sample covariance', fontsize=10)
pl.subplot(2, 3, 3)
pl.imshow(cov_, interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=pl.cm.RdBu_r)
pl.axis('off')
pl.title('L1 covariance estimate \n for lambda=%s' % l1, fontsize=10)
vmin = min(true_prec.min(), prec_.min())
vmax = max(true_prec.max(), prec_.max())
vmax = max(-vmin, vmax)
pl.subplot(2, 3, 4)
pl.imshow(true_prec, interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=pl.cm.RdBu_r)
pl.imshow(np.ma.masked_array(np.ones_like(true_prec), true_prec!=0), cmap=pl.cm.gray, interpolation='nearest', vmin=0, vmax=2)
pl.axis('off')
pl.title('True (simulated) precision', fontsize=10)
pl.subplot(2, 3, 5)
pl.imshow(linalg.inv(emp_cov), interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=pl.cm.RdBu_r)
pl.axis('off')
pl.title('Empirical precision', fontsize=10)
pl.subplot(2, 3, 6)
pl.imshow(prec_, interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=pl.cm.RdBu_r)
pl.imshow(np.ma.masked_array(np.ones_like(true_prec), np.abs(prec_)>1e-2), cmap=pl.cm.gray, interpolation='nearest', vmin=0, vmax=2)
pl.axis('off')
pl.title('L1 precision estimate \n for lambda=%s' % l1, fontsize=10)
pl.show()
| [
"eduff@fmrib.ox.ac.uk"
] | eduff@fmrib.ox.ac.uk |
c72299e7afa25673891cb364c768c19408325154 | f79267b09f4fee621c7aaaa02ab2eef4f59e0dcf | /ag/sorting/graph.py | b2a6a847d623fb418ea65eca15d0c3a908ef7eb9 | [
"MIT"
] | permissive | justyre/jus | 07503972ff4933117f39fe91818c9b63dcfcbb17 | 1339c010ac4499c253061d2cce5e638ec06062bd | refs/heads/master | 2023-07-16T11:54:43.875953 | 2021-08-03T07:49:36 | 2021-09-06T02:26:13 | 389,536,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,359 | py | # Licensed under MIT License.
# See LICENSE in the project root for license information.
"""Several graph algorithms."""
# With DFS, we can classify the edges in a directed or undirected graph:
#
# 1. A tree edge is (u,v) if v was first discovered by exploring edge (u,v). All the edges in the depth-first forest G(parent) (aka the predecessor subgraph) are tree edges (but the reverse does not always hold - not all tree edges are in G(pa)).
# 2. A back edge is (u,v) connecting u to an ancestor v in a depth-first tree. For
# directed graphs, we consider self-loops to be back edges.
# 3. A forward edge is a non-tree edge (u,v) connecting u to a descendant v.
# 4. All other edges that is not any of the above kind are called cross edges.
# 5. For undirected graphs, we classify the edge as the first type in the list
# above; and we classify the edge according to whichever of (u,v) or (v,u) the DFS
# encounters first.
#
# When we first explore an edge (u,v), if:
# a) v.color = WHITE: then this is a tree edge.
# b) v.color = GRAY: then this is a back edge, since the gray vertices always form
# a linear chain of descendants corresponding to the stack of active _dfs_visit()
# invocations. Exploration always proceeds from the deepest (latest) gray vertex,
# so an edge that reaches another gray vertex must have reached an ancestor.
# c) v.color = BLACK (only possible for a directed graph): then this is a forward
# or cross edge. When u.grayed_time < v.grayed_time, it is a forward edge; if >,
# it is a cross edge.
#
# According to CLRS Theorem 22.10, for an undirected graph, every edge is either a
# tree edge or a back edge (ie there are no forward or cross edges).
# Hence, for an undirected graph, it has a cycle if and only if DFS finds a back
# edge.
from typing import Sequence, Tuple
import enum
class Color(enum.Enum):
"""Color definition for Graph."""
WHITE = enum.auto()
GRAY = enum.auto()
BLACK = enum.auto()
class Graph:
"""Graph represented using adjacency lists. The default is undirected graph."""
def __init__(
self, num_vertices: int, edges: Sequence[Tuple], is_directed: bool = False
) -> None:
# Adjacency list (ie list of all neighbors) for all vertices
self.adjlist = [[] for _ in range(num_vertices)]
for v1, v2 in edges:
# `edges` is a list of tuples of vertex values like (v1, v2).
# We want to store the edge info as neighbors for each vertex, so that
# we will have an adjacency list
self.adjlist[v1].append(v2)
if not is_directed:
self.adjlist[v2].append(v1)
self.color: Color = [Color.WHITE] * num_vertices
# For source vertex and all undiscovered vertices, their parents are None
self.parent = [None] * num_vertices
# Distance (ie total num of edges) from source to the vertex
self.distance = [None] * num_vertices
# The next attrs are for DFS to store the time when a vertex turns gray/black
self.timestamp = 0
self.grayed_time = [None] * num_vertices
self.blackened_time = [None] * num_vertices
# Mark the cycle index number of a vertex; `None` if it belongs to no cycle
self.cycle_mark = [None] * max(num_vertices, len(edges))
# Total number of cycles in the graph
self.num_cycles = 0
def __repr__(self) -> str:
"""Representation showing neighbors of each vertex."""
return "\n".join([f"{i}: {neighbors}" for (i, neighbors) in enumerate(self.adjlist)])
def __str__(self) -> str:
"""Representation."""
return self.__repr__()
def adjacency_matrix(self) -> list:
"""Get the adjacency matrix."""
adjmat = [[0] * len(self.adjlist) for _ in range(len(self.adjlist))]
for i, neighbors in enumerate(self.adjlist):
for j in neighbors:
adjmat[i][j] = 1
return adjmat
def breadth_first_search(self, source: int) -> list:
"""Breadth-first search (BFS) of a graph from vertex `source`, cf CLRS 22.2."""
# Time complexity: O(num_vertices + num_edges), aka O(V+E)
# Note: This initialization is a must, since other methods may change defaults
self.color = [Color.WHITE] * len(self.adjlist)
# For source vertex and all undiscovered vertices, their parents are None
self.parent = [None] * len(self.adjlist)
# Distance (ie total num of edges) from source to the vertex
self.distance = [None] * len(self.adjlist)
# Source is discovered, but not all its neighbors are discovered, so gray
self.color[source] = Color.GRAY
self.distance[source] = 0
queue = [] # Use-and-discard FIFO queue
traversal = [] # Record the BFS traversal route
queue.append(source)
traversal.append(source)
while queue:
# We use queue as FIFO here
u = queue.pop(0)
for v in self.adjlist[u]:
if self.color[v] == Color.WHITE:
# White means undiscovered, so discover it
self.color[v] = Color.GRAY
self.distance[v] = self.distance[u] + 1
self.parent[v] = u
queue.append(v)
traversal.append(v)
# When u's adjlist is exhausted, turn u to black
self.color[u] = Color.BLACK
return traversal
def shortest_path(self, source: int, vertex: int) -> list:
"""Return the shortest path from vertex `source` to `vertex`.
Note
----
The length of the shortest path (when one exists) is trivial: `len(returning list)-1`.
"""
# Time complexity: O(num of vertices in the path)
# First, we need to compute all vertices' parents using b_f_s()
_ = self.breadth_first_search(source)
if vertex == source:
return [source]
elif self.parent[vertex] is None:
print(f"No path from {source} to {vertex} exists.")
return []
else:
return self.shortest_path(source, self.parent[vertex]) + [vertex]
def breadth_first_search_jovian(self, source: int) -> list:
"""Breadth-first search (BFS) traversal of a graph from vertex `source`."""
# Time complexity: O(num_vertices + num_edges), aka O(V+E)
visited = [False] * len(self.adjlist)
queue = [] # same as `traversal` in the above breadth_first_search()
# Label root (ie source) as visited
visited[source] = True
queue.append(source)
i = 0
while i < len(queue):
for v in self.adjlist[queue[i]]:
# v is a neighbor of queue[i] (starting from queue[0]=source)
if not visited[v]:
visited[v] = True
queue.append(v)
i += 1
return queue
def depth_first_search(self) -> list:
"""Depth-first search (DFS) of a graph, cf CLRS 22.3."""
# Time complexity: Theta(V + E)
# Note: This initialization is a must, since other methods may change defaults
self.color = [Color.WHITE] * len(self.adjlist)
# For source vertex and all undiscovered vertices, their parents are None
self.parent = [None] * len(self.adjlist)
self.timestamp = 0
# `predsubg` is the predecessor subgraph: G(parent) = (V, E(parent)), where
# E(parent) = {(v.pa, v): v in G.V and v.pa is not None}.
# Note: Depending on the tree structure, predsubg may not include ALL edges of
# the original graph. But we are sure that predsubg does not include duplicate
# edges, and does not have any edges that are not present in the original graph.
predsubg = [None] * len(self.adjlist)
for vertex in range(len(self.adjlist)):
if self.color[vertex] == Color.WHITE:
# Every time _dfs_visit(vertex) is called, `vertex` becomes the root of
# a new tree in the depth-first forest
predsubg[vertex] = self._dfs_visit(vertex)
return predsubg, self.grayed_time, self.blackened_time
def _dfs_visit(self, vertex: int) -> list:
# Visit all neighbors of `vertex` using DFS approach.
traversal = []
# White `vertex` is discovered, so it turns gray
self.timestamp += 1
self.grayed_time[vertex] = self.timestamp
self.color[vertex] = Color.GRAY
for v in self.adjlist[vertex]:
# Edge (vertex, v) is being explored by the DFS
if self.color[v] == Color.WHITE:
self.parent[v] = vertex
traversal += [(vertex, v)] + self._dfs_visit(v)
elif self.color[v] == Color.GRAY:
# TODO: For an undirected graph, this means (u,v) is a back edge, which means there is a cycle
# print('cyc', traversal + [(v, self.parent[v])])
pass
# When all neighbors of `vertex` have been exhausted, it turns black
self.color[vertex] = Color.BLACK
self.timestamp += 1
self.blackened_time[vertex] = self.timestamp
return traversal
def depth_first_search_jovian(self, source: int) -> list:
"""Depth-first search (DFS) traversal of a graph from vertex `source`."""
# DFS is more memory efficient than BFS, since you can backtrack sooner.
visited = [False] * len(self.adjlist)
queue = []
stack = [source]
while stack:
v = stack.pop()
if not visited[v]:
visited[v] = True
queue.append(v)
for neighbor in self.adjlist[v]:
# Push (ie append) all neighbors of v into stack for next loop
stack.append(neighbor)
return queue
def is_cyclic(self) -> bool:
"""Check if the graph has any cycles."""
visited = [False] * len(self.adjlist)
for vertex in range(len(self.adjlist)):
if not visited[vertex] and self._is_subgraph_cyclic(vertex, visited, -1):
return True
return False
def _is_subgraph_cyclic(self, v: int, visited: Sequence, parent: int) -> bool:
# Detect cycles in the subgraph reachable from vertex `v`.
visited[v] = True
for neighbor in self.adjlist[v]:
if not visited[neighbor]:
# If neighbor is not visited, then recurse on it
if self._is_subgraph_cyclic(neighbor, visited, v):
return True
elif parent != neighbor:
# If neighbor has been visited and is not the parent of v,
# then there is a cycle
return True
return False
def dfs_cycle(self, u: int, p: int) -> None:
"""Mark the vertices with different numbers for different cycles."""
if p is None:
# This initialization is a must, since other methods may change defaults
self.color = [Color.WHITE] * len(self.adjlist)
# For source vertex and all undiscovered vertices, their parents are None
self.parent = [None] * len(self.adjlist)
# Store total number of cycles found; also used as current cycle's index num
self.num_cycles = 0
if self.color[u] == Color.GRAY:
# A vertex that is discovered but not finished.
# For an undirected graph, this means we have discovered a back edge, which
# means there is a cycle. So we backtrack based on parents to find whole cyc
self.num_cycles += 1
current = p
self.cycle_mark[current] = self.num_cycles
while current != u:
# Backtrack the parent of current, until the cycle is exhausted
current = self.parent[current]
self.cycle_mark[current] = self.num_cycles
elif self.color[u] == Color.WHITE:
# Set p to be u's parent, and mark u as (first) discovered
self.parent[u] = p
self.color[u] = Color.GRAY
for v in self.adjlist[u]:
# Edge (u, v) is being explored by the DFS
if v != self.parent[u]:
self.dfs_cycle(v, u)
# Now u is finished
self.color[u] = Color.BLACK
def print_cycles(self, edges: Sequence[Tuple]) -> None:
"""Print and return the cycles in the graph."""
self.dfs_cycle(0, None)
cycles = [[] for _ in range(self.num_cycles + 1)]
for i in range(len(self.adjlist)):
if self.cycle_mark[i] is not None:
print(i, self.cycle_mark, cycles)
cycles[self.cycle_mark[i]].append(i)
for i in range(1, self.num_cycles + 1):
print(f"Cycle #{i}:", *cycles[i])
print()
return cycles[1:]
##########################################
### Driver code
edges = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 0), (1, 4), (1, 3)]
g1 = Graph(5, edges)
print(g1)
print('Adjacent matrix:', g1.adjacency_matrix())
print('BFS:', g1.breadth_first_search(3))
print('BFS jovian:', g1.breadth_first_search_jovian(3))
print('Shortest path:', g1.shortest_path(2, 4))
print('DFS:', g1.depth_first_search())
print('DFS jovian:', g1.depth_first_search_jovian(0))
print('Has cycles:', g1.is_cyclic())
print(g1.print_cycles(edges))
# Has a small cycle
edges = [(0, 1), (0, 3), (1, 2), (2, 0), (3, 4)]
g = Graph(5, edges)
print(g)
print(g.depth_first_search())
print(g.is_cyclic())
print("Cycles: ", g.print_cycles(edges))
# Has a big cycle
edges = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)]
g = Graph(5, edges)
print(g)
print(g.depth_first_search())
print(g.is_cyclic())
print("Cycles: ", g.print_cycles(edges))
edges = [(0, 1), (0, 3), (3, 1), (1, 4), (4, 3), (2, 4), (2, 5), (5, 5)]
g = Graph(6, edges, is_directed=True)
print(g)
print(g.depth_first_search())
print("Cycles: ", g.print_cycles(edges))
edges = [(0, 1), (1, 2), (2, 3), (1, 4), (5, 6), (5, 7)]
g = Graph(8, edges, is_directed=True)
print(g)
print(g.depth_first_search()) | [
"1762873+justyre@users.noreply.github.com"
] | 1762873+justyre@users.noreply.github.com |
c0331309bf2f0dab4193eca0be842d750c839dc8 | c03b615ca32a191672be6ed8d5de1624db9409b0 | /p10.py | 8f59c18d38846c9708889c19091d76c70cae52c3 | [] | no_license | Hemangi3598/chap-8_p10 | b7b768120c8fbaf85e01b8aa699223a66ce75f8b | 8736c342b578100b4f0fada1f84edf6a511e753b | refs/heads/main | 2023-08-07T08:22:48.537950 | 2021-09-19T07:02:29 | 2021-09-19T07:02:29 | 408,056,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | # waopp to add students and their rno in class
class student:
def __init__(self, rno, name):
self.rno = rno
self.name = name
def show(self):
print("rno = ", self.rno)
print("name = ",self.name)
data = []
while True:
op = int(input(" 1 add, 2 view and 3 exit"))
if op == 1:
rno = int(input("enter rno "))
name = input("enter name ")
s = student(rno, name)
data.append(s)
elif op == 2:
for d in data:
d.show()
elif op == 3:
break
else:
print("invalid option ") | [
"noreply@github.com"
] | Hemangi3598.noreply@github.com |
84bc2ed6db473e44610fce5decafd84089166c40 | 6e3396980eeee1d8d55e4afbc6148711e9e9a342 | /SPOJ/py/INTEST.py | 63e9b4121738ad4488c4a37461310bf941213675 | [] | no_license | arunpatala/scala-learn | c20ca717899d1752ddb20dfbe4f6839217ad3ac8 | b9e52aec74d360a18af99e841c6b598f2b0165b6 | refs/heads/master | 2021-01-22T23:43:26.608433 | 2015-07-31T17:30:54 | 2015-07-31T17:30:54 | 38,426,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | a = str.split(raw_input())
N = int(a[0])
K = int(a[1])
ret = 0;
for i in range(0,N):
if(int(raw_input())%K==0):
ret = ret + 1
print ret
| [
"arunpatala@gmail.com"
] | arunpatala@gmail.com |
8c67bf15d9d2de2bc24154779e9a522ad89693e5 | c83ba2b21c72fa119fecb1b094fcf7a9745b626c | /CodeFiles/Metrics_L2.py | e0e921f54fcc22481bc26c70c02ba9f53dc28d3f | [] | no_license | amoghgaikwad/Click-Through-Prediction-Rate | 8074d84309e4ff824eafe531a6f62280db091113 | 0dd369a927929e4fa45d092e38cd7756276e9cb2 | refs/heads/master | 2021-01-22T20:50:39.342028 | 2017-03-18T02:58:16 | 2017-03-18T02:58:16 | 85,371,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,043 | py | from pyspark.sql import Row
from pyspark.ml.feature import OneHotEncoder, StringIndexer
from pyspark.ml.feature import VectorAssembler
from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext
from pyspark.ml import Pipeline
from pyspark.mllib.classification import LogisticRegressionWithSGD, LogisticRegressionModel
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.evaluation import BinaryClassificationMetrics
import os, tempfile
path = tempfile.mkdtemp()
#sc = spark.sparkContext
conf = SparkConf().setAppName("Assignment 3")
sc = SparkContext(conf=conf)
sqlCtx = SQLContext(sc)
###### Part 1- Preparing the data #######
#To count the number of distinct values in each column
distnct_values = {}
# Load a text file and convert each line to a Row.
lines = sc.textFile("test_2.csv")
#extract header
header = lines.first()
lines = lines.filter(lambda row : row != header)
parts = lines.map(lambda l: l.split(","))
data_T = parts.map(lambda p: Row(click=(p[1]), C1=p[3], banner_pos = p[4], site_id=p[5], site_domain=p[6],
site_category=p[7], app_id=p[8], app_domain=p[9], app_category=p[10], device_id=p[11], device_ip=p[12], device_model=p[13], device_type=p[14],
device_conn_type=p[15], C14=p[16], C15=p[17], C16=p[18], C17=p[19], C18=p[20], C19=p[21], C20=p[22], C21=p[23] ))
#create the dataframe
df = sqlCtx.createDataFrame(data_T)
#selecting all the categorical columns and checking the distinct values in each
col_names= ['C1','site_category','app_category','device_type','C14','C15','C16','C17','C18','C19','C20','C21']
for i in col_names: ##col_names contains names of cols that contains categorical data
distinctValues = df.select(i).distinct().rdd.map(lambda r: r[0]).count()
distnct_values[i] = distinctValues
#delete columns which have more than 100 distinct values in them
for key, value in distnct_values.iteritems() :
if int(value) >100:
df = df.drop(str(key))
#drop the columns which have NA values in it.
df = df.na.replace('', 'NA', 'C1')
df = df.dropna()
#stringIndexer on all the categorical columns
c1I = StringIndexer(inputCol="C1", outputCol="iC1", handleInvalid="skip")
c15I = StringIndexer(inputCol="C15", outputCol="iC15", handleInvalid="skip")
c16I = StringIndexer(inputCol="C16", outputCol="iC16", handleInvalid="skip")
c18I = StringIndexer(inputCol="C18", outputCol="iC18", handleInvalid="skip")
c19I = StringIndexer(inputCol="C19", outputCol="iC19", handleInvalid="skip")
c21I = StringIndexer(inputCol="C21", outputCol="iC21", handleInvalid="skip")
appcatI = StringIndexer(inputCol="app_category", outputCol="i_app_category", handleInvalid="skip")
devtypeI = StringIndexer(inputCol="device_type", outputCol="i_device_type", handleInvalid="skip")
sitecatI = StringIndexer(inputCol="site_category", outputCol="i_site_category", handleInvalid="skip")
#OneHotEncoder applied after the stringIndexer to form binary vector for each column
c1E = OneHotEncoder(inputCol="iC1", outputCol="C1Vector")
c15E = OneHotEncoder(inputCol="iC15", outputCol="C15Vector")
c16E = OneHotEncoder(inputCol="iC16", outputCol="C16Vector")
c18E = OneHotEncoder(inputCol="iC18", outputCol="C18Vector")
c19E = OneHotEncoder(inputCol="iC19", outputCol="C19Vector")
c21E = OneHotEncoder(inputCol="iC21", outputCol="C21Vector")
appcatE = OneHotEncoder(inputCol="i_app_category", outputCol="i_app_category_Vector")
devtypeE = OneHotEncoder(inputCol="i_device_type", outputCol="i_device_type_Vector")
sitecatE = OneHotEncoder(inputCol="i_site_category", outputCol="i_site_category_Vector")
#Vector assembler
fAssembler = VectorAssembler(
inputCols=["C1Vector", "C15Vector", "C16Vector", "C18Vector", "C19Vector", "C21Vector", "i_app_category_Vector", "i_device_type_Vector", "i_site_category_Vector"],
outputCol="features")
#pipeline to sum up all the stringIndexers and OneHotEncoders and VectorAssemebler
data_P = Pipeline(stages=[c1I, c15I, c16I, c18I, c19I, c21I, appcatI, devtypeI, sitecatI,
c1E, c15E, c16E, c18E, c19E, c21E, appcatE, devtypeE, sitecatE, fAssembler])
model = data_P.fit(df)
data_t = model.transform(df)
###### Part 1 ends here #####
# Making the labelpoints to train the data with LR
parsedData=data_t.select('click', 'features').rdd.map(lambda row: LabeledPoint(float(row.click),Vectors.dense((row.features).toArray())))
# split the dataset
training,test = parsedData.randomSplit([0.6, 0.4], seed=11L)
training.cache()
##### PART 4 ######
# Retrain your model using an L2 Regularization method.
# Tune the cost parameter by dividing your training set into a training and a validation set.
# Output the value of the best choice. Test the new model with the provided test set. Output the accuracy, FPR, and AUC
training_l2, validation_l2 = training.randomSplit([0.8, 0.2], seed=11L)
training_l2.cache()
model1 = LogisticRegressionWithSGD.train(training_l2, step=0.1, miniBatchFraction=0.1, regType='l2', regParam=0.01)
model2 = LogisticRegressionWithSGD.train(training_l2, step=0.1, miniBatchFraction=0.1, regType='l2', regParam=1.0)
model3 = LogisticRegressionWithSGD.train(training_l2, step=0.1, miniBatchFraction=0.1, regType='l2', regParam=0.9)
model3.save(sc, path)
# 1- Testing the Accuracy by changing the regParam(cost Parameter)
print("Model 1:")
labelsAndPreds1 = validation_l2.map(lambda p: (float(model1.predict(p.features)), p.label))
Accuracy = labelsAndPreds1.filter(lambda (v, p): v == p).count() / float(validation_l2.count())
print("Accuracy 1 = " + str(Accuracy))
print("Model 2:")
labelsAndPreds2 = validation_l2.map(lambda p: (float(model2.predict(p.features)), p.label))
Accuracy = labelsAndPreds2.filter(lambda (v, p): v == p).count() / float(validation_l2.count())
print("Accuracy 2 = " + str(Accuracy))
print("Model 3:")
labelsAndPreds3 = validation_l2.map(lambda p: (float(model3.predict(p.features)), p.label))
Accuracy = labelsAndPreds3.filter(lambda (v, p): v == p).count() / float(validation_l2.count())
print("Accuracy 3 = " + str(Accuracy))
# After running all the three models, the accuracy was slightly higher in the 3rd model - Accuracy = 0.831453634085
# So using the 3rd model to evaluate the test set:
print("Test Data Metrics on Best model:")
model_best = LogisticRegressionModel.load(sc,path)
# 1- Accuracy
labelsAndPredsT = test.map(lambda p: (float(model_best.predict(p.features)), p.label))
Accuracy = labelsAndPredsT.filter(lambda (v, p): v == p).count() / float(test.count())
print("Accuracy = " + str(Accuracy))
# 2- To Find the FPR
fpr_of_data= labelsAndPredsT.filter(lambda (v, p): v==1 and p==0).count()
fpr= fpr_of_data/(fpr_of_data+labelsAndPredsT.filter(lambda (v, p): v==0 and p==0).count())
print("FPR = " + str(fpr))
#Clears the threshold so that predict will output raw prediction scores, which will be used for AU -ROC
model_best.clearThreshold()
# Instantiate metrics object
metrics = BinaryClassificationMetrics(labelsAndPredsT)
# Area under ROC curve
print("Area under ROC = %s" % metrics.areaUnderROC) | [
"amogh.gk@gmail.com"
] | amogh.gk@gmail.com |
6ddcba986a72f513f2c34d92c5c53b6cd277e169 | b309e6a809cb722f0ee9c42f6fcfacbd9495ad43 | /KMP.py | 96fb793e047549f5d87c349b2d7d49be285f16c9 | [] | no_license | leesen934/leetcode_practices | 0bee0bef00e7459bd4ecbb5a2f98436c270889ad | c93f15bee2ee2eea2e6f276c4907280d110c0467 | refs/heads/master | 2020-03-28T14:24:31.362300 | 2018-09-13T08:19:47 | 2018-09-13T08:19:47 | 148,484,508 | 0 | 0 | null | 2018-09-12T13:26:23 | 2018-09-12T13:26:22 | null | UTF-8 | Python | false | false | 1,029 | py | def getNext(p):
j = 0
k = -1 # next[j]็ๅผ๏ผไนๅฐฑๆฏk๏ผ่กจ็คบ๏ผๅฝP[j] != T[i]ๆถ๏ผjๆ้็ไธไธๆญฅ็งปๅจไฝ็ฝฎใ
next_p = [-1] * len(p)
while j < len(p) - 1:
print("p[k]: " + p[k] + ", p[j]: " + p[j])
if k == -1 or p[k] == p[j]:
j += 1
k += 1
if p[j] == p[k]: # ๅฝไธคไธชๅญ็ฌฆ็ธ็ญๆถ่ฆ่ทณ่ฟ
next_p[j] = next_p[k]
else:
next_p[j] = k
else:
k = next_p[k]
print(next_p)
return next_p
def KMP(s, p):
i = 0 # ไธปไธฒไฝ็ฝฎ
j = 0 # ๆจกๅผไธฒไฝ็ฝฎ
next_p =getNext(p)
while i < len(s) and j < len(p):
if j == -1 or s[i] == p[j]: # ๅฝjไธบ-1ๆถ๏ผ่ฆ็งปๅจ็ๆฏi๏ผๅฝ็ถjไน่ฆๅฝ0
i += 1
j += 1
else:
j = next_p[j]
if j == len(p):
return i - j
else:
return -1
if __name__ == "__main__":
s = "abcabcabcabcabxabc"
p = "abcabx"
p = "abbcabcaabbcaa"
print(KMP(s, p)) | [
"lichunchn0516@gmail.com"
] | lichunchn0516@gmail.com |
fa3510c04357a2e5a1420c7e718ab0f2cde76df7 | f9aecf1d54f9919f48b523ce7e68397a13de4db6 | /cgi-bin/XmlToJson.py | 4757bbb9790b7b26fb990dd842f120c092ae8217 | [] | no_license | zestroly/www | 2ae2dc2aa3de5f47cf03e310e071049ff7790f04 | a7e15dac6e0494dcdf298edda8e1e3e2c81ae3c8 | refs/heads/master | 2021-01-18T03:53:05.678155 | 2017-06-12T01:04:14 | 2017-06-12T01:04:14 | 85,780,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | #!/usr/bin/python3.6
from xml.dom.minidom import parse
import xml.dom.minidom
import os
import sys,json
json_str=sys.argv[1]
#print(json_str)
#print(type(json_str))
json_dict=json.loads(json_str)
xmlstr = ""
for key in json_dict:
if(key == 'data'):
break;
xmlstr +="<Param ErrorCode=\"0\">"
xmlstr += "<camera>"
xmlstr += "<" + key + ">"
for i in json_dict[key]:
xmlstr += "<" + i + " "
for j in json_dict[key][i]:
xmlstr +=" " + j + "=\"" +json_dict[key][i][j] + "\""
xmlstr += "/>"
xmlstr += "</" + key + ">"
xmlstr += "</camera>"
xmlstr += "</Param>"
f=open('/tmp/temp.xml', 'w')
f.write(xmlstr);
f.close();
os.system("/home/root/bin/XmlDevice set /tmp/temp.xml > /dev/null")
os.system("/home/root/bin/XiXmlDevice get /home/root/config/camera.xml > /dev/null")
DOMTree = xml.dom.minidom.parse("/home/root/config/camera.xml")
root=DOMTree.documentElement
cameraNodes=root.getElementsByTagName('camera')
def getAttrbute(node):
tempstr="{"
j=1
for key in node.attributes.keys():
tempstr += "\""+ key+"\":\""+node.attributes[key].value+"\""
if j < (node.attributes.length):
tempstr +=","
j=j+1
tempstr+="}"
return tempstr
def metaNode(cells, str):
str +="\""+cells.nodeName+"\"" + ":"
str+='{'
i=0
for cell in cells.childNodes:
i=i+1
if cell.nodeType == 3:
continue
str += "\""+cell.nodeName+"\"" + ":"
str += getAttrbute(cell)
if i < (cells.childNodes.length-1):
str +=","
str += "}"
return str
str="{"
for camerchild in cameraNodes:
k=0
for cell in camerchild.childNodes:
k=k+1
if cell.nodeType == 3:
continue
str = metaNode(cell,str)
if k < (camerchild.childNodes.length-1):
str += ","
str+="}"
print (str)
| [
"zestroly@126.com"
] | zestroly@126.com |
bb72ca08e04c6993447c5ba8e25163b95a1d07f8 | cf1636cd2108ae86c5df5bfc1ae9448e3fd9dbf7 | /nash_test.py | 6e61f59897bf12985704bd9184416d09b3079de8 | [] | no_license | ryanpig/MultiagentSystem-FindNE | 991d5db6950eef11b41b400627f9b05b3710759b | 470b5f838d4b9296a91824024cbef412881abcf9 | refs/heads/master | 2021-04-12T04:22:24.828274 | 2019-02-08T10:41:49 | 2019-02-08T10:41:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,923 | py | import nash
import numpy as np
import datetime
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from scipy.stats import norm
# Algorithms
def non_duplicate_print(eqs):
    """Print each equilibrium in *eqs* exactly once, skipping repeats.

    Two equilibria count as duplicates when both the row-player and the
    column-player strategy arrays compare element-wise equal.
    """
    seen_rows = []
    seen_cols = []
    for eq in eqs:
        duplicate = any(
            np.all(eq[0] == row) and np.all(eq[1] == col)
            for row, col in zip(seen_rows, seen_cols)
        )
        if not duplicate:
            seen_rows.append(eq[0])
            seen_cols.append(eq[1])
            print(eq)
def find_nash_support_enum(utilA, utilB):
    """Find all Nash equilibria via support enumeration and print the unique ones."""
    game = nash.Game(utilA, utilB)
    non_duplicate_print(game.support_enumeration())
def find_nash_lemke_howson(utilA, utilB):
    """Solve the game with the Lemke-Howson algorithm and print one NE.

    Returns the third element of the solver's result tuple.
    NOTE(review): upstream nashpy's lemke_howson returns only the two
    strategy arrays; this code indexes a third element (presumably a
    step counter from a locally modified `nash` module) - confirm.
    """
    game = nash.Game(utilA, utilB)
    result = game.lemke_howson(initial_dropped_label=0)
    print("NE", result[0], result[1])
    return result[2]
def find_nash_vertex_enum(utilA, utilB):
    """Find Nash equilibria via vertex enumeration and print every one found."""
    game = nash.Game(utilA, utilB)
    for equilibrium in game.vertex_enumeration():
        print(equilibrium)
# generate m x n 2-player game
def generate_game(m=2, n=2, utility_max=10, utility_min=-10):
    """Generate a random two-player bimatrix game of size m x n.

    Utilities are drawn uniformly from [utility_min, utility_max) and
    truncated to integers.  Returns the pair (utilA, utilB) of integer
    payoff matrices for the row and column player respectively.

    Bug fix: the original cast to np.int8, which silently wraps for any
    |utility| > 127 and so broke the utility_min/utility_max parameters
    for wider ranges; int64 keeps the full requested range.
    """
    print("Rows:", m, "Cols:", n, "Max:", utility_max, "Min:", utility_min)
    utilA = np.random.uniform(utility_min, utility_max, size=(m, n))
    utilB = np.random.uniform(utility_min, utility_max, size=(m, n))
    utilA = np.asarray(utilA, dtype=np.int64)
    utilB = np.asarray(utilB, dtype=np.int64)
    print("utility of row player")
    print(utilA)
    print("utility of col player")
    print(utilB)
    return utilA, utilB
# generate a single m x n game
def gen_single_game(m: int, n: int) -> None:
    """Run the three NE-finding algorithms on a single m x n game.

    Depending on the flags below, the game is either loaded from .npy
    files on disk or freshly generated via generate_game(); the wall
    time taken by the three solvers is printed.  A second mode
    (disabled by default) collects Lemke-Howson step counts over 300
    random games for sizes 5x5..8x8 and plots their distributions.

    NOTE(review): `m` and `n` only matter when a new random game is
    generated (flag_use_customized_game == False).
    """
    # Configuration
    flag_steps_distribusion = False   # collect/plot step-count distributions
    flag_single_game = True           # analyse one game with all three solvers
    flag_use_customized_game = True   # load game from .npy instead of random
    flag_save_game = False            # persist a freshly generated game
    # Alternative saved games (kept for reference):
    #outfileA = 'game_A_prison.npy'
    #outfileB = 'game_B_prison.npy'
    #outfileA = 'game_A_rock.npy'
    #outfileB = 'game_B_rock.npy'
    #outfileA = 'game_A.npy'
    #outfileB = 'game_B.npy'
    outfileA = 'game_A_53.npy'
    outfileB = 'game_B_53.npy'
    # outfileA = 'game_A_12.npy'
    # outfileB = 'game_B_12.npy'
    if flag_single_game == True:
        # Generate a new game or use customized game.
        if flag_use_customized_game == True:
            # Load a previously saved game from disk (files must exist).
            a = np.load(outfileA)
            b = np.load(outfileB)
            print(a)
            print(b)
        else:
            a, b = generate_game(m=m, n=n)
            if flag_save_game == True:
                # Save
                np.save(outfileA, a)
                np.save(outfileB, b)
        # NOTE(review): these two assignments unconditionally overwrite
        # whatever game was loaded/generated above with a hard-coded 2x3
        # game - looks like leftover debug code; confirm intent.
        a = [[3,1,5],[2,2,4]]
        b = [[2,1,0],[2,3,1]]
        t1 = datetime.datetime.now()
        print("Support Enumeration")
        find_nash_support_enum(a, b)
        print("Lemke Howson")
        find_nash_lemke_howson(a, b)
        print("Vertex Enumeration")
        find_nash_vertex_enum(a, b)
        t2 = datetime.datetime.now()
        # t1 - t2 is negative; abs() below yields the elapsed seconds.
        diff = t1 - t2
        tdiff_sec = abs(diff.total_seconds())
        print("Time cost for finding NE:", tdiff_sec)
    # Cal steps distribution
    if flag_steps_distribusion == True:
        count = 0
        arrs = []          # one list of step counts per game size
        counts = []
        for k in range(5, 9, 1):
            # Reuse the same list; a copy is appended to arrs below.
            counts.clear()
            for i in range(300):
                a, b = generate_game(m=k, n=k)
                # Using customized game
                # a = [[0,-1,1],[1,0,-1],[-1,1,0]]
                # b = [[0,1,-1],[-1,0,1],[1,-1,0]]
                # a = [[3,0],[0,2]]
                # b = [[2,0],[0,3]]
                # Loading existing game
                # outfileA = 'game_A.npy'
                # outfileB = 'game_B.npy'
                # a = np.load(outfileA)
                # b = np.load(outfileB)
                # Run
                # find_nash_lemke_howson returns its result's third
                # element - presumably a step count; verify (see NOTE
                # on that function).
                count = find_nash_lemke_howson(a, b)
                # Save
                # np.save(outfileA, a)
                # np.save(outfileB, b)
                print(count)
                # Discard outliers above 40 steps from the histogram.
                if count <= 40:
                    counts.append(count)
            arrs.append(counts.copy())
            title = "Random size:" + str(k) + " x " + str(k)
            # plot_histogram(counts,title)
        plot_four_hist(arrs[0], arrs[1], arrs[2], arrs[3])
plot_four_hist(arrs[0], arrs[1], arrs[2], arrs[3])
def plot_histogram(arrs, title):
    """Show a histogram of Lemke-Howson step counts.

    arrs  -- sequence of step counts (one value per solved game)
    title -- plot title
    """
    # Bug fix: the axis labels were swapped in the original - plt.hist
    # bins the step values along x and shows their frequencies along y.
    plt.xlabel('Steps to find a NE')
    plt.ylabel('counts')
    plt.title(title)
    plt.hist(arrs, bins=20)
    plt.show()
def plot_four_hist(arr1, arr2, arr3, arr4):
    """Histogram four step-count samples (game sizes 5x5..8x8) in a 2x2 grid.

    Each subplot caption reports the fitted normal mean (u) and std (s)
    of its sample, formatted to two decimals.
    """
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')
    axes = (ax1, ax2, ax3, ax4)
    samples = (arr1, arr2, arr3, arr4)
    # Fit a normal distribution to every sample for the captions below.
    fits = [norm.fit(sample) for sample in samples]
    for ax, sample in zip(axes, samples):
        ax.hist(sample, rwidth=0.8, color='sandybrown')
    plt.title("Find a NE in different Size in 300 rounds")
    for ax in axes:
        ax.set_ylabel("Counts")
    for ax, size, (mu, sigma) in zip(axes, range(5, 9), fits):
        caption = ("Game Size {0} x {0} ".format(size)
                   + ", u:" + format(mu, '.2f')
                   + ", s:" + format(sigma, '.2f'))
        ax.set_title(caption)
    plt.show()
def plot_cal_time(arrs):
    """Plot running time against the number of actions for the three solvers.

    arrs: [support_enum_times, lemke_howson_times, vertex_enum_times],
    equal-length sequences of seconds; the x axis starts at 2 actions.
    NOTE(review): plt.show() is never called here (nor by gen_multi_games) -
    confirm the figure is displayed elsewhere.
    """
    # debug
    print(arrs[0])
    print(arrs[1])
    print(arrs[2])
    #
    len1 = len(arrs[0])
    x = range(2, len1+2)
    plt.title('Finding NE in a symmetric game')
    plt.xlabel('The number of actions')
    plt.ylabel('Running Time in second')
    #plt.xlim(xmin=2)
    a0, = plt.plot(x, arrs[0], 'r')
    a1, = plt.plot(x, arrs[1], 'g')
    a2, = plt.plot(x, arrs[2], 'b')
    plt.legend((a0, a1, a2), ('Support Enum','LH','Vertex Enum'))
    plt.tight_layout()
def gen_multi_games(m, n):
    """Time the three NE-finding algorithms on square games of size 2..max(m, n).

    A fresh random game is generated for every (algorithm, size) pair, so the
    three timing curves are measured on different games.  Results are handed
    to plot_cal_time().
    """
    max1 = max(m, n)
    len_algorithms = 3
    arr_tdiffs = []
    # Loop all algorithm
    for ind in range(len_algorithms):
        arr_tdiff = []
        for i in range(2, max1+1, 1):
            a, b = generate_game(m=i, n=i)
            # time start for the various algorithms
            t1 = datetime.datetime.now()
            # pick up one algorithm
            if ind == 0:
                find_nash_support_enum(a, b)
            elif ind == 1:
                find_nash_lemke_howson(a, b)
            elif ind == 2:
                find_nash_vertex_enum(a, b)
            t2 = datetime.datetime.now()
            # time end
            diff = t1 - t2
            # abs() because t1 - t2 is negative (t2 is the later timestamp).
            tdiff_sec = abs(diff.total_seconds())
            arr_tdiff.append(tdiff_sec)
            print("Time cost for finding NE:", tdiff_sec)
        arr_tdiffs.append(arr_tdiff)
    # plotting
    plot_cal_time(arr_tdiffs)
# main -> find_nash -> random_game.support_enumeration()
# loop symmetric games from (2,2) to (10,10) actions
#gen_multi_games(12, 12)
# single game test
# Module entry point: analyze a single randomly generated 5x3 game.
gen_single_game(5, 3)
| [
"ryanpig@gmail.com"
] | ryanpig@gmail.com |
b0de702648187305cb55de4c208e37e3f49e804d | abd7a71f62eb1f85dd8725e2c5ed5f2ff9f43fbf | /tuples_comparing.py | 7a45e1b83fd00b5cffdd9c9883bc405af5b5b1c2 | [] | no_license | danielrhunt/python | 3f97df39f01ac9ef58d0c8d8aaf47a2c6abaeec8 | 6a5e5057b18dc55e317ca3027440765b409b1a43 | refs/heads/master | 2020-04-21T02:51:04.719043 | 2019-03-12T20:22:46 | 2019-03-12T20:22:46 | 169,267,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,748 | py | '''COMPARING TUPLES'''
'''comparison operators work on tuples, just like other sequences
Python starts by comparing the first element in each sequence
if they are equal, it goes on to the next element, and then on again, and again, until it finds elements that differ
subsequent elements are not considered (even if they are really big)'''
'''the SORT FUNCTION works the same way: it sorts primarily by first element, but in case of a tie, it moves onto the next element until the tie is broken
this feature lends itself to a pattern called DSU: DECORATE, SORT, and UNDECORATE'''

example = "but soft what light in yonder window breaks"
print(example)
print(type(example))

# DECORATE: pair every word with its length so that sorting compares lengths first.
words = example.split()
tt = [(len(word), word) for word in words]
print(tt)
print(type(tt))

# SORT: descending by length; equal lengths fall back to reverse-alphabetical order.
tt.sort(reverse = True)
print(tt)
print(type(tt))

# UNDECORATE: strip the lengths, keeping only the words in their new order.
res = [word for _, word in tt]
print(res)
print(type(res))
| [
"noreply@github.com"
] | danielrhunt.noreply@github.com |
c72500ece3f030d2acbe56879e08b891d53d15f9 | 408491958cece161e3f7b27d10926b5cd80b4b14 | /Python/Itertools/Compress the String/CompressTheString.py | 9d710345eef369aa0def85dc24211ea56380c842 | [] | no_license | Snoblomma/HackerRank | 279212173fbe0024ecb5e34fdbacc1c01faad7f5 | 1547913ada66d13fd59b06bc2781911c0895fbbf | refs/heads/master | 2021-07-11T09:16:43.323607 | 2021-03-23T22:52:11 | 2021-03-23T22:52:11 | 70,170,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | from itertools import groupby
# Read one line of digits and print run-length pairs, e.g. "1222" -> "(1, 1) (3, 2)".
w = list(input())
k = [(len(list(run)), int(digit)) for digit, run in groupby(w)]
print(" ".join(str(pair) for pair in k))
| [
"noreply@github.com"
] | Snoblomma.noreply@github.com |
619f32ab28b0c9805f69cf283ef37f4d1facbada | 2051155a91c262ec951b42cfd6eff52a8ad65707 | /prepare_submission_20180430.py | b60f36c44633830a71f9e6ed20521921cda16014 | [] | no_license | detrout/C1_mouse_limb_combined | 607af5402eebcc79aed68f6c9966d217fb79ee54 | 44d604bf86588bd89db227b20dac589253820bca | refs/heads/master | 2021-07-10T04:50:33.089606 | 2020-06-19T22:39:01 | 2020-06-19T22:39:01 | 152,499,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,426 | py | #!/usr/bin/python3
from argparse import ArgumentParser
import os
import collections
from lxml.html import fromstring
import json
import re
import requests
import glob
import pandas
from urllib.parse import urljoin
from rdflib import Graph, Literal, URIRef
from generate_combined_transcript_C1 import (
paper_433_experiment_files,
ASOF_RUN17_experiment_files
)
from woldrnaseq.models import load_experiments
from htsworkflow.util.opener import autoopen
from htsworkflow.util.rdfns import (
libraryOntology,
RDF,
RDFS,
)
from htsworkflow.util.rdfhelp import (
dump_model,
)
# 20031-20038 are good on flowcell HF7NTBCX2
# 20026-20030 are mixed on flowcell HF7NTBCX2
def main(cmdline=None):
    """Drive one submission: read the driver spreadsheet, select experiments
    to submit, and write the alias/barcode/flowcell TSVs plus the condor file
    that merges the per-lane fastqs.

    cmdline: optional argument list (argparse falls back to sys.argv when None).
    """
    parser = ArgumentParser()
    parser.add_argument('--first-tranche', default=False, action='store_true',
                        help='Use just the first tranche as experiment list')
    parser.add_argument('--name', required=True, help='submission name')
    parser.add_argument('-s', '--sheet', default=0, help='Sheet to use')
    parser.add_argument('--header', default=None, help="header row")
    parser.add_argument('filename', nargs=1, help='driver spreadsheet')
    args = parser.parse_args(cmdline)

    root_fastq_url = 'http://jumpgate.caltech.edu/runfolders/volvox02/'
    desplit = os.path.expanduser('~/proj/htsworkflow/htsworkflow/pipelines/desplit_fastq.py')
    header = int(args.header) if args.header is not None else None
    data = read_spreadsheet(args.filename[0], args.sheet, header)
    print(data.shape)
    # Two curated experiment lists; --first-tranche selects the 433-paper set.
    if args.first_tranche:
        experiment_file_list = paper_433_experiment_files.split('\n')
    else:
        experiment_file_list = ASOF_RUN17_experiment_files.split('\n')
    experiment_files = [ os.path.expanduser(x.strip()) for x in experiment_file_list]
    experiments = load_experiments(experiment_files)
    # Normalize replicate ids: strip the _mm10/_clean suffixes added by analysis.
    experiments['replicates'] = experiments['replicates'].apply(lambda l: [x.replace('_mm10', '').replace('_clean', '') for x in l])
    current_experiments = find_experiments_to_submit(experiments, data)

    aliases_tsv = '{}-aliases.tsv'.format(args.name)
    make_library_aliases(current_experiments, aliases_tsv)
    # Finding fastq urls crawls the runfolder web server, so the result is
    # cached in <name>-fastqs.tsv and simply reloaded on later runs.
    submission_fastqs_tsv = '{}-fastqs.tsv'.format(args.name)
    if not os.path.exists(submission_fastqs_tsv):
        fastq_urls = find_all_fastqs(root_fastq_url, current_experiments, submission_fastqs_tsv)
    fastq_urls = pandas.read_csv(submission_fastqs_tsv, sep='\t')
    barcodes_tsv = '{}-barcodes.tsv'.format(args.name)
    make_library_barcodes(fastq_urls, barcodes_tsv)
    metadata_tsv = '{}-flowcell-details.tsv'.format(args.name)
    metadata = make_metadata(fastq_urls, root_fastq_url, metadata_tsv)
    merge_file = '{}-merge-fastqs.condor'.format(args.name)
    make_desplit_condor(fastq_urls, metadata, desplit, root_fastq_url, merge_file)
def read_spreadsheet(filename, sheet, header=None):
    """Load the driver spreadsheet (.xlsx or .ods) into a DataFrame.

    :Parameters:
      - filename: path to the spreadsheet; the extension selects the reader
      - sheet: sheet name or index to parse
      - header: row number to use as the column header, or None

    :Raises: ValueError for an unsupported file extension.
    """
    if filename.endswith('xlsx'):
        # Bug fixes: previously this read a hard-coded filename (silently
        # ignoring the ``filename`` argument) through an undefined ``pd``
        # alias (the module imports ``pandas``), with an invalid ``sheet=``
        # keyword.
        data = pandas.read_excel(filename, sheet_name=sheet, header=header)
    elif filename.endswith('ods'):
        from pandasodf import ODFReader
        book = ODFReader(filename)
        data = book.parse(sheet, header=header)
    else:
        # Previously fell through and raised UnboundLocalError on ``data``.
        raise ValueError('unsupported spreadsheet format: {}'.format(filename))
    return data
def find_all_fastqs(root_fastq_url, experiments, output_file):
    """Get urls to the raw fastq files for all our replicates

    Crawls the runfolder web server for every (library, flowcell) pair,
    attaches the fastq urls plus the barcode / plate location parsed from
    the first fastq, and writes everything to output_file as TSV.

    Returns the resulting DataFrame.  Libraries that ran on more than one
    flowcell are additionally written to multiple_flowcells.tsv.
    """
    runfolder = Runfolder(root_fastq_url)
    records = []
    multi = []
    for record in find_replicate_flowcells(experiments):
        fastqs = []
        for flowcell in record['flowcells']:
            fastqs.extend(list(runfolder.find_fastqs(flowcell, record['library_id'])))
        record['fastq_urls'] = fastqs
        # Assumes barcode/location from the first fastq apply to the whole
        # library - TODO confirm.
        fluidigm_fields = parse_fluidigm(urljoin(root_fastq_url, fastqs[0]))
        record['barcode'] = fluidigm_fields['barcode']
        record['location'] = fluidigm_fields['location']
        records.append(record)
        if len(record['flowcells']) > 1:
            multi.append(record)
    df = pandas.DataFrame(records)
    df.to_csv(output_file, sep='\t', index=False)
    if len(multi) > 0:
        print('Warning, runs on multiple flowcells check multiple_flowcells.tsv')
        pandas.DataFrame(multi).to_csv('multiple_flowcells.tsv', sep='\t')
    return df
def find_replicate_flowcells(experiments):
    """Yield {experiment, library_id, flowcells} for every replicate.

    Library metadata is fetched as RDFa from the felcat LIMS (once per
    library) and queried with SPARQL for the flowcells the library ran on.
    """
    model = Graph()
    for i, row in experiments.iterrows():
        for extended_id in row.replicates:
            # extended_id looks like <library_id>_<plate location>[_...];
            # rebuild it from just the first two fields.
            library_id, location, *_ = extended_id.split('_')
            extended_id = library_id + '_' + location
            uri = URIRef('https://felcat.caltech.edu/library/{}/'.format(library_id))
            s = (uri, RDF['type'], libraryOntology['Library'])
            if s not in model:
                # Only fetch each library page once per run.
                model.parse(source=uri, format='rdfa')
            flowcells = model.query("""PREFIX libns: <http://jumpgate.caltech.edu/wiki/LibraryOntology#>
select distinct ?flowcell_id
where {
    ?library a libns:Library ;
        libns:has_lane ?lane .
    ?lane libns:flowcell ?flowcell .
    ?flowcell libns:flowcell_id ?flowcell_id .
}
""", initBindings={'library': uri})
            yield {'experiment': row.name,
                   'library_id': extended_id,
                   'flowcells': sorted([x[0].value for x in flowcells])
                   }
def find_experiments_to_submit(experiments, submission_table):
    """Select experiments having at least one replicate requested for submission.

    The first column of submission_table lists the requested replicate ids.
    Returns a DataFrame indexed by experiment name with the analysis dir and
    the subset of requested replicates; replicate ids that match no
    experiment are reported on stdout.
    """
    requested = set(submission_table[submission_table.columns[0]])
    missing = set(submission_table[submission_table.columns[0]])
    selected = []
    for name, row in experiments.iterrows():
        replicates = set(row.replicates)
        missing = missing - replicates
        overlap = requested & replicates
        if overlap:
            selected.append({
                'name': name,
                'analysis_dir': row.analysis_dir,
                'replicates': list(overlap),
            })
    print('Not found:', len(missing), sorted(missing))
    result = pandas.DataFrame(selected)
    result.set_index('name', inplace=True)
    return result
def find_seans_fastqs(experiments):
    """Collect the fastq.gz files for every replicate of every experiment.

    Globs <analysis_dir>/<library_id>*.fastq.gz for each replicate and groups
    the matches by experiment index label.

    :Returns: dict mapping experiment name to its list of fastq paths.
    :Raises: AssertionError when a replicate has no matching fastq files.
    """
    # Bug fix: ``filesets`` was referenced without ever being initialized,
    # so this function always raised NameError; it also never returned the
    # collected result.
    filesets = {}
    for name, row in experiments.iterrows():
        for library_id in row.replicates:
            pattern = os.path.join(row.analysis_dir, library_id + '*.fastq.gz')
            files = glob.glob(pattern)
            assert len(files) > 0
            filesets.setdefault(name, []).extend(files)
    return filesets
def make_library_aliases(experiments, aliases_tsv):
    """Write a two-column TSV mapping experiment name to its library aliases.

    Every replicate id becomes a "barbara-wold:<id>" alias; an experiment's
    aliases are sorted and comma-joined, and experiments appear in sorted
    order, one per line.
    """
    alias_map = {}
    for name, row in experiments.iterrows():
        new_aliases = ['barbara-wold:{}'.format(rep) for rep in row.replicates]
        alias_map.setdefault(name, []).extend(new_aliases)
    with open(aliases_tsv, 'wt') as outstream:
        for name in sorted(alias_map):
            joined = ','.join(sorted(alias_map[name]))
            outstream.write('{}\t{}{}'.format(name, joined, os.linesep))
def make_library_barcodes(experiments, barcode_tsv):
    """Write a TSV mapping experiment to a JSON list of barcode records.

    library_id is expected to look like "<plate_id>_<plate_location>[_...]".
    Each experiment's records are sorted by "<plate_id>_<plate_location>".
    """
    def plate_key(record):
        return record['plate_id'] + '_' + record['plate_location']

    barcode_map = {}
    for _, row in experiments.iterrows():
        plate_id, plate_location = row.library_id.split('_')[:2]
        barcode_map.setdefault(row.experiment, []).append({
            'barcode': row.barcode,
            'plate_id': plate_id,
            'plate_location': plate_location,
        })
    with open(barcode_tsv, 'wt') as outstream:
        for experiment in sorted(barcode_map):
            records = sorted(barcode_map[experiment], key=plate_key)
            outstream.write('{}\t{}{}'.format(experiment, json.dumps(records), os.linesep))
def make_desplit_condor(experiments, metadata, desplit_cmd, root_url, condor_file):
    """Write a condor submit file that merges per-lane fastqs per experiment.

    :Parameters:
      - experiments: experiments and their fastq urls (unused here)
      - metadata: (pandas.DataFrame) one row per fastq with ``experiment``
          and ``fastq_url`` columns
      - desplit_cmd: path to htsworkflow's desplit_fastq.py
      - root_url: accepted but unused
      - condor_file: target path for the condor submit file

    :Returns:
      True when there is nothing to merge, otherwise False.
    """
    header = """universe=vanilla
executable=/usr/bin/python3
error=log/desplit_fastq.$(process).out
output=log/desplit_fastq.$(process).out
log=log/desplit_fastq.log
environment="PYTHONPATH=/woldlab/loxcyc/home/diane/proj/htsworkflow"
requirements=(MACHINE != "wold-clst-3.woldlab") && (MACHINE != "wold-clst-4.woldlab")
"""
    # Group every fastq url under its experiment's merged output name.
    grouped = collections.defaultdict(list)
    for _, row in metadata.iterrows():
        grouped[row.experiment + '.fastq.gz'].append(row.fastq_url)

    body = []
    for output_name, urls in grouped.items():
        print(output_name)
        body.append('arguments="{} --gzip -o {} -s 0:50 {}"'.format(
            desplit_cmd, output_name, ' '.join(sorted(urls))))
        body.append('queue')
        body.append('')

    if not body:
        return True
    with open(condor_file, 'wt') as outstream:
        outstream.write(header)
        outstream.write(os.linesep.join(body))
    return False
def make_metadata(experiments, root_fastq_url, filename):
    """Build the per-fastq flowcell-details table and save it as TSV.

    experiments rows carry 'fastq_urls' as a stringified python list (written
    by find_all_fastqs and re-read from TSV), hence the [1:-1] slicing to
    strip the surrounding brackets and quotes.
    Returns the DataFrame sorted by (experiment, flowcell, barcode).
    """
    model = Graph()  # NOTE(review): unused here - looks like a leftover.
    metadata = []
    for i, row in experiments.iterrows():
        # Undo the repr() formatting of the cached url list.
        fastq_urls = [ urljoin(root_fastq_url, x[1:-1]) for x in row.fastq_urls[1:-1].split(', ')]
        for fastq_url in fastq_urls:
            fastq_data = parse_fluidigm(fastq_url)
            metadata.append({
                'experiment': row.experiment,
                'fastq_url': fastq_url,
                # machine url is hard-coded to sequencer 8.
                'machine': 'http://jumpgate.caltech.edu/sequencer/8',
                'flowcell': fastq_data['flowcell_id'],
                'lane': fastq_data['lane_number'],
                'barcode': fastq_data['barcode'],
                'read_length': fastq_data['read_length']
            })
    metadata = sorted(metadata, key=lambda row: (row['experiment'], row['flowcell'], row['barcode']))
    df = pandas.DataFrame(metadata, columns=['experiment', 'fastq_url', 'machine', 'flowcell', 'lane', 'barcode', 'read_length'])
    print(df.head())
    df.to_csv(filename, sep='\t', index=False)
    return df
# Field names captured by the fluidigm fastq file-name regex below.
fluidigm_fields = ['library_id', 'location', 'barcode', 'lane_number', 'read']

def parse_fluidigm(pathname):
    """Parse library metadata out of a fluidigm-style fastq path.

    Matches names like 12345_A1_ACGT-ACGT_L001_R1..., then augments the
    regex fields with the flowcell id / barcode parsed from the first fastq
    header and the length of the first sequence line.

    Returns the field dict, or None when the name does not match.
    """
    path, name = os.path.split(pathname)
    p = r'(?P<library_id>[0-9]{5})_'\
        '(?P<location>[A-H][0-9]{1,2})_'\
        '(?P<barcode>[AGCT-]+)_'\
        'L00(?P<lane_number>[1-8])_'\
        'R(?P<read>[1-3])'
    match = re.match(p, name)
    if match is not None:
        fields = { k: match.group(k) for k in fluidigm_fields }
        with autoopen(pathname, 'rt') as stream:
            fields.update(parse_fastq_header(stream.readline()))
            seq = stream.readline()
            # NOTE(review): len(seq) includes the trailing newline - confirm
            # whether read_length is meant to be off by one.
            fields['read_length'] = len(seq)
        return fields
def parse_fastq_header(header):
    """Extract the flowcell id and barcode from an Illumina fastq header line.

    Expects the standard layout
    "@<machine>:<run>:<flowcell>:... <read>:<filter>:<control>:<barcode>"
    with exactly one space separating the two halves.
    """
    read_id, extra = header.strip().split(' ')
    return {
        'flowcell_id': read_id.split(':')[2],
        'barcode': extra.split(':')[3],
    }
class Runfolder:
    """Minimal crawler for the Apache directory listings of a runfolder server.

    Directory listings are fetched lazily over HTTP and cached in self.pages,
    keyed by the url path relative to root_url.
    """
    def __init__(self, root_url):
        self.root_url = root_url
        self.pages = {}

    def load_index(self, url=''):
        """Fetch the directory listing for url and cache its entry names."""
        absolute_url = urljoin(self.root_url, url)
        response = requests.get(absolute_url)
        if response.status_code != 200:
            raise RuntimeError('Unable to access {}. Status {}'.format(absolute_url, response.status_code))
        tree = fromstring(response.content)
        # Apache fancy-index pages list entries as links in a table.
        rows = tree.xpath('*/table/tr/td/a')
        if len(rows) == 0:
            raise RuntimeError('{} is not a directory'.format(absolute_url))
        if rows[0].text == 'Parent Directory':
            rows.pop(0)
        self.pages[url] = [ x.text for x in rows ]

    def find_flowcell(self, flowcell):
        """Return the runfolder entry whose name contains the flowcell id (or None)."""
        root = ''
        if root not in self.pages:
            self.load_index(root)
        for name in self.pages[root]:
            if flowcell in name:
                return name

    def _find_unaligned(self, url):
        """Return the Unaligned*/ subdirectory of a runfolder, preferring dualIndex."""
        if url not in self.pages:
            self.load_index(url)
        for name in self.pages[url]:
            for unaligned in ['Unaligned.dualIndex/', 'Unaligned/']:
                if unaligned == name:
                    return url + name
        raise RuntimeError('Unable to find index in {}'.format(url))

    def _find_extended_id(self, url, extended_id):
        """Return the first entry under url whose name contains extended_id (or None)."""
        if url not in self.pages:
            self.load_index(url)
        for name in self.pages[url]:
            if extended_id in name:
                return url + name

    def find_fastqs(self, flowcell, extended_id):
        """Yield server-relative urls of the fastq.gz files for one library/flowcell."""
        runfolder = self.find_flowcell(flowcell)
        assert runfolder is not None
        unaligned = self._find_unaligned(runfolder)
        assert unaligned is not None
        project = self._find_extended_id(unaligned, extended_id)
        sample = self._find_extended_id(project, extended_id)
        if sample not in self.pages:
            self.load_index(sample)
        for name in self.pages[sample]:
            if 'fastq.gz' in name:
                yield sample + name
# Run the submission driver only when executed as a script.
if __name__ == '__main__':
    main()
| [
"diane@ghic.org"
] | diane@ghic.org |
21571df9c1f56860a1f9f82333fa6ad56924aaa8 | 51c255de526c7f0b6a0f8b232a184ba69128e7af | /02_QUICKVIEW_hand_made_quick_view_classification/02_1_classification_basic_perceptron.py | 6283dbafd55f24ccb14981fdf16675031075f88f | [] | no_license | jerrychen44/python_machine_learning_sr | eea7e2b873763c15b8582af05dfcd9b20c589123 | bcf5fc0188d4aa956af46496c4498a2bb42bdc3d | refs/heads/master | 2021-01-11T04:16:58.669737 | 2016-11-05T07:46:55 | 2016-11-05T07:46:55 | 71,191,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,400 | py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Resolve paths relative to this script so ./source/iris.csv is found
# regardless of the current working directory.
filepath=os.path.dirname(os.path.realpath(__file__))#root, where the apk_integration_test.py file is.
source_folder='source'
print(filepath)
data_csv_path=filepath+'/'+source_folder+'/iris.csv'
################################
#implement the perceptron class
###############################
class Perceptron(object):
    """Rosenblatt perceptron classifier.

    Parameters
    ----------
    eta : float
        Learning rate (between 0.0 and 1.0).
    n_iter : int
        Number of passes (epochs) over the training dataset.

    Attributes
    ----------
    w_ : 1d-array
        Weights after fitting; w_[0] holds the bias unit.
    errors_ : list
        Number of misclassifications in every epoch.
    """

    def __init__(self, eta=0.01, n_iter=10):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        """Learn the weights from the training data.

        X : array-like, shape = [n_samples, n_features]
        y : array-like, shape = [n_samples], class labels in {-1, 1}

        Returns self.
        """
        self.w_ = np.zeros(1 + X.shape[1])
        self.errors_ = []
        for _ in range(self.n_iter):
            misclassified = 0
            for sample, target in zip(X, y):
                # Perceptron rule: only wrong predictions produce an update.
                delta = self.eta * (target - self.predict(sample))
                self.w_[0] += delta
                self.w_[1:] += delta * sample
                if delta != 0.0:
                    misclassified += 1
            self.errors_.append(misclassified)
        return self

    def net_input(self, X):
        """Weighted sum of the inputs plus the bias unit."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        """Class label (+1 / -1) from the sign of the net input."""
        return np.where(self.net_input(X) >= 0.0, 1, -1)
#####################
#loading data set
# ref: https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data
# Attribute Information:
# 1. sepal length in cm
# 2. sepal width in cm
# 3. petal length in cm
# 4. petal width in cm
# 5. class:
# -- Iris Setosa
# -- Iris Versicolour
# -- Iris Virginica
##########################
def read_csv_pd():
    """Load the iris CSV (no header row) from data_csv_path and preview it."""
    #df = pd.read_csv('https://archive.ics.uci.edu/ml/'
    #                 'machine-learning-databases/iris/iris.data', header=None)
    df = pd.read_csv(data_csv_path, header=None)
    #df.to_csv(filepath+'/'+source_folder+'/iris.csv',index=0,header=False)
    print(df.tail())
    print(df.shape)
    return df
def plot_2d_data(df):
    """Scatter-plot the first 100 iris rows and return (X, y).

    y is +-1 encoded (setosa = -1, everything else = 1); X holds feature
    columns 0 and 2 (sepal length, petal length).
    """
    ######################
    # plot to take a look,Plotting the Iris data
    ####################
    # select setosa and versicolor
    y = df.iloc[0:100, 4].values
    y = np.where(y == 'Iris-setosa', -1, 1)
    # extract sepal length and petal length
    X = df.iloc[0:100, [0, 2]].values
    # plot data (first 50 rows are setosa, next 50 versicolor)
    plt.scatter(X[:50, 0], X[:50, 1],
                color='red', marker='o', label='setosa')
    plt.scatter(X[50:100, 0], X[50:100, 1],
                color='blue', marker='x', label='versicolor')
    plt.xlabel('petal length [cm]')
    plt.ylabel('sepal length [cm]')
    plt.legend(loc='upper left')
    plt.tight_layout()
    # plt.savefig('./iris_1.png', dpi=300)
    plt.show()
    return X,y
##############
#Training the perceptron model
############
def train_perceptron_model(X,y):
    """Fit a Perceptron (eta=0.1, 10 epochs), plot errors per epoch, return it."""
    #new a object
    ppn = Perceptron(eta=0.1, n_iter=10)
    ppn.fit(X, y)
    #show the error history
    #it shows the model converge at 6th round.
    plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
    plt.xlabel('Epochs')
    plt.ylabel('Number of misclassifications')
    plt.tight_layout()
    # plt.savefig('./perceptron_1.png', dpi=300)
    plt.show()
    #return the model object
    return ppn
#A function for plotting decision regions
def plot_decision_regions(X, y, classifier, resolution=0.02):
    """Draw the classifier's decision surface plus the training samples.

    resolution controls the grid spacing used when evaluating the classifier
    over the plane spanned by the two features of X.
    """
    from matplotlib.colors import ListedColormap

    # setup marker generator and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # plot the decision surface: predict every point of a grid that extends
    # one unit beyond the data range in each direction
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # plot class samples
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.8, c=cmap(idx),
                    marker=markers[idx], label=cl)
def plot_decision_plan(ppn,X,y):
    """Show the trained perceptron's decision regions over the training data."""
    plot_decision_regions(X, y, classifier=ppn)
    # NOTE(review): these axis labels look swapped relative to plot_2d_data
    # (which uses petal length on x, sepal length on y) - confirm.
    plt.xlabel('sepal length [cm]')
    plt.ylabel('petal length [cm]')
    plt.legend(loc='upper left')
    plt.tight_layout()
    # plt.savefig('./perceptron_2.png', dpi=300)
    plt.show()
    return 0
def main():
    """Load the data, visualize it, train the perceptron, plot the result."""
    data_df=read_csv_pd()
    X,y=plot_2d_data(data_df)
    ppn=train_perceptron_model(X,y)
    plot_decision_plan(ppn,X,y)
    return 0

# Runs at import time (no __main__ guard).
main()
| [
"jerrychen040@gmail.com"
] | jerrychen040@gmail.com |
b024aad18dfb436560ff9893287a0262f4a1f76d | d36546287721db2e97e0a4323e143163a14ce0b1 | /2016/19/an_elephant_named_joseph.py | 11737177b14154baaf7b8b0d01877cfaf5f14c2c | [
"Unlicense"
] | permissive | GeoffRiley/AdventOfCode | ca258edee05ad7a4b6e6db2e59b83e8879b48af0 | 567df9cb5645bc6cf4c22063a84a621039069311 | refs/heads/master | 2023-01-12T03:42:11.099541 | 2022-12-25T17:16:20 | 2022-12-25T17:16:20 | 225,139,440 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | from collections import deque
def an_elephant_named_joseph(inp, part1=True):
    """Solve AoC 2016 day 19 (Josephus-style elf gift stealing).

    part1: each elf steals from the elf on their immediate left.
    part2: each elf steals from the elf directly across the circle.
    Returns the number of the winning elf for ``inp`` elves.
    """
    circle = deque(range(1, inp + 1))
    if part1:
        # Rotate the current elf to the back, then eliminate their neighbor.
        while len(circle) > 1:
            circle.rotate(-1)
            circle.popleft()
        return circle[0]
    # part 2: keep the circle split in two halves so the elf "across" is
    # always at the tail of the back half.
    front = circle
    back = deque()
    while len(front) > len(back):
        back.append(front.pop())
    remaining = inp
    while remaining > 1:
        back.pop()                        # elf across the circle is eliminated
        back.appendleft(front.popleft())  # current elf moves past the midpoint
        if len(back) - len(front) > 1:    # rebalance the two halves
            front.append(back.pop())
        remaining -= 1
    return back[0]
if __name__ == '__main__':
    # Puzzle input: the number of elves in the circle.
    elf_count = 3_004_953
    print(f'Day 19, part 1: {an_elephant_named_joseph(elf_count)}')
    print(f'Day 19, part 2: {an_elephant_named_joseph(elf_count, False)}')

# Day 19, part 1: 1815603
# Day 19, part 2: 1410630
| [
"geoffr@adaso.com"
] | geoffr@adaso.com |
ede10fd47f66d7aee777757eb88519c3ff63a7ee | e7de3d7139e73589e2172384fd114ce0c3e3655c | /test_template.py | e7fcca2adac6e7bd26f9fd3df3517004ef23fd62 | [
"BSD-3-Clause"
] | permissive | jnieuwen/python-default-requirements | 5384d08c50c844e43999409191d1536cb4558164 | f04df6da2b8de0ff20e6c24cea7769383254a552 | refs/heads/master | 2021-04-27T04:21:33.206701 | 2020-06-23T13:47:02 | 2020-06-23T13:47:02 | 122,730,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | # Set up the paths.
import os
import sys
sys.path.append(os.path.abspath('.'))

# themodule is expected to live next to this test file (path appended above).
import themodule


def test_hallo():
    """hallo() returns exactly the string "hallo"."""
    assert themodule.hallo() == "hallo"


def test_nohallo():
    """hallo() must not return an unrelated string."""
    assert themodule.hallo() != "blaat"
| [
"jeroen.van.nieuwenhuizen@jeroen.se"
] | jeroen.van.nieuwenhuizen@jeroen.se |
f3df497c0894663eb52e0d21dc7c21eb0ae41a48 | 63f61f5a8fab6dd89b557666317b3cdc2a27e5af | /partyDataDownload.py | 40340068dabadd97f8ed0a347cbf6ee125baf78d | [] | no_license | Shan-Herald-Agency-for-News/MMElection2020_Scripting | 55d048235d903fafd69355528dc6df419ec27f39 | ea079f64803a49d08599eac2db6d3924f76746c3 | refs/heads/main | 2023-01-23T02:54:19.052822 | 2020-11-23T10:55:54 | 2020-11-23T10:55:54 | 306,220,889 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,866 | py | import json
import requests
import os
import shutil
partyInfoFile = "shanRegionParties.json"


def _download_party_assets(attr_key, subdir, extension):
    """Download one asset type for every party listed in partyInfoFile.

    attr_key:  attribute holding the source URL ('flag_image', 'policy', ...)
    subdir:    subdirectory of party/ to save into
    extension: file extension for the saved file, including the dot

    This consolidates three previously copy-pasted, near-identical download
    functions (which also redefined partyInfoFile a second time mid-file).
    """
    with open(partyInfoFile) as jFile:
        data = json.load(jFile)
    for d in data['data']:
        attr = d['attributes']
        url = attr[attr_key]
        # File name pattern: <english name>_<party id><extension>
        filename = "".join([attr['name_english'], "_", d['id'], extension])
        filepath = os.path.join("party", subdir, filename)
        r = requests.get(url, stream=True)
        if r.status_code == 200:
            # Decode gzip/deflate content while streaming straight to disk.
            r.raw.decode_content = True
            with open(filepath, 'wb') as f:
                shutil.copyfileobj(r.raw, f)
            print("Image successfully Downloaded: ", filename)
        else:
            print("Image Couldn\'t be retreived")


def flag_imageDownload():
    """Download every party's flag image into party/flag."""
    _download_party_assets('flag_image', 'flag', ".jpg")


def policy_download():
    """Download every party's policy PDF into party/policy."""
    _download_party_assets('policy', 'policy', ".pdf")


def seal_imageDownload():
    """Download every party's seal image into party/seal."""
    _download_party_assets('seal_image', 'seal', ".jpg")
# seal_imageDownload()
# flag_imageDownload()
# policy_download()
| [
"noernova666@gmail.com"
] | noernova666@gmail.com |
8239bdcbcbb37d192f3f3cff9af9527dbcdac038 | f66dfa2fc9bfbcc97259eb17e8b54a80727e1ce6 | /migrations/versions/061243403ebc_.py | 5a6c6f619b346ba996dd641501b2ef2f28fe2271 | [] | no_license | tam876/info3180-lab5 | 4ec8ea45e4ca513bd8cd6cf163d579b5ffba1ffa | 2fa6a57dd409788e52a1bee69c0048e843e478b7 | refs/heads/master | 2021-01-26T08:47:04.867508 | 2020-02-29T00:21:47 | 2020-02-29T00:21:47 | 243,389,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | """empty message
Revision ID: 061243403ebc
Revises:
Create Date: 2020-02-28 20:57:43.760065
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# Alembic revision identifiers for this migration.
revision = '061243403ebc'
down_revision = None  # first migration in the chain: nothing to revert to
branch_labels = None
depends_on = None


def upgrade():
    """Apply the migration: create user_profiles with a unique username."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('user_profiles',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('first_name', sa.String(length=80), nullable=True),
    sa.Column('last_name', sa.String(length=80), nullable=True),
    sa.Column('username', sa.String(length=80), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('username')
    )
    # ### end Alembic commands ###


def downgrade():
    """Revert the migration by dropping user_profiles."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('user_profiles')
    # ### end Alembic commands ###
| [
"noreply@github.com"
] | tam876.noreply@github.com |
2428e97f642a009fcaf507f939507bb69f6dffab | 916f9880e97016fa9501c52df2cdb10ad89488ec | /Sample.py | 738afcb91b8f3940e95dec5810f61580661be9c7 | [] | no_license | jamunagithub/Sample | 61a027f7fd4d86e3224caa5a24b8bab6affd1a89 | 1539c78553d4fbab2880c7ecec169350ccf7468e | refs/heads/master | 2023-05-15T08:51:56.202120 | 2021-06-12T09:45:40 | 2021-06-12T09:45:40 | 376,249,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38 | py | print ("This is the sample GIT code")
| [
"jamunamec@gmail.com"
] | jamunamec@gmail.com |
6f3c7087617984089152d4cc6b9c5fafc46b3f17 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano200.py | 73b13cf0c88c6b8338ab73ca1e913d4a70757784 | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,292 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# CMSSW cmsDriver-generated configuration: reprocess 2018 EGamma MINIAOD data
# into NANOAOD with the JetToolbox customisation applied.
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# Process every event in the input file (-1 = no limit)
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/270000/A2A03ED2-C2C7-D446-B850-478F84233086.root'),
    secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
    annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
    name = cms.untracked.string('Applications'),
    version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
    compressionAlgorithm = cms.untracked.string('LZMA'),
    compressionLevel = cms.untracked.int32(9),
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('NANOAOD'),
        filterName = cms.untracked.string('')
    ),
    fileName = cms.untracked.string('file:jetToolbox_nano_datatest200.root'),
    outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
# Conditions: data global tag matching the Sep2018 rereco of this dataset
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
"rsk146@scarletmail.rutgers.edu"
] | rsk146@scarletmail.rutgers.edu |
63c131575e15d03798d9cea07eca0474a3a6be3f | 4434118abceaad2388fb8b2e989154fea0e6c07e | /verletPygame.py | cb336f18cdd2b5bf2697ec79de23145d88a00782 | [] | no_license | Kelloggs/verletPygame | bc599803efa34ab0cbecf48be4f17c9545a91c35 | 146789e413313fa022e618f9184cf1270b7df682 | refs/heads/master | 2021-01-06T20:41:51.519263 | 2012-05-29T11:11:49 | 2012-05-29T11:11:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,857 | py | """
Verlet integration scheme for a deformable object using a mesh-based
Mass Spring System. This has just been written to test pygame.
Require: numpy, pygame
TODO:
- add less naive collision handling and response between objects
Author: Jens Cornelis
"""
from numpy import array, linalg, cross, dot
import math
import pygame
import sys
import time
# Global simulation state and tuning parameters.
world_size = 1000,700  # window size in pixels (width, height)
world_rect = pygame.Rect(0,0, world_size[0], world_size[1])
num_iterations = 10 #relaxation passes per frame for constraint solving
pickedParticle = None  # particle currently grabbed with the mouse, if any
mousePosition = 0,0  # last known mouse position
frames = 25  # target frame rate; also fixes the integration timestep
drawVelocities = True  # overlay velocity vectors on the particles
paused = False  # toggled with the 'p' key
class Material:
    """Bundle of physical parameters shared by all particles of one object.

    stiffness -- fraction of each constraint's error the solver corrects
    friction  -- damping applied when a particle rubs against a window border
    """
    def __init__(self, stiffness=0.3, friction=0.1):
        self.stiffness, self.friction = stiffness, friction
class MSSObject:
    """Base class for a deformable body: a mesh of particles connected by
    distance constraints (springs), drawn onto a pygame surface."""
    def __init__(self, vertices, indexedSprings, screen, material):
        """vertices: particle positions; indexedSprings: (i, j) index pairs
        into the vertex list; screen: pygame surface; material: Material."""
        self.screen = screen
        self.particles = []
        self.material = material
        #set up particles
        for vertex in vertices:
            self.particles.append(Particle(vertex, screen, self))
        self.mass = len(self.particles)
        #set up springs/constraints
        self.constraints = []
        for spring in indexedSprings:
            self.constraints.append(Constraint(self.particles[spring[0]], self.particles[spring[1]]))
        #initial draw
        self.draw()
    def setMass(self, mass):
        """Set the total object mass, split evenly over all particles."""
        self.mass = mass
        partialMass = mass / float(len(self.particles))
        for particle in self.particles:
            particle.mass = partialMass
    def get_rect(self):
        """Return the axis-aligned pygame.Rect bounding all particle positions."""
        x_min, y_min = world_size[0], world_size[1]
        x_max, y_max = 0, 0
        for particle in self.particles:
            if particle.x[0] > x_max:
                x_max = particle.x[0]
            if particle.x[0] < x_min:
                x_min = particle.x[0]
            if particle.x[1] > y_max:
                y_max = particle.x[1]
            if particle.x[1] < y_min:
                y_min = particle.x[1]
        return pygame.Rect(x_min, y_min, x_max - x_min, y_max - y_min)
    def update(self):
        """Refresh every particle's bounding rect from its position."""
        for particle in self.particles:
            particle.update()
    def draw(self):
        """Draw all springs as blue lines, then the particle sprites on top."""
        for constraint in self.constraints:
            pos1 = (constraint.p1.x[0], constraint.p1.x[1])
            pos2 = (constraint.p2.x[0], constraint.p2.x[1])
            pygame.draw.aaline(self.screen, (0,0,255), pos1, pos2)
        for particle in self.particles:
            particle.draw()
class DeformableSphere(MSSObject):
    """Roughly circular mass-spring object (hub plus rim particles).

    movePointOut() provides the collision response that satisfyConstraints()
    applies to particles of *other* objects.
    """
    def __init__(self, center, radius, vertices, indexedSprings, screen, material=Material()):
        # NOTE(review): the default Material() instance is created once at def
        # time and shared between all calls that omit `material`.
        MSSObject.__init__(self, vertices, indexedSprings, screen, material)
        self.radius = radius
        self.center = center
    def movePointOut(self, point):
        """Return the displacement pushing `point` radially out to the rest
        radius, or (0,0) when the point is outside the object.

        NOTE(review): self.center is the construction-time centre and is never
        updated as the object moves -- confirm that is intended.
        """
        direction = point - self.center
        directionLength = linalg.norm(direction)
        normalizedDirection = direction/directionLength
        distMax = 0
        # Largest projection of any particle onto the point's direction gives
        # the object's current (deformed) extent along that ray.
        for particle in self.particles:
            tmp = dot(normalizedDirection, particle.x - self.center)
            if tmp > distMax:
                distMax = tmp
        if linalg.norm(direction) < distMax:
            diff = (self.radius - directionLength)/directionLength
            return direction*diff
        else:
            return (0,0)
class DeformableCube(MSSObject):
    """Axis-aligned deformable box -- placeholder, construction not implemented."""
    def __init__(self, vertices, indexedSprings, screen, material=Material()):
        MSSObject.__init__(self, vertices, indexedSprings, screen, material)
        # BUG FIX: the original raised the undefined name `Error`, which
        # surfaced as a confusing NameError; use the standard exception.
        raise NotImplementedError("DeformableCube is not yet implemented")
    def movePointOut(self, point):
        # Collision-response stub: never displaces a colliding point.
        return (0, 0)
class Constraint:
    """Distance constraint (stick/spring) between two particles; the solver
    tries to keep them at the separation they had when it was created."""
    def __init__(self, p1, p2):
        self.p1 = p1
        self.p2 = p2
        separation = p1.x - p2.x
        self.restlength = linalg.norm(separation)
class Particle:
    """A point mass with a sprite, used as a node of a mass-spring mesh.

    Position is advanced with position Verlet (see verlet()), so velocity is
    implicit in the pair (x, oldx). `bv` is the sprite's bounding rect, used
    both for drawing and for mouse picking.
    """
    def __init__(self, x, screen, parentObject, mass = 1.0):
        #set up physical quantities
        self.x = x
        # NOTE(review): oldx initially aliases the very same array object as x;
        # the first verlet() step replaces it with a fresh copy, so this is benign.
        self.oldx = x
        self.force = array([0., 0.])
        self.mass = mass
        self.image = pygame.image.load("sphere.png")
        self.picked = False
        self.parentObject = parentObject
        self.velocity = array([0,0])
        #set bounding volume and position (rect width/2 is used as the radius)
        self.bv = self.image.get_rect()
        self.radius = self.bv[2]/2.0
        self.bv[0] = self.x[0] - self.radius
        self.bv[1] = self.x[1] - self.radius
        #initial drawing
        self.screen = screen
        self.draw()
    def draw(self):
        """Blit the sprite at the bounding rect; optionally overlay the
        velocity as a short red line."""
        self.screen.blit(self.image, self.bv)
        if drawVelocities:
            pygame.draw.aaline(self.screen, (255,0,0), self.x, self.x + 0.1*self.velocity)
    def update(self):
        """Re-centre the bounding rect on the current position."""
        self.bv[0] = self.x[0] - self.radius
        self.bv[1] = self.x[1] - self.radius
def computeForces(objects):
    """Reset every particle's force accumulator to gravity alone.

    Gravity points down the screen (+y) and is scaled by 100 to convert the
    9.81 m/s^2 constant into the pixel units used by the simulation.
    """
    for body in objects:
        for p in body.particles:
            p.force = array([0.0, p.mass * 9.81 * 100.0])
def computeFriction(objects):
    """Apply border friction: when a particle presses against a window edge,
    rescale its tangential motion by rewriting oldx (the implicit velocity),
    proportional to penetration depth and the material's friction value."""
    for obj in objects:
        for particle in obj.particles:
            friction = particle.parentObject.material.friction
            # right edge: damp the vertical (tangential) component
            if not particle.x[0] < (world_size[0] - particle.radius):
                delta = particle.x[1] - particle.oldx[1]
                depth = math.fabs(world_size[0] - particle.radius - particle.x[0])
                particle.oldx[1] = particle.x[1] - depth*friction*delta
            # left edge
            if not (particle.x[0] > particle.radius):
                delta = particle.x[1] - particle.oldx[1]
                depth = math.fabs(particle.radius - particle.x[0])
                particle.oldx[1] = particle.x[1] - depth*friction*delta
            # bottom edge: damp the horizontal component
            if not particle.x[1] < (world_size[1] - particle.radius):
                delta = particle.x[0] - particle.oldx[0]
                depth = math.fabs(world_size[1] - particle.radius - particle.x[1])
                particle.oldx[0] = particle.x[0] - depth*friction*delta
            # top edge
            if not (particle.x[1] > particle.radius):
                delta = particle.x[0] - particle.oldx[0]
                depth = math.fabs(particle.radius - particle.x[1])
                particle.oldx[0] = particle.x[0] - depth*friction*delta
def verlet(h, objects):
    """Advance all particles one position-Verlet step of size h.

    x_{n+1} = 2*x_n - x_{n-1} + a*h^2; velocity is recovered afterwards as
    the central difference (x_{n+1} - x_{n-1}) / (2h).
    """
    for body in objects:
        for p in body.particles:
            current = array([p.x[0], p.x[1]])
            previous = p.oldx
            accel = p.force / p.mass
            # In-place update keeps p.x pointing at the same array object.
            p.x += current - previous + accel * h * h
            p.velocity = (p.x - p.oldx) / (2.0 * h)
            p.oldx = current
def satisfyConstraints(objects):
    """Relax all constraints for num_iterations passes (Jakobsen-style solver).

    Each pass clamps particles into the window, moves every spring's endpoints
    toward the rest length (per-pass stiffness chosen so the cumulative
    correction over all passes matches the material stiffness), pins the
    mouse-picked particle to the cursor, and naively pushes particles out of
    other objects.
    """
    for val in range(num_iterations):
        for obj in objects:
            #check and solve world collisions
            for particle in obj.particles:
                particle.x[0] = min(max(particle.x[0], particle.radius), world_size[0] - particle.radius)
                particle.x[1] = min(max(particle.x[1], particle.radius), world_size[1] - particle.radius)
            #solve constraints deformable object
            for constraint in obj.constraints:
                p1 = constraint.p1
                p2 = constraint.p2
                delta = p2.x - p1.x
                deltalength = linalg.norm(delta)
                diff = (deltalength - constraint.restlength)/deltalength
                #make material stiffness linear to solver iterations and apply to
                #particle positions
                k = 1 - (1 - obj.material.stiffness)**(1.0/float(num_iterations))
                p1.x += delta*0.5*diff*k
                p2.x -= delta*0.5*diff*k
            #constraint for picked particle: snap it onto the mouse position
            if pickedParticle:
                delta = pickedParticle.x - mousePosition
                deltalength = linalg.norm(delta)
                if deltalength > 0:
                    diff = (0 - deltalength)/deltalength
                    pickedParticle.x += delta*diff
            # naive inter-object collision: push any particle that falls inside
            # another object's bounding rect out of that object
            for particle in obj.particles:
                for obj2 in objects:
                    if obj == obj2:
                        continue
                    else:
                        if obj2.get_rect().collidepoint(particle.x):
                            particle.x += obj2.movePointOut(particle.x)
def create2DBall(screen, center, radius, particles, material):
    """Generate a ball mesh: one hub particle plus `particles` rim particles,
    connected rim-to-rim and by spokes to the hub; returns a DeformableSphere."""
    vertices = [center]
    for k in range(particles):
        angle = k * 360./particles
        px = center[0] + radius*math.cos((angle*math.pi)/180.)
        py = center[1] + radius*math.sin((angle*math.pi)/180.)
        vertices.append(array([px, py]))
    springs = []
    for k in range(1, len(vertices) - 1):
        springs.append((k, k + 1))  # consecutive rim neighbours
        springs.append((0, k))      # spoke from the hub
    springs.append((1, len(vertices) - 1))  # close the rim ring
    springs.append((0, len(vertices) - 1))  # spoke to the last rim particle
    return DeformableSphere(center, radius, vertices, springs, screen, material)
def create2DCube(screen, rect, material):
    """Build a DeformableCube from an (x, y, w, h) rectangle: the four corner
    particles joined by the four edges plus both diagonals."""
    x, y, w, h = rect[0], rect[1], rect[2], rect[3]
    corners = [
        array([x, y]),
        array([x, y + h]),
        array([x + w, y]),
        array([x + w, y + h]),
    ]
    springs = (0, 1), (1, 2), (2, 3), (3, 0), (0, 2), (1, 3)
    return DeformableCube(corners, springs, screen, material)
def main():
    """Entry point: pygame setup, object creation, and the fixed-timestep
    simulate/draw loop. Mouse drag picks a particle; 'p' pauses, 's' saves a
    screenshot."""
    global pickedParticle, mousePosition, mouseClickPosition, paused
    #initialization of pygame and window
    pygame.init()
    screen = pygame.display.set_mode(world_size)
    #setting up objects
    objects = []
    mat1 = Material(0.9)
    mat2 = Material(0.6)
    sphere = create2DBall(screen, array([300., 300.]), 100., 8, mat2)
    objects.append(sphere)
    # sphere2 = create2DBall(screen, array([100., 100.]), 80., 7, mat1)
    # objects.append(sphere2)
    clock = pygame.time.Clock()
    #main simulation loop
    while True:
        #set clock of pygame to predefined frames for equal timesteps
        clock.tick(frames)
        for event in pygame.event.get():
            #stop the program if user wants us to
            if event.type == pygame.QUIT:
                sys.exit()
            #flag particle as picked if user clicked on it
            if event.type == pygame.MOUSEBUTTONDOWN:
                for obj in objects:
                    for particle in obj.particles:
                        if particle.bv.collidepoint(event.pos):
                            particle.picked = True
                            pickedParticle = particle
            if event.type == pygame.MOUSEMOTION:
                mousePosition = event.pos
            if event.type == pygame.MOUSEBUTTONUP:
                if pickedParticle:
                    pickedParticle.picked = False
                    pickedParticle = None
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_p:
                    paused = not paused
                if event.key == pygame.K_s:
                    pygame.image.save(screen, "screenshot" + str(time.time()) + ".jpg")
        if paused:
            continue
        #clear screen with background color
        screen.fill((255,255,255))
        #compute external forces
        computeForces(objects)
        computeFriction(objects)
        #compute timestep according to the frame rate set by pygame.Clock
        h=frames/1000.
        #do integration step and satisfy constraints
        verlet(h, objects)
        satisfyConstraints(objects)
        #update and draw particles
        for obj in objects:
            obj.update()
            obj.draw()
        #make everything visible
        pygame.display.flip()
#########################################
# Run the simulation when executed as a script.
if __name__ == '__main__':
    main()
#########################################
"mail@jenscornelis.de"
] | mail@jenscornelis.de |
161c51566a4e0d910527636a2197e923a1518102 | 84239d0809dca1c88a33d42e1cda225ae5512f0f | /models/models_3_2.py | dbb8dd0b87933e755fa9ddfed094e529d0f03ca4 | [] | no_license | siebeniris/Understanding-NN | 92e2e9662d9d56e2946dec151d9d8f13bb3ae776 | a6d1553aea8e137827a7b909461664c87f1db238 | refs/heads/master | 2021-05-10T22:43:29.609052 | 2018-01-20T06:05:20 | 2018-01-20T06:05:20 | 118,264,703 | 1 | 0 | null | 2018-01-20T17:25:00 | 2018-01-20T17:25:00 | null | UTF-8 | Python | false | false | 7,861 | py | from tensorflow.python.ops import nn_ops, gen_nn_ops
import tensorflow as tf
class MNIST_CNN:
    """MNIST classifier (TensorFlow 1.x): three conv+maxpool stages followed
    by a 625-unit dense layer and a 10-way linear readout; all layers are
    bias-free so relevance propagation has only weights to redistribute over."""
    def __init__(self, name):
        self.name = name
    def __call__(self, X, reuse=False):
        """Build the network on an input batch X of flattened 28x28 images.

        Returns ([X_img, conv1, pool1, conv2, pool2, conv3, pool3, flat,
        dense4, prediction], logits); the activation list is what the Taylor
        decomposition class below consumes.
        """
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            with tf.variable_scope('layer0'):
                X_img = tf.reshape(X, [-1, 28, 28, 1])
            # Convolutional Layer #1 and Pooling Layer #1
            with tf.variable_scope('layer1'):
                conv1 = tf.layers.conv2d(inputs=X_img, filters=32, kernel_size=[3, 3], padding="SAME", activation=tf.nn.relu, use_bias=False)
                pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], padding="SAME", strides=2)
            # Convolutional Layer #2 and Pooling Layer #2
            with tf.variable_scope('layer2'):
                conv2 = tf.layers.conv2d(inputs=pool1, filters=64, kernel_size=[3, 3], padding="SAME", activation=tf.nn.relu, use_bias=False)
                pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], padding="SAME", strides=2)
            # Convolutional Layer #3 and Pooling Layer #3
            with tf.variable_scope('layer3'):
                conv3 = tf.layers.conv2d(inputs=pool2, filters=128, kernel_size=[3, 3], padding="SAME", activation=tf.nn.relu, use_bias=False)
                pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], padding="SAME", strides=2)
            # Dense Layer with Relu
            with tf.variable_scope('layer4'):
                flat = tf.reshape(pool3, [-1, 128 * 4 * 4])
                dense4 = tf.layers.dense(inputs=flat, units=625, activation=tf.nn.relu, use_bias=False)
            # Logits (no activation) Layer: L5 Final FC 625 inputs -> 10 outputs
            with tf.variable_scope('layer5'):
                logits = tf.layers.dense(inputs=dense4, units=10, use_bias=False)
            prediction = tf.nn.softmax(logits)
        return [X_img, conv1, pool1, conv2, pool2, conv3, pool3, flat, dense4, prediction], logits
    @property
    def vars(self):
        """Trainable variables created under this model's variable scope."""
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
class Taylor:
    """Layer-wise relevance propagation (deep Taylor decomposition) over a
    recorded stack of network activations (TensorFlow 1.x graph mode).

    `activations` is the forward-pass tensor list ordered input -> output and
    `weights` the matching kernel list; both are reversed in place here so the
    backward relevance pass can walk them front to back. `last_ind` ends up
    indexing (in the reversed list) the layer sitting directly above the
    input, where the z^B rule must be applied instead of the z+ rule.
    """
    def __init__(self, activations, weights, conv_ksize, pool_ksize, conv_strides, pool_strides, name):
        self.last_ind = len(activations)
        for op in activations:
            self.last_ind -= 1
            if any([word in op.name for word in ['conv', 'pooling', 'dense']]):
                break
        self.activations = activations
        self.activations.reverse()
        self.weights = weights
        self.weights.reverse()
        self.conv_ksize = conv_ksize
        self.pool_ksize = pool_ksize
        self.conv_strides = conv_strides
        self.pool_strides = pool_strides
        self.name = name
    def __call__(self, logit):
        """Build the relevance-propagation graph for one output unit `logit`
        and return the relevance map over the input layer."""
        with tf.name_scope(self.name):
            Rs = []
            j = 0
            for i in range(len(self.activations) - 1):
                # BUG FIX: these integer comparisons previously used `is`,
                # which only works by accident of CPython's small-int cache;
                # `==` is the correct comparison.
                if i == self.last_ind:
                    # Layer directly above the input: apply the z^B input rule.
                    if 'conv' in self.activations[i].name.lower():
                        Rs.append(self.backprop_conv_input(self.activations[i + 1], self.weights[j], Rs[-1], self.conv_strides))
                    else:
                        Rs.append(self.backprop_dense_input(self.activations[i + 1], self.weights[j], Rs[-1]))
                    continue
                if i == 0:
                    # Seed the pass with the selected output unit's activation
                    # column, then backprop it through the readout weights.
                    Rs.append(self.activations[i][:,logit,None])
                    Rs.append(self.backprop_dense(self.activations[i + 1], self.weights[j][:,logit,None], Rs[-1]))
                    j += 1
                    continue
                elif 'dense' in self.activations[i].name.lower():
                    Rs.append(self.backprop_dense(self.activations[i + 1], self.weights[j], Rs[-1]))
                    j += 1
                elif 'reshape' in self.activations[i].name.lower():
                    # Flatten layers only change shape; relevance passes through.
                    shape = self.activations[i + 1].get_shape().as_list()
                    shape[0] = -1
                    Rs.append(tf.reshape(Rs[-1], shape))
                elif 'conv' in self.activations[i].name.lower():
                    Rs.append(self.backprop_conv(self.activations[i + 1], self.weights[j], Rs[-1], self.conv_strides))
                    j += 1
                elif 'pooling' in self.activations[i].name.lower():
                    # Apply average pooling backprop regardless of type of pooling layer used, following recommendations by Montavon et al.
                    # Uncomment code below if you want to apply the winner-take-all redistribution policy suggested by Bach et al.
                    #
                    # if 'max' in self.activations[i].name.lower():
                    #     pooling_type = 'max'
                    # else:
                    #     pooling_type = 'avg'
                    # Rs.append(self.backprop_pool(self.activations[i + 1], Rs[-1], self.pool_ksize, self.pool_strides, pooling_type))
                    Rs.append(self.backprop_pool(self.activations[i + 1], Rs[-1], self.pool_ksize, self.pool_strides, 'avg'))
                else:
                    # BUG FIX: `Error` was an undefined name (this branch would
                    # have died with a NameError); raise a standard exception.
                    raise ValueError('Unknown operation.')
            return Rs[-1]
    def backprop_conv(self, activation, kernel, relevance, strides, padding='SAME'):
        """z+ rule for a conv layer: redistribute `relevance` onto `activation`
        through the positive part of the kernel."""
        W_p = tf.maximum(0., kernel)
        z = nn_ops.conv2d(activation, W_p, strides, padding) + 1e-10
        s = relevance / z
        c = nn_ops.conv2d_backprop_input(tf.shape(activation), W_p, s, strides, padding)
        return activation * c
    def backprop_pool(self, activation, relevance, ksize, strides, pooling_type, padding='SAME'):
        """Redistribute relevance through a pooling layer: proportionally for
        'avg', winner-take-all otherwise."""
        if pooling_type.lower() in 'avg':
            z = nn_ops.avg_pool(activation, ksize, strides, padding) + 1e-10
            s = relevance / z
            c = gen_nn_ops._avg_pool_grad(tf.shape(activation), s, ksize, strides, padding)
            return activation * c
        else:
            z = nn_ops.max_pool(activation, ksize, strides, padding) + 1e-10
            s = relevance / z
            c = gen_nn_ops._max_pool_grad(activation, z, s, ksize, strides, padding)
            return activation * c
    def backprop_dense(self, activation, kernel, relevance):
        """z+ rule for a dense layer."""
        W_p = tf.maximum(0., kernel)
        z = tf.matmul(activation, W_p) + 1e-10
        s = relevance / z
        c = tf.matmul(s, tf.transpose(W_p))
        return activation * c
    def backprop_conv_input(self, X, kernel, relevance, strides, padding='SAME', lowest=0., highest=1.):
        """z^B rule for the conv layer touching the box-constrained input,
        with pixel values assumed to lie in [lowest, highest]."""
        W_p = tf.maximum(0., kernel)
        W_n = tf.minimum(0., kernel)
        L = tf.ones_like(X, tf.float32) * lowest
        H = tf.ones_like(X, tf.float32) * highest
        z_o = nn_ops.conv2d(X, kernel, strides, padding)
        z_p = nn_ops.conv2d(L, W_p, strides, padding)
        z_n = nn_ops.conv2d(H, W_n, strides, padding)
        z = z_o - z_p - z_n + 1e-10
        s = relevance / z
        c_o = nn_ops.conv2d_backprop_input(tf.shape(X), kernel, s, strides, padding)
        c_p = nn_ops.conv2d_backprop_input(tf.shape(X), W_p, s, strides, padding)
        c_n = nn_ops.conv2d_backprop_input(tf.shape(X), W_n, s, strides, padding)
        return X * c_o - L * c_p - H * c_n
    def backprop_dense_input(self, X, kernel, relevance, lowest=0., highest=1.):
        """z^B rule for a dense layer touching the box-constrained input."""
        W_p = tf.maximum(0., kernel)
        W_n = tf.minimum(0., kernel)
        L = tf.ones_like(X, tf.float32) * lowest
        H = tf.ones_like(X, tf.float32) * highest
        z_o = tf.matmul(X, kernel)
        z_p = tf.matmul(L, W_p)
        z_n = tf.matmul(H, W_n)
        z = z_o - z_p - z_n + 1e-10
        s = relevance / z
        c_o = tf.matmul(s, tf.transpose(kernel))
        c_p = tf.matmul(s, tf.transpose(W_p))
        c_n = tf.matmul(s, tf.transpose(W_n))
        return X * c_o - L * c_p - H * c_n
| [
"1202kbs@gmail.com"
] | 1202kbs@gmail.com |
1d13b98d948da4230d205362cbabd0696af80cf6 | 2c0e1786044c2818be20062a1c8f75990c61ae26 | /argparse/5_conflicting_options.py | 55541fc040e8ba860bdcc2e766342e231fbcbf87 | [] | no_license | jukim-greenventory/python-practice | 51802bf354c36049f41f539778f576d7e9560305 | 4a1bb17c08204edf6954196c29f6e40a88274ef6 | refs/heads/master | 2023-05-31T21:15:37.770161 | 2021-06-10T12:38:57 | 2021-06-10T12:38:57 | 375,691,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | """
Letโs introduce a third one, add_mutually_exclusive_group().
It allows for us to specify options that conflict with each other.
Letโs also change the rest of the program so that the new functionality makes more sense:
weโll introduce the --quiet option, which will be the opposite of the --verbose one:
"""
# Build the CLI: an x**y calculator whose -v/--verbose and -q/--quiet flags
# are mutually exclusive (argparse rejects a command line that passes both).
import argparse
parser = argparse.ArgumentParser(description="calculate X to the power of Y")
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="store_true")
group.add_argument("-q", "--quiet", action="store_true")
parser.add_argument("x", type=int, help="the base")
parser.add_argument("y", type=int, help="the exponent")
args = parser.parse_args()
answer = args.x ** args.y
print("Running '{}'".format(__file__))
# Pick the verbosity of the result line; the default is the compact form.
if args.quiet:
    print(answer)
elif args.verbose:
    print("{} to the power {} equals {}".format(args.x, args.y, answer))
else:
    print("{}^{} == {}".format(args.x, args.y, answer))
| [
"junseok.kim@greenventory.de"
] | junseok.kim@greenventory.de |
b20893d703e00928bbb7b86e4af0fa4b71d78cca | e569c41ec81382630693d3bc0a163c06a6a23d52 | /PythonProgramming/ICP3/Source/3.py | a3abe312b2325a5978f48b2d5217e9b574de48a9 | [] | no_license | Sravanthi-Gogadi/PythonDeeplearningCourse | 20a074763283c3bdbcbc3846576509c5e7a733e9 | 037e94f19362635dd6911cdbd70f60830ec51f5c | refs/heads/master | 2020-03-19T08:21:18.901163 | 2018-07-28T04:39:00 | 2018-07-28T04:39:00 | 136,197,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | from bs4 import BeautifulSoup
import urllib.request
import pandas as pd
import csv
# using tabulate to display the pandas dataframe
from tabulate import tabulate
# Wikipedia page listing Indian state and union-territory capitals
url = "https://en.wikipedia.org/wiki/List_of_state_and_union_territory_capitals_in_India"
# use urllib to open the url
res = urllib.request.urlopen(url)
plain_text = res
# Use beautiful soup to parse the content of the webpage
soup = BeautifulSoup(plain_text, "html.parser")
# Print the title of the web page
print(soup.find('title').string)
# Collect all the anchor tags in the webpage
result_list = soup.findAll('a')
# Print the href target of each anchor (None for anchors without one)
for i in result_list:
    link = i.get('href')
    print(link)
# Read the sortable wiki table(s) from the webpage
result_table = soup.findAll('table', {'class': 'wikitable sortable plainrowheaders'})
for tr in result_table:
    table_data = tr.findAll('td')
    table_head = tr.findAll('th')
    # Print td and th
    print(table_data, table_head)
# To display the list of union territories (the second table on the page)
table = soup.find_all('table')[1]
# using pandas object read the table and assign header
df = pd.read_html(str(table),header=0)
# display the output
print( tabulate(df[0], headers='keys', tablefmt='psql') )
| [
"sravanthigogadi@gmail.com"
] | sravanthigogadi@gmail.com |
fafedd086eb52ca3a26667cd17b01a87c8ca5b04 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_155/791.py | f4c857964fa46a84265cc71f3b483d20abda438d | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | __author__ = 'rrampage'
# Number of test cases, given on the first input line.
t = int(input())
def input_format():
    """Read one test-case line of the form "N <digits>" from stdin and return
    the shyness-digit string as a list of ints (the leading count is unused)."""
    digits = input().split()[1]
    return [int(ch) for ch in digits]
def ovation(aud):
    """Return the minimum number of extra audience members needed so the
    ovation never stalls (Code Jam "Standing Ovation").

    aud[i] is how many people have shyness level i: they stand only once at
    least i others are already standing.
    """
    extras = 0
    standing = 0
    for shyness, count in enumerate(aud):
        if count == 0:
            continue
        if standing < shyness:
            # Too few people standing for this group: invite the difference.
            extras += shyness - standing
            standing = shyness
        standing += count
    return extras
# Emit one "Case #k: answer" line per test case.
for x in range(t):
    print("Case #%d: %d" % (x+1, ovation(input_format())))
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
08028b087d65af74817e4362ee88f3cb8f285acb | ece03546e09b3880222598a6b3955281341283ae | /Scrapy/DouyuPicture/DouyuPicture/settings.py | df93d3fce09547f77e38fb6afd1e5729ab471bba | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | haochen95/python_tutorial | e24bdf603183793680233499adcc94c10d7e90da | ec02114a40b6c42fc54a5504b79d56f4ed1abef8 | refs/heads/master | 2020-04-18T03:26:19.331623 | 2019-02-26T04:47:14 | 2019-02-26T04:47:14 | 167,198,727 | 0 | 3 | Apache-2.0 | 2019-01-28T07:20:59 | 2019-01-23T14:42:05 | Jupyter Notebook | UTF-8 | Python | false | false | 3,337 | py | # -*- coding: utf-8 -*-
# Scrapy settings for DouyuPicture project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# Project identity: bot name and the package where Scrapy finds/creates spiders.
BOT_NAME = 'DouyuPicture'
SPIDER_MODULES = ['DouyuPicture.spiders']
NEWSPIDER_MODULE = 'DouyuPicture.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'DouyuPicture (+http://www.yourdomain.com)'
# Obey robots.txt rules (requests disallowed by the target site are skipped)
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers: present a desktop-Chrome User-Agent
# so the target site serves the crawler its regular pages.
DEFAULT_REQUEST_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98',
#  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#  'Accept-Language': 'en',
}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'DouyuPicture.middlewares.DouyupictureSpiderMiddleware': 543,
# }
# Local directory where downloaded pictures are saved.
# NOTE(review): this is a custom key, presumably read by DouyupicturePipeline;
# Scrapy's built-in ImagesPipeline expects IMAGES_STORE instead -- confirm.
IMAGE_STORE = "C:/Users/haoch/Desktop/Programming/Python/Scrapy_project01/Image/"
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'DouyuPicture.middlewares.DouyupictureDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# (the integer is the pipeline order, 0-1000; lower runs first)
ITEM_PIPELINES = {
    'DouyuPicture.pipelines.DouyupicturePipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"haochen273@gmail.com"
] | haochen273@gmail.com |
e5d1427da5952429304f092fff6d772d00a430d1 | 2865d34e84abea09836c9a84e1aa02ba262b8f6d | /Distances/superior.py | f02f8ccc65fe2fee530e93820de28977d1106921 | [] | no_license | magabydelgado/numpy-formulas | f52119ef1387f078e1527c80343ca0de2336bc9f | 093657d4a23dfe82685595254aae50e0c6e46afb | refs/heads/main | 2023-05-08T14:06:48.142258 | 2021-05-25T06:16:41 | 2021-05-25T06:16:41 | 379,125,857 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | import numpy as np
# Chebyshev distance (a.k.a. L-infinity metric, maximum metric, chessboard
# distance): the distance between two vectors is the greatest of their
# absolute coordinate differences. Named after Pafnuty Chebyshev; it equals
# the number of king moves between two squares on a chessboard.
objA = [22, 1, 42, 10]
objB = [20, 0, 36, 8]
npA = np.array(objA)
npB = np.array(objB)
# Elementwise |a - b|, then take the largest component.
# Equivalent: np.linalg.norm(npA - npB, ord=np.inf) (returns a float instead).
chebyshev = np.abs(npA - npB).max()
print(chebyshev)
"mangelladen@gmail.com"
] | mangelladen@gmail.com |
21cf2eb653fc11c07a6ebf96569ea5090c294c25 | a42d240a05ddb7e77f9cd517451fde2c82d5156b | /Problem-089.py | f624636ca0bd14d29e71b74d507211f619c0500b | [] | no_license | spirosrap/Project-Euler | 3d7edc05c677a5edfa084308380839e2c018157e | 83c2a2467b15426216483bfa34aeeb7a21728a16 | refs/heads/master | 2016-09-06T08:07:18.383648 | 2013-11-08T13:19:15 | 2013-11-08T13:19:15 | 3,463,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,053 | py | import math
import re
#Define exceptions: all derive from RomanError so callers can catch one base
class RomanError(Exception): pass
class OutOfRangeError(RomanError): pass
class NotIntegerError(RomanError): pass
class InvalidRomanNumeralError(RomanError): pass
#Define digit mapping, largest value first and including the subtractive
#pairs (CM, CD, XC, XL, IX, IV) so a greedy scan emits canonical numerals
romanNumeralMap = (('M', 1000),
                   ('CM', 900),
                   ('D', 500),
                   ('CD', 400),
                   ('C', 100),
                   ('XC', 90),
                   ('L', 50),
                   ('XL', 40),
                   ('X', 10),
                   ('IX', 9),
                   ('V', 5),
                   ('IV', 4),
                   ('I', 1))
def toRoman(n):
    """Convert an integer in 1..4999 to a Roman numeral string.

    Raises OutOfRangeError for values outside 1..4999 and NotIntegerError
    for non-integral values. (Python 2 source: note the old raise syntax.)
    """
    if not (0 < n < 5000):
        raise OutOfRangeError, "number out of range (must be 1..4999)"
    if int(n) != n:
        raise NotIntegerError, "decimals can not be converted"
    result = ""
    # Greedy: repeatedly emit the largest numeral whose value still fits.
    for numeral, integer in romanNumeralMap:
        while n >= integer:
            result += numeral
            n -= integer
    return result
#Define pattern to detect valid Roman numerals
romanNumeralPattern = re.compile("""
^ # beginning of string
M{0,4} # thousands - 0 to 4 M's
(CM|CD|D?C{0,3}) # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C's),
# or 500-800 (D, followed by 0 to 3 C's)
(XC|XL|L?X{0,3}) # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's),
# or 50-80 (L, followed by 0 to 3 X's)
(IX|IV|V?I{0,3}) # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's),
# or 5-8 (V, followed by 0 to 3 I's)
$ # end of string
""" ,re.VERBOSE)
def fromRoman(s):
    """Convert a Roman numeral string to an integer.

    Raises InvalidRomanNumeralError for an empty or malformed numeral
    (validated against romanNumeralPattern). Python 2 raise syntax.
    """
    if not s:
        raise InvalidRomanNumeralError, 'Input can not be blank'
    if not romanNumeralPattern.search(s):
        raise InvalidRomanNumeralError, 'Invalid Roman numeral: %s' % s
    result = 0
    index = 0
    # Greedy scan: consume each numeral from the front as often as it repeats.
    for numeral, integer in romanNumeralMap:
        while s[index:index+len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    return result
def int_to_roman(input):
   """
   Convert an integer to Roman numerals.

   NOTE(review): unlike the doctests that originally accompanied this recipe,
   this version performs no range validation -- 0 and negative values simply
   yield an empty string. Only the argument's type is checked.

   >>> print int_to_roman(2000)
   MM
   >>> print int_to_roman(1999)
   MCMXCIX
   """
   if type(input) != type(1):
      raise TypeError, "expected integer, got %s" % type(input)
   ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
   nums = ('M', 'CM', 'D', 'CD','C', 'XC','L','XL','X','IX','V','IV','I')
   result = ""
   # Greedy subtraction over the value table; `count` is how many copies of
   # each numeral fit into the remaining value.
   for i in range(len(ints)):
      count = int(input / ints[i])
      result += nums[i] * count
      input -= ints[i] * count
   return result
def number(char):
    """Return the integer value of one Roman numeral letter.

    Mirrors the original behaviour exactly: an unrecognised character yields
    the string 'error' instead of raising.
    """
    values = {'M': 1000, 'D': 500, 'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1}
    return values.get(char, 'error')
def romanToNumber(s):
    """Convert a Roman numeral string to an integer.

    Scans left to right; when a letter outvalues its predecessor, the
    predecessor was subtractive, so its already-added value is removed twice
    and the new value added once (e.g. IX: +1, then +10-2 = 9).
    """
    total = 0
    previous = 10**7  # sentinel larger than any numeral value
    for char in s:
        value = number(char)
        if previous < value:
            total += value - 2*previous
        else:
            total += value
        previous = value
    return total
# Ad-hoc sanity checks (Python 2 print statements).
print romanToNumber('MMMMCCCXIV')
print romanToNumber('MMDCCLXIX')
print romanToNumber('CMLXXXVII')
print toRoman(romanToNumber('MMMMCCCLXXXXVII'))
# Project Euler 89: total characters saved by rewriting every numeral in
# roman.txt in its minimal form.
lines = [line.strip() for line in open('roman.txt')]
print lines
sum=0  # accumulated saving (note: shadows the builtin 'sum')
for s in lines:
    sum+=len(s)-len(toRoman(romanToNumber(s)))
print sum
| [
"spirosrap@gmail.com"
] | spirosrap@gmail.com |
1f0050636b553377350ef958e53062abe0a0aec4 | 2db7597686f33a0d700f7082e15fa41f830a45f0 | /Python/String/266. ๅๆๆๅ.py | 2dba117a4cfd0caece5666e521229f85abe7fe4f | [] | no_license | Leahxuliu/Data-Structure-And-Algorithm | 04e0fc80cd3bb742348fd521a62bc2126879a70e | 56047a5058c6a20b356ab20e52eacb425ad45762 | refs/heads/master | 2021-07-12T23:54:17.785533 | 2021-05-17T02:04:41 | 2021-05-17T02:04:41 | 246,514,421 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | '''
ๅฅๆฐไธช็charๆๅคๅช่ฝๆไธไธช
'''
# defaultdict(int) acts as a zero-initialised character counter below.
from collections import defaultdict
class Solution:
    def canPermutePalindrome(self, s: str) -> bool:
        """True iff some permutation of s is a palindrome, i.e. at most one
        character occurs an odd number of times."""
        odd_chars = set()
        for ch in s:
            # Toggle membership: ch is in the set iff its count so far is odd.
            if ch in odd_chars:
                odd_chars.discard(ch)
            else:
                odd_chars.add(ch)
        return len(odd_chars) <= 1
"leahxuliu@gmail.com"
] | leahxuliu@gmail.com |
e432f4e76d689a36074aaa8adfdda869d6809a85 | 491c298283c3af8ca5188e7191758512b758fdc7 | /examples/ex_pyside.py | 4f9f43350bb17fa137396aac3a0d85c186502fe3 | [
"BSD-3-Clause"
] | permissive | merydwin/idascrtipt | 0bda6f2253dd94698a82cb09a7a1855cbced6221 | 431e04847e55adbb1d263aa2aadc2d489d068f50 | refs/heads/master | 2021-01-22T04:48:40.353877 | 2015-03-02T15:54:20 | 2015-03-02T15:54:20 | 38,051,147 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | from idaapi import PluginForm
from PySide import QtGui, QtCore
class MyPluginFormClass(PluginForm):
    def OnCreate(self, form):
        """Build the UI once IDA instantiates the form."""
        self.parent = self.FormToPySideWidget(form)
        self.PopulateForm()
    def PopulateForm(self):
        """Lay the greeting labels out vertically on the parent widget."""
        layout = QtGui.QVBoxLayout()
        greetings = (
            "Hello from <font color=red>PySide</font>",
            "Hello from <font color=blue>IDAPython</font>",
        )
        for text in greetings:
            layout.addWidget(QtGui.QLabel(text))
        self.parent.setLayout(layout)
    def OnClose(self, form):
        """No teardown is needed when the form closes."""
        pass
# Instantiate the form and display it inside IDA's UI.
plg = MyPluginFormClass()
plg.Show("PySide hello world")
| [
"elias.bachaalany@fccdda4b-c33c-0410-84de-61e1e3e5f415"
] | elias.bachaalany@fccdda4b-c33c-0410-84de-61e1e3e5f415 |
6947929c742bc0792eea07204e55f54a00bbcc60 | 32df7046ccf6ef2dd9b3148c390149f7557101f6 | /Porthole_Detection/Data_to_Image.py | 92b33630fe62b40cc422c8cfec351cef1c485aa5 | [] | no_license | MLJejuCamp2017/Pothole_Detection_using_CNN | 06f849bf9b78b11acf0ef1ec7a75bd9db559e6f5 | 33a6b58837fc36a2d4e04a14d28376a3a456a790 | refs/heads/master | 2021-01-01T18:36:04.602634 | 2017-07-25T06:23:44 | 2017-07-25T06:23:44 | 98,374,940 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,028 | py | # ํ์ผ ํ๋๋ง ๋ฐ๊ฟ์ค
'''
import numpy as np
from scipy.misc import toimage
x = np.loadtxt("/Users/User/PycharmProjects/network/ML_Camp/Porthole_Detection/all.csv", delimiter=',')
# toimage(x).show()
toimage(x).save('all(grayscale).jpg')
'''
'''
# ๋๋ ํ ๋ฆฌ ๋ด์ ํ์ผ๋ค์ ํ๋ฒ์ ์ผ๊ด ๋ณํ ์ ๊ฐ์ฉ๋ค ๋ ๋
ธ๊ฐ๋คํ๋๋ฐ ใ
ใ
์ง์์ ํ ๊ฑธ
import os
import numpy as np
from scipy.misc import toimage
path = "/Users/User/OneDrive/์ผ์๋ก๊ทธ/์์ ๊ฑฐ/ํฌํธํ/csv/๋ค๋ฌ๋ค๋ฌ/"
dirs = os.listdir(path)
def convert():
for item in dirs:
if os.path.isfile(path+item):
print(path+item)
x = np.loadtxt(path+item, delimiter=',')
f, e = os.path.splitext(path+item)
toimage(x).save(f + '.jpg')
convert()
'''
# ์คํํธ๋ผ ์ด๋ฏธ์ง๋ก ์ผ๊ด ๋ณํ
# '''
import matplotlib.pyplot as plt
import stft
import os
import numpy as np
# Directory of accelerometer CSV logs to convert (hard-coded local path).
path = "/Users/User/OneDrive/일상로그/자전거/포트홀/csv/다듬다듬/"
dirs = os.listdir(path)
def convert():
    """Convert every CSV file in `path` into an STFT spectrogram JPEG.

    The third column (index 2) of each log is used as the signal --
    presumably the z-axis accelerometer channel; TODO confirm against
    the logger's column layout.
    """
    for item in dirs:
        if os.path.isfile(path+item):
            print(path+item)
            # unpack=True transposes the file so x[i] is the i-th column.
            x = np.loadtxt(path+item, delimiter=',', unpack=True, dtype='float32')
            f, e = os.path.splitext(path+item)
            # np.transpose on a 1-D array returns it unchanged.
            z_data = np.transpose(x[2])
            # specgram_z = stft.spectrogram(z_data)
            specgram_z = stft.spectrogram(z_data, window=0.4)
            # Saved next to the source file, same basename, .jpg extension.
            # NOTE(review): plt._imsave is a private matplotlib helper; the
            # public equivalent is matplotlib.image.imsave -- verify before
            # upgrading matplotlib.
            plt._imsave(f + '.jpg', abs(specgram_z), vmin=-40, vmax=40, cmap=plt.get_cmap('coolwarm'), format='jpg') # gray Wistia
# Run the batch conversion at import time.
convert()
# '''
# ํ์ผ ํ๋๋ง ์คํํธ๋ผ ์ด๋ฏธ์ง๋ก ๋ฐ๊ฟ์ค
'''
import matplotlib.pyplot as plt
import stft
import numpy as np
x = np.loadtxt("/Users/User/PycharmProjects/network/ML_Camp/Porthole_Detection/all.csv", delimiter=',', unpack=True)
# toimage(x).show()
z_data = np.transpose(x[2])
specgram_z = stft.spectrogram(z_data, window=0.4)
plt._imsave('all(test).jpg', abs(specgram_z), vmin=-40, vmax=40, cmap=plt.get_cmap('coolwarm'), format='jpg')
# '''
| [
"chzhqk1994@gmail.com"
] | chzhqk1994@gmail.com |
3614b892b438862adb7730b5927fba103d610fdd | 2fdb9f2b2f3ffc13a04de7a13e3f177d88e85798 | /likes/templatetags/likes_tags.py | e4631e6e8eda65d969f91b1b3a7714083e2f1232 | [] | no_license | dyr201500800475/web_novels | 4d0eca0dbe7b1eba75bfc203361caa324d43eaad | df5daafd4661ede64554f19a074bd0581113f4b9 | refs/heads/master | 2020-05-09T15:59:55.266787 | 2019-04-16T01:58:34 | 2019-04-16T01:58:34 | 181,253,766 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | from django import template
from django.contrib.contenttypes.models import ContentType
from ..models import LikeCount, LikeRecord
register = template.Library()
# ่ทๅ็น่ตๆฐ
@register.simple_tag
def get_like_count(obj):
content_type = ContentType.objects.get_for_model(obj)
like_count, created = LikeCount.objects.get_or_create(content_type=content_type, object_id=obj.pk)
return like_count.liked_num
# ่ทๅ็น่ต็ถๆ
@register.simple_tag(takes_context=True)
def get_like_status(context, obj):
content_type = ContentType.objects.get_for_model(obj)
user=context['user']
if not user.is_authenticated:
return ''
if LikeRecord.objects.filter(content_type=content_type, object_id=obj.pk, user=user).exists():
return 'active'
else:
return ''
# ่ทๅ็น่ตๅฏน่ฑก็็ฑปๅ
@register.simple_tag
def get_content_type(obj):
content_type = ContentType.objects.get_for_model(obj)
return content_type.model
| [
"870850834@qq.com"
] | 870850834@qq.com |
8a8eb30d68328005ec249519efc1016a86616c7f | 45bfeba3abab88eeb08b54946a8729d0152a22cc | /src/python/codechef/JAN19B/DPAIRS.py | 0e5f5b1b7ae76609189e196aac939bfee444999f | [
"MIT"
] | permissive | duke79/compro | c32ee2aca9b5adf2d62e18fa8822736821148b0b | a5577e043888df513a78a94a93ed5d08bc6ad2cd | refs/heads/master | 2022-06-13T22:09:52.451149 | 2022-06-12T05:15:28 | 2022-06-12T05:21:44 | 165,487,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | N, M = input().split(" ")
# Array sizes were read as strings on the first line (see the fused
# `N, M = input().split(" ")` just above this chunk).
N = int(N)
M = int(M)
# Read both arrays as space-separated integers, one array per line.
A = input().split(" ")
A = [int(elem) for elem in A]
B = input().split(" ")
B = [int(elem) for elem in B]
# Original indices of A and B, ordered by ascending element value.
A_sorted = [i[0] for i in sorted(enumerate(A), key=lambda x: x[1])]
B_sorted = [i[0] for i in sorted(enumerate(B), key=lambda x: x[1])]
# Walk both index lists in lockstep, advancing exactly one side per step,
# so consecutive printed pairs differ in only one coordinate.
A_i = 0
B_i = 0
As_turn = True  # alternates which pointer advances after each pair
# print(A_sorted)
# print(B_sorted)
while len(A_sorted) > A_i and len(B_sorted) > B_i:
    # Emit one index pair per line: "<index into A> <index into B>".
    print("%s %s" % (A_sorted[A_i], B_sorted[B_i]))
    if As_turn:
        # print("As_turn")
        As_turn = False
        A_i += 1
    else:
        # print("Bs_turn")
        As_turn = True
        B_i += 1
"pulkitsingh01@gmail.com"
] | pulkitsingh01@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.