blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e6e7c893aed1a560826d905feab545ef4384fdb1
|
Python
|
ningCherry/django_test
|
/ttsx2/booktest/views.py
|
UTF-8
| 3,012
| 2.734375
| 3
|
[] |
no_license
|
from django.shortcuts import render,redirect
from django.http import HttpResponse,HttpResponseRedirect
# Create your views here.
def index(request):
    """Landing view: respond with a static greeting."""
    return HttpResponse('hello cherry!')
def detail(request,p):
    """Echo the single captured URL parameter back to the client."""
    return HttpResponse(p)
def detail1(request,p1,p2,p3):
    """Render three captured URL parameters as a date-like string."""
    message = 'years-{},month-{},day-{}'.format(p1, p2, p3)
    return HttpResponse(message)
# GET-parameter exercises.
# Page that renders the links used to drive the GET views below.
def getTest1(request):
    """Serve the page containing the GET-request demo links."""
    return render(request, 'booktest/getTest1.html')
# One value per key.
def getTest2(request):
    """Read single-valued GET keys 'a', 'b', 'c' and render them."""
    # GET['key'] yields one value only (and raises if the key is absent).
    context = {
        'a': request.GET['a'],
        'b': request.GET['b'],
        'c': request.GET['c'],
    }
    return render(request, 'booktest/getTest2.html', context)
# Several values per key.
def getTest3(request):
    """Read the multi-valued GET key 'a' and render all of its values."""
    # getlist() returns every value submitted under the key, as a list.
    return render(request, 'booktest/getTest3.html', {'a': request.GET.getlist('a')})
# POST-attribute exercises.
def postTest1(request):
    """Serve the form used by the POST demo."""
    return render(request, 'booktest/postTest1.html')
def postTest2(request):
    """Collect the submitted form fields and render them back."""
    context = {
        'name': request.POST['uname'],            # single value; KeyError if missing
        'pwd': request.POST['upwd'],
        'gender': request.POST.get('ugender'),    # .get() returns None if missing
        'hobby': request.POST.getlist('uhobby'),  # every checked value, as a list
    }
    return render(request, 'booktest/postTest2.html', context)
# Cookie exercise.
def cookieTest(request):
    """Echo cookie 't1' if the browser sent it, otherwise set it to 'abc'."""
    response = HttpResponse()
    if 't1' in request.COOKIES:
        # Cookie already stored on the client: write its value into the body.
        response.write(request.COOKIES['t1'])
    else:
        # First visit: ask the browser to store the cookie.
        response.set_cookie('t1', 'abc')
    return response
# Redirect exercise.
def redTest1(request):
    """Redirect to redTest2 (shortcut form of HttpResponseRedirect)."""
    return redirect('/booktest/redTest2/')
def redTest2(request):
    """Target page of the redirect above."""
    return HttpResponse('这是重定向的页面')
# Session exercises built around a fake login flow.
# Home page: shows the logged-in user name, or a placeholder.
def session1(request):
    """Render the home page with the session's 'uname', if any."""
    # .get() avoids the KeyError that request.session['uname'] would raise
    # for visitors who have not logged in yet.
    uname = request.session.get('uname', '未登录')
    return render(request, 'booktest/session1.html', {'uname': uname})
# Login page.
def session2(request):
    """Serve the login form."""
    return render(request, 'booktest/session2.html')
def session2_handle(request):
    """Store the submitted user name in the session, then go home."""
    request.session['uname'] = request.POST['uname']
    # request.session.set_expiry(0) would make the session expire when the
    # browser closes.
    return redirect('/booktest/session1/')
# Logout.
def session3(request):
    """Remove 'uname' from the session (if present) and go home.

    Uses pop() with a default so that hitting the logout URL without being
    logged in no longer raises KeyError (``del request.session['uname']``
    crashed for anonymous visitors).
    """
    request.session.pop('uname', None)
    return redirect('/booktest/session1/')
| true
|
dcf9b5ae1489bb6e201a9a28dc15d8b40f725ce4
|
Python
|
mabioo/Python-codewar
|
/20.求整数的字母转化/test.py
|
UTF-8
| 1,358
| 3.59375
| 4
|
[] |
no_license
|
def factorial(n):
    """Return n! for n >= 0 (0! == 1), computed iteratively."""
    product = 1
    for factor in range(2, n + 1):
        product *= factor
    return product
def dec2FactString(nb):
    """Encode non-negative integer nb in the factorial number system.

    Digit i (from the right, starting at 0) has weight i! and ranges over
    0..i; digit values 10..35 are written as 'A'..'Z'.  Example: 23 -> "3210".

    Fix: the original left a debug ``print(i)`` in place, polluting stdout.
    """
    # Find the highest factorial weight that fits into nb.
    i = 1
    while nb >= factorial(i):
        i += 1
    i -= 1
    digits = ""
    while i >= 0:
        # The digit for weight i! is floor(nb / i!).
        j = nb // factorial(i)
        nb -= factorial(i) * j
        i -= 1
        # NOTE(review): the original wrapped digits > 35 by subtracting 35,
        # which silently produces a wrong encoding; such digits cannot be
        # represented in the 0-9A-Z alphabet at all.  Behaviour preserved.
        if j > 35:
            j = j - 35
        if j >= 10:
            t = chr(ord("A") + j - 10)
        else:
            t = str(j)
        digits = digits + t
    return digits
def factString2Dec(string):
    """Decode a factorial-base string (digits 0-9, A-Z) back to an integer."""
    total = 0
    # Walk from the least significant digit (rightmost, weight 0!) upward.
    for weight, ch in enumerate(reversed(string)):
        if ch >= 'A':
            digit = ord(ch) - ord('A') + 10
        else:
            digit = int(ch)
        total += factorial(weight) * digit
    return total
# The best solutions (community versions):
Base36 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def factString2Decs(string):
    """Decode a factorial-base string via Horner's scheme over Base36 digits."""
    highest = len(string) - 1
    value = 0
    for pos in range(highest):
        # Multiply by the descending factorial weight, Horner-style.
        value = (value + Base36.index(string[pos])) * (highest - pos)
    return value
def dec2FactStrings(nb):
    """Encode integer nb in the factorial number system (inverse of above).

    Bug fix: the original used true division ``nb = (nb - tmp) / c``, which
    turns ``nb`` into a float under Python 3 and then crashes with
    ``TypeError`` on ``Base36[tmp]`` once ``tmp`` becomes a float.  Floor
    division keeps every intermediate integral.
    """
    digits = ['0']  # the weight-0! digit is always 0
    c = 2
    while nb != 0:
        digits.insert(0, Base36[nb % c])
        nb //= c
        c += 1
    return ''.join(digits)
if __name__ =="__main__":
    # Smoke tests: 23 encodes to "3210" and "3210" decodes back to 23 with
    # both the hand-rolled and the community implementations.
    print (dec2FactString(23))
    #print (factorial(12))
    print (factString2Dec("3210"))
    #print (dec2FactString(9))
    print (factString2Decs("3210"))
| true
|
8ed82d568191c9cb7296aa60ae84ef283ea82ca3
|
Python
|
26tanishabanik/Interview-Coding-Questions
|
/Strings/ScoreOfParenthesis.py
|
UTF-8
| 740
| 4.09375
| 4
|
[
"MIT"
] |
permissive
|
"""
Given a balanced parentheses string s, compute the score of the string based on the following rule:
() has score 1
AB has score A + B, where A and B are balanced parentheses strings.
(A) has score 2 * A, where A is a balanced parentheses string.
"""
def scoreOfParentheses(s) -> int:
    """Score a balanced parentheses string: '()' = 1, AB = A + B, '(A)' = 2*A.

    Stack-based single pass: '(' markers are pushed; on ')' everything back
    to the matching '(' is collapsed into one integer score.
    Fixes: removed the unused ``count`` local; final accumulation loop
    replaced with ``sum()``.
    """
    stack = []
    for ch in s:
        if ch == '(':
            stack.append('(')
            continue
        # Closing paren: sum the already-scored siblings above the '('.
        inner = 0
        k = len(stack) - 1
        while stack[k] != '(':
            inner += stack[k]
            stack.pop()
            k -= 1
        # '()' scores 1; '(A)' doubles the inner total.  Overwrite the '('.
        stack[-1] = 1 if inner == 0 else 2 * inner
    # Remaining stack entries are sibling scores at the top level.
    return sum(stack)
| true
|
1f899320f5038d20473df3cced4bc2d3d3260fa9
|
Python
|
tonngw/leetcode
|
/python/1299-replace-elements-with-greatest-element-on-right-side.py
|
UTF-8
| 265
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
class Solution:
    def replaceElements(self, arr: List[int]) -> List[int]:
        """Replace each element with the greatest element to its right;
        the last element becomes -1.  Single reverse pass, in place."""
        suffix_max = -1
        for idx in reversed(range(len(arr))):
            # Write the max of the suffix to the right, then fold this
            # element into the running suffix maximum.
            arr[idx], suffix_max = suffix_max, max(suffix_max, arr[idx])
        return arr
| true
|
f38dff6fdee210c53d9ff94f987ef88ac0705b55
|
Python
|
frohman04/advent-2017
|
/18/main_181.py
|
UTF-8
| 6,340
| 2.84375
| 3
|
[] |
no_license
|
import abc
import fileinput
import logging
import sys
from typing import List
import unittest
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class RegisterFile(object):
    """Register state for the duet interpreter: registers 'a'..'z' plus the
    special slots 'pc' (program counter) and 'snd' (last played sound)."""
    def __init__(self):
        # Every register, including the specials, starts at zero.
        self._file = {'pc': 0, 'snd': 0}
        for code in range(ord('a'), ord('z') + 1):
            self._file[chr(code)] = 0
    def next(self):
        """Advance the program counter by one instruction."""
        self._file['pc'] += 1
    def jump(self, to: int):
        """Set the program counter to an absolute position."""
        self._file['pc'] = to
    def pc(self) -> int:
        """Current program counter."""
        return self._file['pc']
    def send(self, val: int):
        """Record the frequency of the last sound played."""
        self._file['snd'] = val
    def receive(self) -> int:
        """Recover the frequency of the last sound played."""
        return self._file['snd']
    def set(self, reg: str, val: int):
        """Store val in the named register."""
        self._file[reg] = val
    def get(self, reg: str) -> int:
        """Read the named register."""
        return self._file[reg]
    def __str__(self):
        # Only non-zero general registers are shown, to keep logs readable.
        return 'RegisterFile(pc={}, snd={}, rf={})'.format(
            self._file['pc'],
            self._file['snd'],
            {(key, value) for key, value in self._file.items()
             if key not in {'pc', 'snd'} and value != 0})
class Op(object, metaclass=abc.ABCMeta):
    # Abstract base for every interpreter instruction.
    @abc.abstractmethod
    def perform(self, rf: RegisterFile):
        """Apply this instruction to the register file, advancing or
        jumping the program counter as appropriate."""
        pass
class RegOp(Op):
    """Base for instructions taking a single register operand."""
    def __init__(self, reg: str):
        self._reg = reg
    def __repr__(self):
        return '{}({})'.format(type(self).__name__, self._reg)
    def __str__(self):
        return repr(self)
    def __eq__(self, other):
        return type(self) == type(other) and self._reg == other._reg
class RegValOp(Op):
    """Base for instructions taking a register and an immediate value."""
    def __init__(self, reg: str, val: int):
        self._reg = reg
        self._val = val
    def __repr__(self):
        return '{}({}, {})'.format(type(self).__name__, self._reg, self._val)
    def __str__(self):
        return repr(self)
    def __eq__(self, other):
        return type(self) == type(other) and self._reg == other._reg and self._val == other._val
class RegRegOp(Op):
    """Base for instructions taking two register operands."""
    def __init__(self, reg1: str, reg2: str):
        self._reg1 = reg1
        self._reg2 = reg2
    def __repr__(self):
        # Bug fix: the second placeholder previously printed _reg1 twice,
        # hiding the actual second operand in logs and failure output.
        return '{}({}, {})'.format(type(self).__name__, self._reg1, self._reg2)
    def __str__(self):
        # Added for consistency with the sibling base classes (equivalent to
        # the default object.__str__ falling through to __repr__).
        return repr(self)
    def __eq__(self, other):
        return type(self) == type(other) and self._reg1 == other._reg1 and self._reg2 == other._reg2
class Snd(RegOp):
    """snd X: store the value of register X as the last-played sound."""
    def perform(self, rf: RegisterFile):
        frequency = rf.get(self._reg)
        rf.send(frequency)
        rf.next()
class Rcv(RegOp):
    """rcv X: if X is non-zero, print the last sound and halt the process."""
    def perform(self, rf: RegisterFile):
        if rf.get(self._reg) != 0:
            print(rf.receive())
            sys.exit(0)
        rf.next()
class SetVal(RegValOp):
    """set X n: store the immediate n in register X."""
    def perform(self, rf: RegisterFile):
        rf.set(self._reg, self._val)
        rf.next()
class SetReg(RegRegOp):
    """set X Y: copy register Y into register X."""
    def perform(self, rf: RegisterFile):
        rf.set(self._reg1, rf.get(self._reg2))
        rf.next()
class AddVal(RegValOp):
    """add X n: increase register X by the immediate n."""
    def perform(self, rf: RegisterFile):
        total = rf.get(self._reg) + self._val
        rf.set(self._reg, total)
        rf.next()
class AddReg(RegRegOp):
    """add X Y: increase register X by register Y."""
    def perform(self, rf: RegisterFile):
        total = rf.get(self._reg1) + rf.get(self._reg2)
        rf.set(self._reg1, total)
        rf.next()
class MulVal(RegValOp):
    """mul X n: multiply register X by the immediate n."""
    def perform(self, rf: RegisterFile):
        product = rf.get(self._reg) * self._val
        rf.set(self._reg, product)
        rf.next()
class MulReg(RegRegOp):
    """mul X Y: multiply register X by register Y."""
    def perform(self, rf: RegisterFile):
        product = rf.get(self._reg1) * rf.get(self._reg2)
        rf.set(self._reg1, product)
        rf.next()
class ModVal(RegValOp):
    """mod X n: reduce register X modulo the immediate n."""
    def perform(self, rf: RegisterFile):
        remainder = rf.get(self._reg) % self._val
        rf.set(self._reg, remainder)
        rf.next()
class ModReg(RegRegOp):
    """mod X Y: reduce register X modulo register Y."""
    def perform(self, rf: RegisterFile):
        remainder = rf.get(self._reg1) % rf.get(self._reg2)
        rf.set(self._reg1, remainder)
        rf.next()
class JgzVal(RegValOp):
    """jgz X n: jump n instructions (relative) when register X is positive."""
    def perform(self, rf: RegisterFile):
        if rf.get(self._reg) > 0:
            rf.jump(rf.pc() + self._val)
        else:
            rf.next()
class JgzReg(RegRegOp):
    """jgz X Y: jump by register Y (relative) when register X is positive."""
    def perform(self, rf: RegisterFile):
        if rf.get(self._reg1) > 0:
            rf.jump(rf.pc() + rf.get(self._reg2))
        else:
            rf.next()
class Program(object):
    """A parsed instruction listing bound to a fresh register file."""
    def __init__(self, inst: List[Op]):
        self._rf = RegisterFile()
        self._inst = inst
    def execute(self):
        """Run instructions until the program counter leaves the listing.

        Rcv may also terminate the whole process via sys.exit.  The register
        file is logged after every step; returns None.
        """
        logger.info(self._rf)
        while 0 <= self._rf.pc() < len(self._inst):
            next_inst = self._inst[self._rf.pc()]
            logger.info('> {}'.format(next_inst))
            next_inst.perform(self._rf)
            logger.info(self._rf)
def parse_single(line: str) -> Op:
    """Parse one assembly line (e.g. 'set a 1') into its Op instance.

    Fix: an unknown mnemonic now raises ValueError instead of silently
    returning None, which previously only surfaced much later as an
    AttributeError inside Program.execute().
    """
    def build_two_arg(val_ctr, reg_ctr, parts: List[str]) -> Op:
        # The second operand is either an immediate integer or a register name.
        try:
            int(parts[2])
            is_int = True
        except ValueError:
            is_int = False
        if is_int:
            return val_ctr(parts[1], int(parts[2]))
        else:
            return reg_ctr(parts[1], parts[2])
    parts = line.split()
    if parts[0] == 'snd':
        return Snd(parts[1])
    elif parts[0] == 'rcv':
        return Rcv(parts[1])
    elif parts[0] == 'set':
        return build_two_arg(SetVal, SetReg, parts)
    elif parts[0] == 'add':
        return build_two_arg(AddVal, AddReg, parts)
    elif parts[0] == 'mul':
        return build_two_arg(MulVal, MulReg, parts)
    elif parts[0] == 'mod':
        return build_two_arg(ModVal, ModReg, parts)
    elif parts[0] == 'jgz':
        return build_two_arg(JgzVal, JgzReg, parts)
    else:
        raise ValueError('Unknown instruction: {}'.format(line))
def parse(lines: List[str]) -> Program:
    """Parse every input line into an Op and wrap them in a Program."""
    return Program(list(map(parse_single, lines)))
if __name__ == '__main__':
    # Read the whole program from stdin / argv files, then run it.
    lines = [line.strip() for line in fileinput.input()]
    # Fix: execute() returns None, so the original print() around it emitted
    # a stray "None" after the program finished.
    parse(lines).execute()
class Tests181(unittest.TestCase):
    """Unit tests for parse(): the example program from the day-18 puzzle
    statement parses into the expected Op sequence."""
    def test_parse(self):
        # Relies on the __eq__ implementations of the Op base classes.
        self.assertEqual(
            [
                SetVal('a', 1),
                AddVal('a', 2),
                MulReg('a', 'a'),
                ModVal('a', 5),
                Snd('a'),
                SetVal('a', 0),
                Rcv('a'),
                JgzVal('a', -1),
                SetVal('a', 1),
                JgzVal('a', -2)
            ],
            parse([
                'set a 1',
                'add a 2',
                'mul a a',
                'mod a 5',
                'snd a',
                'set a 0',
                'rcv a',
                'jgz a -1',
                'set a 1',
                'jgz a -2'
            ])._inst
        )
| true
|
6bc3706c2d029e4b2042db109a57373aa932609b
|
Python
|
gnprice/ssns-manip
|
/ssns-manip
|
UTF-8
| 5,750
| 2.65625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
from dataclasses import dataclass
from datetime import datetime, timedelta
import io
import os
import re
import struct
import sys
from typing import List, Optional, Tuple
import click
def log(*args):
    """Print a progress message to stderr, keeping stdout clean for data."""
    print(*args, file=sys.stderr)
@dataclass
class Instruction:
    '''What to do with a particular command.'''
    # Byte offset of the command within the input SNSS file.
    offset: int
    # SNSS command type (first byte of the command payload).
    command_type: int
    # True when the instructions file marked this command with "x " (skip it).
    should_omit: bool
def parse_instructions(filename):
    '''
    Parse an instructions file.
    This is a format we made up right here; see CLI usage message for
    description.  Returns a list of Instruction objects, one per
    non-blank line.  Raises click.ClickException on malformed lines.
    '''
    # And really the details of the format are mostly up to ccl_ssns.py.
    # Fix: regex patterns are now raw strings; '\s' and '\d' in plain
    # strings are invalid escape sequences (warnings on modern Python).
    instructions = []
    with open(filename, 'r') as f:
        for line in f:
            # Whitespace-only lines are permitted and ignored.
            if re.search(r'^\s*$', line):
                continue
            # Optional "x " omission marker, 8-hex-digit offset, "C<type>".
            m = re.search(r'^(x )?([0-9a-f]{8}): C(\d+)',
                          line)
            if not m:
                raise click.ClickException(f"Can't parse instruction: {line}")
            should_omit = (m[1] is not None)
            offset = int(m[2], 16)
            command_type = int(m[3])
            instructions.append(Instruction(
                offset=offset, command_type=command_type, should_omit=should_omit))
    return instructions
# Chrome stores these timestamps as microseconds since the Windows epoch.
windows_epoch = datetime.fromisoformat('1601-01-01')
one_microsecond = timedelta(microseconds=1)
def int_of_timestamp(dt: datetime) -> int:
    '''
    Intify the timestamp in the quirky format Chrome uses here.
    '''
    # See TimestampToString:
    # https://chromium.googlesource.com/chromium/src.git/+/refs/tags/89.0.4389.90/components/sessions/core/command_storage_backend.cc#341
    elapsed = dt - windows_epoch
    return elapsed // one_microsecond
def open_output_file(output_dir: str) -> Tuple[str, io.BufferedWriter]:
    '''
    Create and open our output file at an appropriate filename.

    Picks a Session_<timestamp> name strictly newer than any existing one
    and retries on collision ('x' mode) until creation succeeds.
    Returns (path, open writable binary file).
    '''
    # Fix: raw string for the regex ('\d' is an invalid escape otherwise).
    latest_existing = 0
    for name in os.listdir(output_dir):
        m = re.fullmatch(r'Session_(\d+)', name)
        if m:
            latest_existing = max(latest_existing, int(m[1]))
    now = int_of_timestamp(datetime.utcnow())
    timestamp = max(now, latest_existing + 1)
    while True:
        try:
            path = os.path.join(output_dir, f'Session_{timestamp}')
            f = open(path, 'xb')
            return (path, f)
        except FileExistsError:
            timestamp += 1
# These follow definitions in Chromium upstream:
# https://chromium.googlesource.com/chromium/src.git/+/refs/tags/89.0.4389.90/components/sessions/core/command_storage_backend.cc#341
k_file_signature = b'SNSS'
k_file_version_1 = 1
k_file_version_with_marker = 3
def write_results(
        f_out: io.BufferedWriter,
        f_in: io.BufferedReader,
        instructions: Optional[List[Instruction]]):
    '''
    Copy SNSS commands from f_in to f_out, applying the edit instructions.

    With instructions=None the file is copied verbatim.  Otherwise each
    command must match the corresponding instruction's offset and type,
    and commands marked should_omit are dropped from the output.

    Bug fix: every error path previously raised the unqualified name
    ``ClickException``, which is not defined in this module -- each raise
    produced a NameError instead of the intended message.  All raises now
    use ``click.ClickException``.
    '''
    header_bytes = f_in.read(8)
    if header_bytes[:4] != k_file_signature:
        raise click.ClickException('Input is not an SNSS file')
    format_version, = struct.unpack('<i', header_bytes[4:])
    if format_version not in (k_file_version_1, k_file_version_with_marker):
        raise click.ClickException(f'Input has unexpected version: {format_version}')
    f_out.write(header_bytes)
    i = 0
    while True:
        offset = f_in.tell()
        size_bytes = f_in.read(2)
        if not size_bytes:
            # EOF
            if instructions is not None and i < len(instructions):
                raise click.ClickException(f'Too many instructions; input consumed before line {i+1}')
            break
        size, = struct.unpack('<H', size_bytes)
        command_bytes = f_in.read(size)
        command_type = command_bytes[0]
        if instructions is not None:
            if i >= len(instructions):
                raise click.ClickException(
                    f'Ran out of instructions; commands continue at offset 0x{offset:08x}')
            instruction = instructions[i]
            i += 1
            if offset != instruction.offset:
                raise click.ClickException(f'Offset mismatch: next command is at 0x{offset:08x}'
                                           + f' but instruction is for 0x{instruction.offset:08x}')
            if command_type != instruction.command_type:
                raise click.ClickException(
                    f'Command type mismatch: command at 0x{offset:08x}'
                    + f' has type {command_type} but expected {instruction.command_type}')
            if instruction.should_omit:
                continue
        f_out.write(size_bytes + command_bytes)
@click.command()
@click.argument('input_file')
@click.argument('output_dir')
@click.option('-f', 'instructions_file', metavar='FILE',
              help='File with instructions for how to edit.')
def main(input_file, output_dir, *, instructions_file):
    '''
    Make an edited version of a Chrome or Chromium session file.
    The edited version will be written as the newest Session_* file
    in OUTPUT_DIR.
    If no instructions are provided, the file will be copied
    unmodified.
    With `-f`, the given instructions should be based on the output of
    ccl_ssns.py. Each line indicates what to do with the corresponding
    command in the file. The line should be either:
    * unmodified, to copy the command unmodified to the output; or
    * have "x " inserted at the beginning, to skip the command.
    Whitespace lines are also permitted, and ignored.
    '''
    # (Docstring above doubles as the click --help text; left unchanged.)
    instructions = None
    if instructions_file is not None:
        instructions = parse_instructions(instructions_file)
        log(f'{len(instructions)} instructions read.')
    # NOTE(review): f_in is never explicitly closed; relies on process exit.
    f_in = open(input_file, 'rb')
    path, f_out = open_output_file(output_dir)
    log(f'Writing output to: {path}')
    write_results(f_out, f_in, instructions)
    f_out.close()
if __name__ == '__main__':
    main()
| true
|
4abadb77953d766488c4ee3c5f374edd89550b04
|
Python
|
wavce/classificationx
|
/core/optimizers/gradient_centralization.py
|
UTF-8
| 16,220
| 2.546875
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MulanPSL-1.0",
"LicenseRef-scancode-mulanpsl-1.0-en"
] |
permissive
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import training_ops

from ..builder import OPTIMIZERS
def _gradient_centeralization(grad_and_vars):
    # Mean-center the gradient of every rank-4 variable (presumably conv
    # kernels) before the optimizer update -- apparently an implementation of
    # "Gradient Centralization"; verify against the intended reference.
    results = []
    for grad, var in grad_and_vars:
        # NOTE(review): array_ops.rank() returns a Tensor; `== 4` only acts
        # as a Python bool under eager execution.  In graph mode this
        # condition would not behave as intended -- confirm eager-only usage.
        if array_ops.rank(grad) == 4:
            grad -= math_ops.reduce_mean(grad)
        results.append((grad, var))
    return results
@OPTIMIZERS.register
class SGDGC(optimizer_v2.OptimizerV2):
    """SGD (optionally with momentum / Nesterov) that applies gradient
    centralization in apply_gradients before the standard update."""
    # Let the base class aggregate gradients across replicas.
    _HAS_AGGREGATE_GRAD = True
    def __init__(self,
                 learning_rate=0.01,
                 momentum=0.0,
                 nesterov=False,
                 name="SGDGC",
                 **kwargs):
        """Construct a new Stochastic Gradient Descent or Momentum optimizer.
        Arguments:
          learning_rate: A `Tensor`, floating point value, or a schedule that is a
            `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable
            that takes no arguments and returns the actual value to use. The
            learning rate. Defaults to 0.01.
          momentum: float hyperparameter >= 0 that accelerates SGD in the relevant
            direction and dampens oscillations. Defaults to 0.0, i.e., SGD.
          nesterov: boolean. Whether to apply Nesterov momentum.
            Defaults to `False`.
          name: Optional name prefix for the operations created when applying
            gradients. Defaults to 'SGD'.
          **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
            `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
            gradients by value, `decay` is included for backward compatibility to
            allow time inverse decay of learning rate. `lr` is included for backward
            compatibility, recommended to use `learning_rate` instead.
        """
        super(SGDGC, self).__init__(name, **kwargs)
        self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
        self._set_hyper("decay", self._initial_decay)
        # Momentum is enabled for tensors/callables or any positive scalar.
        self._momentum = False
        if isinstance(momentum, ops.Tensor) or callable(momentum) or momentum > 0:
            self._momentum = True
        if isinstance(momentum, (int, float)) and (momentum < 0 or momentum > 1):
            raise ValueError("`momentum` must be between [0, 1].")
        self._set_hyper("momentum", momentum)
        self.nesterov = nesterov
    def _create_slots(self, var_list):
        # One "momentum" accumulator per variable, only when momentum is on.
        if self._momentum:
            for var in var_list:
                self.add_slot(var, "momentum")
    def apply_gradients(self,
                        grads_and_vars,
                        name=None,
                        experimental_aggregate_gradients=True):
        """Apply gradients to variables.
        This is the second part of `minimize()`. It returns an `Operation` that
        applies gradients.
        The method sums gradients from all replicas in the presence of
        `tf.distribute.Strategy` by default. You can aggregate gradients yourself by
        passing `experimental_aggregate_gradients=False`.
        Example:
        ```python
        grads = tape.gradient(loss, vars)
        grads = tf.distribute.get_replica_context().all_reduce('sum', grads)
        # Processing aggregated gradients.
        optimizer.apply_gradients(zip(grads, vars),
            experimental_aggregate_gradients=False)
        ```
        Args:
          grads_and_vars: List of (gradient, variable) pairs.
          name: Optional name for the returned operation. Default to the name passed
            to the `Optimizer` constructor.
          experimental_aggregate_gradients: Whether to sum gradients from different
            replicas in the presense of `tf.distribute.Strategy`. If False, it's
            user responsibility to aggregate the gradients. Default to True.
        Returns:
          An `Operation` that applies the specified gradients. The `iterations`
          will be automatically increased by 1.
        Raises:
          TypeError: If `grads_and_vars` is malformed.
          ValueError: If none of the variables have gradients.
        """
        # Mean-center eligible gradients, then defer to the stock SGD logic.
        grads_and_vars = _gradient_centeralization(grad_and_vars=grads_and_vars)
        return super(SGDGC, self).apply_gradients(grads_and_vars=grads_and_vars,
                                                  name=name,
                                                  experimental_aggregate_gradients=experimental_aggregate_gradients)
    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(SGDGC, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["momentum"] = array_ops.identity(
            self._get_hyper("momentum", var_dtype))
    def _resource_apply_dense(self, grad, var, apply_state=None):
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = ((apply_state or {}).get((var_device, var_dtype))
                        or self._fallback_apply_state(var_device, var_dtype))
        if self._momentum:
            momentum_var = self.get_slot(var, "momentum")
            return training_ops.resource_apply_keras_momentum(
                var.handle,
                momentum_var.handle,
                coefficients["lr_t"],
                grad,
                coefficients["momentum"],
                use_locking=self._use_locking,
                use_nesterov=self.nesterov)
        else:
            return training_ops.resource_apply_gradient_descent(
                var.handle, coefficients["lr_t"], grad, use_locking=self._use_locking)
    def _resource_apply_sparse_duplicate_indices(self, grad, var, indices,
                                                 **kwargs):
        if self._momentum:
            # Momentum path needs proper duplicate-index handling upstream.
            return super(SGDGC, self)._resource_apply_sparse_duplicate_indices(
                grad, var, indices, **kwargs)
        else:
            var_device, var_dtype = var.device, var.dtype.base_dtype
            coefficients = (kwargs.get("apply_state", {}).get((var_device, var_dtype))
                            or self._fallback_apply_state(var_device, var_dtype))
            return resource_variable_ops.resource_scatter_add(
                var.handle, indices, -grad * coefficients["lr_t"])
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        # This method is only needed for momentum optimization.
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = ((apply_state or {}).get((var_device, var_dtype))
                        or self._fallback_apply_state(var_device, var_dtype))
        momentum_var = self.get_slot(var, "momentum")
        return training_ops.resource_sparse_apply_keras_momentum(
            var.handle,
            momentum_var.handle,
            coefficients["lr_t"],
            grad,
            indices,
            coefficients["momentum"],
            use_locking=self._use_locking,
            use_nesterov=self.nesterov)
    def get_config(self):
        config = super(SGDGC, self).get_config()
        config.update({
            "learning_rate": self._serialize_hyperparameter("learning_rate"),
            "decay": self._serialize_hyperparameter("decay"),
            "momentum": self._serialize_hyperparameter("momentum"),
            "nesterov": self.nesterov,
        })
        return config
@OPTIMIZERS.register
class AdamGC(optimizer_v2.OptimizerV2):
    """Adam (optionally AMSGrad) that applies gradient centralization in
    apply_gradients before the standard update."""
    # Let the base class aggregate gradients across replicas.
    _HAS_AGGREGATE_GRAD = True
    def __init__(self,
                 learning_rate=0.001,
                 beta_1=0.9,
                 beta_2=0.999,
                 epsilon=1e-7,
                 amsgrad=False,
                 name='AdamGC',
                 **kwargs):
        super(AdamGC, self).__init__(name, **kwargs)
        self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
        self._set_hyper('decay', self._initial_decay)
        self._set_hyper('beta_1', beta_1)
        self._set_hyper('beta_2', beta_2)
        self.epsilon = epsilon or backend_config.epsilon()
        self.amsgrad = amsgrad
    def _create_slots(self, var_list):
        # Create slots for the first and second moments.
        # Separate for-loops to respect the ordering of slot variables from v1.
        for var in var_list:
            self.add_slot(var, 'm')
        for var in var_list:
            self.add_slot(var, 'v')
        if self.amsgrad:
            for var in var_list:
                self.add_slot(var, 'vhat')
    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamGC, self)._prepare_local(var_device, var_dtype, apply_state)
        # Precompute the bias-corrected learning rate and per-step constants.
        local_step = math_ops.cast(self.iterations + 1, var_dtype)
        beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
        beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
        beta_1_power = math_ops.pow(beta_1_t, local_step)
        beta_2_power = math_ops.pow(beta_2_t, local_step)
        lr = (apply_state[(var_device, var_dtype)]['lr_t'] *
              (math_ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)))
        apply_state[(var_device, var_dtype)].update(
            dict(
                lr=lr,
                epsilon=ops.convert_to_tensor_v2(self.epsilon, var_dtype),
                beta_1_t=beta_1_t,
                beta_1_power=beta_1_power,
                one_minus_beta_1_t=1 - beta_1_t,
                beta_2_t=beta_2_t,
                beta_2_power=beta_2_power,
                one_minus_beta_2_t=1 - beta_2_t))
    def set_weights(self, weights):
        params = self.weights
        # If the weights are generated by Keras V1 optimizer, it includes vhats
        # even without amsgrad, i.e, V1 optimizer has 3x + 1 variables, while V2
        # optimizer has 2x + 1 variables. Filter vhats out for compatibility.
        num_vars = int((len(params) - 1) / 2)
        if len(weights) == 3 * num_vars + 1:
            weights = weights[:len(params)]
        super(AdamGC, self).set_weights(weights)
    def apply_gradients(self,
                        grads_and_vars,
                        name=None,
                        experimental_aggregate_gradients=True):
        """Apply gradients to variables.
        This is the second part of `minimize()`. It returns an `Operation` that
        applies gradients.
        The method sums gradients from all replicas in the presence of
        `tf.distribute.Strategy` by default. You can aggregate gradients yourself by
        passing `experimental_aggregate_gradients=False`.
        Example:
        ```python
        grads = tape.gradient(loss, vars)
        grads = tf.distribute.get_replica_context().all_reduce('sum', grads)
        # Processing aggregated gradients.
        optimizer.apply_gradients(zip(grads, vars),
            experimental_aggregate_gradients=False)
        ```
        Args:
          grads_and_vars: List of (gradient, variable) pairs.
          name: Optional name for the returned operation. Default to the name passed
            to the `Optimizer` constructor.
          experimental_aggregate_gradients: Whether to sum gradients from different
            replicas in the presense of `tf.distribute.Strategy`. If False, it's
            user responsibility to aggregate the gradients. Default to True.
        Returns:
          An `Operation` that applies the specified gradients. The `iterations`
          will be automatically increased by 1.
        Raises:
          TypeError: If `grads_and_vars` is malformed.
          ValueError: If none of the variables have gradients.
        """
        # Mean-center eligible gradients, then defer to the stock Adam logic.
        grads_and_vars = _gradient_centeralization(grad_and_vars=grads_and_vars)
        return super(AdamGC, self).apply_gradients(grads_and_vars=grads_and_vars,
                                                   name=name,
                                                   experimental_aggregate_gradients=experimental_aggregate_gradients)
    def _resource_apply_dense(self, grad, var, apply_state=None):
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = ((apply_state or {}).get((var_device, var_dtype))
                        or self._fallback_apply_state(var_device, var_dtype))
        m = self.get_slot(var, 'm')
        v = self.get_slot(var, 'v')
        if not self.amsgrad:
            return training_ops.resource_apply_adam(
                var.handle,
                m.handle,
                v.handle,
                coefficients['beta_1_power'],
                coefficients['beta_2_power'],
                coefficients['lr_t'],
                coefficients['beta_1_t'],
                coefficients['beta_2_t'],
                coefficients['epsilon'],
                grad,
                use_locking=self._use_locking)
        else:
            vhat = self.get_slot(var, 'vhat')
            return training_ops.resource_apply_adam_with_amsgrad(
                var.handle,
                m.handle,
                v.handle,
                vhat.handle,
                coefficients['beta_1_power'],
                coefficients['beta_2_power'],
                coefficients['lr_t'],
                coefficients['beta_1_t'],
                coefficients['beta_2_t'],
                coefficients['epsilon'],
                grad,
                use_locking=self._use_locking)
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = ((apply_state or {}).get((var_device, var_dtype))
                        or self._fallback_apply_state(var_device, var_dtype))
        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, 'm')
        m_scaled_g_values = grad * coefficients['one_minus_beta_1_t']
        m_t = state_ops.assign(m, m * coefficients['beta_1_t'],
                               use_locking=self._use_locking)
        with ops.control_dependencies([m_t]):
            m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, 'v')
        v_scaled_g_values = (grad * grad) * coefficients['one_minus_beta_2_t']
        v_t = state_ops.assign(v, v * coefficients['beta_2_t'],
                               use_locking=self._use_locking)
        with ops.control_dependencies([v_t]):
            v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)
        if not self.amsgrad:
            v_sqrt = math_ops.sqrt(v_t)
            var_update = state_ops.assign_sub(
                var, coefficients['lr'] * m_t / (v_sqrt + coefficients['epsilon']),
                use_locking=self._use_locking)
            # NOTE: control_flow_ops comes from tensorflow.python.ops and must
            # be imported at module top (it was previously missing, so this
            # branch raised NameError at runtime).
            return control_flow_ops.group(*[var_update, m_t, v_t])
        else:
            v_hat = self.get_slot(var, 'vhat')
            v_hat_t = math_ops.maximum(v_hat, v_t)
            with ops.control_dependencies([v_hat_t]):
                v_hat_t = state_ops.assign(
                    v_hat, v_hat_t, use_locking=self._use_locking)
            v_hat_sqrt = math_ops.sqrt(v_hat_t)
            var_update = state_ops.assign_sub(
                var,
                coefficients['lr'] * m_t / (v_hat_sqrt + coefficients['epsilon']),
                use_locking=self._use_locking)
            return control_flow_ops.group(*[var_update, m_t, v_t, v_hat_t])
    def get_config(self):
        config = super(AdamGC, self).get_config()
        config.update({
            'learning_rate': self._serialize_hyperparameter('learning_rate'),
            'decay': self._serialize_hyperparameter('decay'),
            'beta_1': self._serialize_hyperparameter('beta_1'),
            'beta_2': self._serialize_hyperparameter('beta_2'),
            'epsilon': self.epsilon,
            'amsgrad': self.amsgrad,
        })
        return config
| true
|
90e0ba8a4e90b287245ab7fc456871491be081c5
|
Python
|
bowen903/python_study
|
/python_study_0121.py
|
UTF-8
| 435
| 3.28125
| 3
|
[] |
no_license
|
# encoding: utf-8
"""
@author: Xiaoping
@file: python_study_0121.py
@time: 2017/9/10 21:34
"""
# iQIYI coding problem: triangle with the longest perimeter (Python 2 script).
# Reads whitespace-separated integer side lengths, one set per line.
while True:
    try:
        s1=raw_input().split()
        print s1
        s = []
        for i in s1:
            s.append(int(i))
        s.sort()
        print s
        # Triangle inequality on the (ascending-sorted) values.
        if s[0]+s[1]>s[2]:
            print s[0]+s[1]+s[2]
        else:
            # NOTE(review): printing s[1]+s[2] when no triangle exists looks
            # suspicious -- presumably the problem expects a different
            # fallback; verify against the original problem statement.
            print s[1]+s[2]
    except:
        # NOTE(review): bare except doubles as the EOF exit condition, but it
        # also silently hides genuine errors such as non-integer input.
        break
| true
|
0c01b140ac6224c274ea0c460ffc0fb1c0bd97ab
|
Python
|
curieuxjy/DS-for-PPM
|
/day3/untitled26.py
|
UTF-8
| 1,678
| 2.90625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
# Toy kNN demo: label random points by whether they fall inside the unit
# circle, fit a 50-nearest-neighbour classifier, and plot its decision region.
m = 1000
# m points drawn uniformly from the square [-1.5, 1.5]^2 (unseeded RNG, so
# results vary run to run).
X = -1.5 + 3*np.random.uniform(size = (m,2))
y = np.zeros([m,1])
for i in range(m):
    # Class 1 inside (or on) the unit circle, class 0 outside.
    if np.linalg.norm(X[i,:], 2) <= 1:
        y[i] = 1
C1 = np.where(y == 1)[0]
C0 = np.where(y == 0)[0]
theta = np.linspace(0, 2*np.pi, 100)
plt.figure(1, figsize = (8,8))
plt.plot(X[C1,0], X[C1,1], 'o', label = 'C1', markerfacecolor = "k", markeredgecolor = 'k', markersize = 4)
plt.plot(X[C0,0], X[C0,1], 'o', label = 'C0', markerfacecolor = "None", alpha = 0.3, markeredgecolor = 'k', markersize = 4)
# Dashed reference circle: the true decision boundary.
plt.plot(np.cos(theta), np.sin(theta), '--', color = 'orange')
plt.axis([-1.5, 1.5, -1.5, 1.5])
plt.axis('equal')
plt.axis('off')
plt.show()
clf = neighbors.KNeighborsClassifier(n_neighbors = 50)
clf.fit(X, np.ravel(y))
# Classify a single new point and print its predicted label.
X_new = np.array([1, 1]).reshape(1,-1)
result = clf.predict(X_new)[0]
print(result)
# Evaluate the classifier on a dense grid to visualise the learned region.
res = 0.01
[X1gr, X2gr] = np.meshgrid(np.arange(-1.5,1.5,res), np.arange(-1.5,1.5,res))
Xp = np.hstack([X1gr.reshape(-1,1), X2gr.reshape(-1,1)])
Xp = np.asmatrix(Xp)
inC1 = clf.predict(Xp).reshape(-1,1)
inCircle = np.where(inC1 == 1)[0]
plt.figure(2, figsize = (8, 8))
plt.plot(X[C1,0], X[C1,1], 'o', label = 'C1', markerfacecolor = "k", alpha = 0.5, markeredgecolor = 'k', markersize = 4)
plt.plot(X[C0,0], X[C0,1], 'o', label = 'C0', markerfacecolor = "None", alpha = 0.3, markeredgecolor='k', markersize = 4)
plt.plot(np.cos(theta), np.sin(theta), '--', color = 'orange')
# Grid cells predicted as class 1, drawn as small red squares.
plt.plot(Xp[inCircle][:,0], Xp[inCircle][:,1], 's', alpha = 0.5, color = 'r', markersize = 1)
plt.axis([-1.5, 1.5, -1.5, 1.5])
plt.axis('equal')
plt.axis('off')
plt.show()
| true
|
a2dda3f596017778356f63b1dbd11ec1a8ca800d
|
Python
|
Lakshit-Karsoliya/PythonProjects
|
/NoteApplication.py
|
UTF-8
| 3,707
| 3.046875
| 3
|
[] |
no_license
|
import tkinter
import os
from tkinter.simpledialog import *
from tkinter.scrolledtext import *
from tkinter.messagebox import *
from tkinter import *
import time
# Main window setup.
root=Tk()
root.update()
root.geometry("300x250+300+300")
root.minsize(height=500,width=500)
root.title("NoteApplication")
#--------THEME COLOR VARIABLES--#
# Default (light) theme colours; the Theme class later recolours widgets.
background="white"
foreground="black"
#----x--THEME COLOR VAR---x-----#
# Filename label + entry, followed by the main scrolled text editing area.
fn=Label(root,text="Filename:",bg=background,fg=foreground,width=500)
fn.pack()
path=Entry(root,bg=background,fg=foreground,width=500,border=0)
path.pack()
textarea=ScrolledText(root,height=500,width=500,state='normal',bg=background,fg=foreground,border=0)
textarea.pack()
#----------FILE CLASS--------#
class File():
    """File-menu actions.

    Methods deliberately take no ``self``: they are invoked as plain
    functions via ``command=File.save`` etc., never on an instance.
    """
    def save():
        """Write the text area to '<filename>.txt' in the working directory."""
        try:
            text=textarea.get(0.0,END)
            # 'x' mode: creation fails if the file already exists, which the
            # except branch reports as a generic save failure.
            file=open(path.get()+".txt","x")
            file.write(text)
            file.close()
            # Fix: the message previously read "file saved in/path" with no
            # separating space.
            showinfo(title="message",message="file saved in "+os.getcwd())
        except:
            showerror(title="An Error Occur",message="Unable to save file\ninsert file name ")
    def delete():
        """Delete '<filename>.txt', reporting success or failure."""
        try:
            os.remove(path.get()+".txt")
            showinfo(title="info",message="file is deleted")
        except:
            # Fix: message previously read "please open a\nto delete"
            # (missing the word "file").
            showerror(title="error",message="please open a file\nto delete")
    #function for open file
    def openfile():
        """Load '<filename>.txt' into the text area, replacing its contents."""
        textarea.delete(0.0,END)
        # Fix: the file handle was never closed; use a context manager.
        with open(path.get()+".txt","r") as f:
            textarea.insert(0.0,f.read())
    def exit():
        """Close the application window."""
        root.destroy()
#-------------INFO CLASS------
# Info-menu actions (no ``self``: invoked as plain functions via command=).
class Info():
    def properties():
        # Shows name and location; the "Size:" field is left blank here.
        showinfo(title="Properties",message="File name:"+path.get()+".txt\nLocation:"+os.getcwd()+"\nSize:")
    def showtime():
        # Pop up the current wall-clock time.
        showinfo(title ="Time",message=time.ctime())
    def inserttime():
        # Append the current time at the end of the text area.
        textarea.insert(END,time.ctime())
#------Theme Class-------
class Theme():
    """Namespace of colour-scheme callbacks for the Theme menu.

    Methods take no ``self``; they are wired directly as Tk menu
    ``command=`` callbacks and recolour the module-level widgets
    ``textarea``, ``path``, ``fn`` and ``menubar``.
    """
    def _apply(bg, text_fg, entry_fg, menu_fg, menu_active_fg):
        # Single place that pushes one colour scheme onto every widget.
        textarea.config(background=bg, foreground=text_fg)
        path.config(background=bg, foreground=entry_fg)
        fn.config(background=bg, foreground=entry_fg)
        menubar.config(background=bg, foreground=menu_fg,
                       activebackground=bg, activeforeground=menu_active_fg)
    def dark():
        """Plain white-on-black theme."""
        Theme._apply("black", "white", "white", "white", "white")
    def light():
        """Plain black-on-white theme."""
        Theme._apply("white", "black", "black", "black", "black")
    def materialdark():
        """Dark theme with cyan text, yellow entries and a pink menu."""
        Theme._apply("black", "cyan", "yellow", "pink", "white")
def file():
    """Build the File menu (Save/Open/Delete/Exit) and attach it to the menu bar.

    Fix: the original bound an unused ``File()`` instance to a local named
    ``file`` — shadowing this function's own name for no purpose.  The menu
    commands are class-level callbacks (``File.save`` etc.), so no instance
    is needed.
    """
    filemenu = Menu(menubar, border=0)
    filemenu.add_command(label="Save", command=File.save)
    filemenu.add_command(label="Open", command=File.openfile)
    filemenu.add_command(label="Delete", command=File.delete)
    filemenu.add_command(label="Exit", command=File.exit)
    menubar.add_cascade(label="File", menu=filemenu)
    root.config(menu=menubar)
def info():
    """Build the Info menu (Properties/InsertTime/Time) on the menu bar.

    Fix: removed the unused ``Info()`` instance that shadowed this
    function's own name — the commands are class-level callbacks.
    """
    infomenu = Menu(menubar, border=0)
    infomenu.add_command(label="Properties", command=Info.properties)
    infomenu.add_command(label="InsertTime", command=Info.inserttime)
    infomenu.add_command(label="Time", command=Info.showtime)
    menubar.add_cascade(label="Info", menu=infomenu)
    root.config(menu=menubar)
def theme():
    """Build the Theme menu (Dark/TangyDark/Lignt) on the menu bar.

    Fix: removed the unused ``Theme()`` instance that shadowed this
    function's own name — the commands are class-level callbacks.
    (The "Lignt" label typo is kept: it is user-visible text and renaming
    it is out of scope for this cleanup.)
    """
    tmenu = Menu(menubar, border=0)
    tmenu.add_command(label="Dark", command=Theme.dark)
    tmenu.add_command(label="TangyDark", command=Theme.materialdark)
    tmenu.add_command(label="Lignt", command=Theme.light)
    menubar.add_cascade(label="Theme", menu=tmenu)
    root.config(menu=menubar)
# Create the shared menu bar, populate its three menus, and start the UI loop.
menubar=Menu(root,bg=background,fg=foreground,border=0,activebackground=background,activeforeground=foreground)
file()
info()
theme()
root.mainloop()
| true
|
05245c8cae3a6976fe8b11e0ad14adabf60c01c9
|
Python
|
CianciuStyles/CodeEval
|
/Easy/Capitalize Words.py
|
UTF-8
| 226
| 2.84375
| 3
|
[] |
no_license
|
import itertools
import sys
def capitalize_line(line):
    """Return *line* with the first letter of each space-separated word upper-cased."""
    words = line.strip().split(' ')
    # Guard empty tokens (e.g. doubled spaces) so word[0] cannot raise IndexError.
    return ' '.join(word[0].upper() + word[1:] if word else word for word in words)


if __name__ == '__main__':
    with open(sys.argv[1], 'r') as test_cases:
        for test in test_cases:
            # Fix: the original compared the raw line (which still carries its
            # trailing newline) to "", so blank lines were never skipped and
            # crashed on word[0].  Skip anything that is whitespace-only.
            if not test.strip():
                continue
            print(capitalize_line(test))
| true
|
617571ac0f179fa704572a6fd678e242bcb05a18
|
Python
|
MrSamuelLaw/Grabbit
|
/modules/database/tools.py
|
UTF-8
| 3,530
| 3.15625
| 3
|
[] |
no_license
|
import sqlite3
from sqlite3 import Cursor, Connection
from pathlib import Path
from typing import Tuple, Union
from urllib.parse import urlparse
from modules.database.models import TableModel
from modules.scrapers import GritrScraper
class Tools():
    """Static helpers for creating the sqlite database, building tables from
    TableModel schemas, matching scrapers to items, and inserting rows."""

    # =========== available scrapers ===========
    # One scraper instance per supported website; looked up by URL netloc
    # in find_scraper().
    scrapers = [
        GritrScraper(),
    ]

    # ================ functions ================
    @staticmethod
    def create_db(*, db_name) -> Tuple[Cursor, Connection]:
        '''Creates a connection to the database and
        returns the cursor and connection object.

        ``db_name == ':memory:'`` opens an in-memory database; any other
        name is resolved relative to this module's directory.'''
        if db_name == ':memory:':
            db_path = db_name
        else:
            root = Path(__file__).parent
            db_path = root.joinpath(db_name)
        connection = sqlite3.connect(db_path)
        connection.row_factory = sqlite3.Row  # rows support name-based access
        cursor = connection.cursor()
        return cursor, connection

    @staticmethod
    def create_table(cursor: Cursor,
                     connection: Connection, model: TableModel) -> None:
        '''Creates a table from a model (no-op if the table already exists).'''
        # create the table from the schema
        table_name, column_string = Tools.get_schema_strings(model)
        cmd = f'''--sql
        CREATE TABLE IF NOT EXISTS {table_name}
        ({column_string})
        ;'''
        cursor.execute(cmd)
        connection.commit()

    @staticmethod
    def get_schema_strings(model: TableModel) -> Tuple[str, str]:
        '''Returns two strings, the table name
        and the comma-separated "name TYPE" column declarations.'''
        table_name = model.__table_name__  # extract table name
        properties = model.schema()['properties']  # extract the column names
        # formats it like -> colname TYPE, colname2 TYPE2, ...
        column_string = [
            f"{key} {str(val['type']).upper()}"
            for key, val in properties.items()
        ]
        column_string = ','.join(column_string)  # join them into csv string
        return table_name, column_string

    @staticmethod
    def get_column_names(model: TableModel) -> str:
        '''Returns the model's column names as a comma-separated string.'''
        properties = model.schema()['properties']  # extract the column names
        column_string = [f"{key}" for key in properties.keys()]
        column_string = ','.join(column_string)  # join them into csv string
        return column_string

    @staticmethod
    def find_scraper(model: TableModel):
        '''Return the scraper whose name matches the netloc of
        ``model.website``, or None when no scraper supports that site.

        Fix: ``list.index`` raises ValueError (not IndexError) for a missing
        element, so the original except clause never fired and unknown sites
        propagated ValueError to the caller instead of yielding None.  The
        malformed ``Union[str, ]`` return annotation was dropped as well.'''
        path = urlparse(model.website)
        name = path.netloc
        name_list = [s.name for s in Tools.scrapers]
        try:
            index = name_list.index(name)
        except ValueError:
            scraper = None
        else:
            scraper = Tools.scrapers[index]
        return scraper

    @staticmethod
    def add_entry(cur: Cursor, con: Connection, model: TableModel) -> None:
        '''Takes an item and adds it to the database.'''
        # data massaging
        col_dict = model.dict()  # convert to dictionary
        col_names = ','.join(col_dict.keys())  # pull out the column names
        col_vals = tuple(col_dict.values())  # create tuple of column values
        place_holder = ','.join(['?']*len(col_vals))  # parameterized "?,?,..."
        # cmd creation and execution (values bound, never string-formatted)
        cmd = f'''--sql
        INSERT INTO {model.__table_name__} ({col_names})
        VALUES ({place_holder})
        ;'''
        cur.execute(cmd, col_vals)
        con.commit()
| true
|
28452e10c6b0888875a3856517a9e4c9e1057f02
|
Python
|
mikejaron1/NLP_side_project
|
/Text_Clustering.py
|
UTF-8
| 2,834
| 2.734375
| 3
|
[] |
no_license
|
import numpy as np
import pandas as pd
import nltk
import re
import os
import codecs
from sklearn import feature_extraction
# import mpld3b
from nltk.stem.snowball import SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
# nltk.download()
# NLTK resources: English stop words and the Snowball stemmer.
stopwords = nltk.corpus.stopwords.words('english')
stemmer = SnowballStemmer("english")
## get in our documents
directory = './'
cnn_articles = directory+'articles/'
fox_articles = directory+'fox_articles/'
# Load only the FIRST .txt file found (note the break), split into lines.
for i in os.listdir(fox_articles):
    if '.txt' in i:
        text = open(fox_articles+i).read().split('\n')
        # print text
        break
# here I define a tokenizer and stemmer which returns the set of stems in the text that it is passed
def tokenize_and_stem(text):
    """Tokenize *text* sentence-by-sentence, keep only tokens containing at
    least one ASCII letter (drops numbers and bare punctuation), and return
    the Snowball stem of each surviving token."""
    return [stemmer.stem(token)
            for sentence in nltk.sent_tokenize(text)
            for token in nltk.word_tokenize(sentence)
            if re.search('[a-zA-Z]', token)]
def tokenize_only(text):
    """Tokenize *text* sentence-by-sentence, lower-case every token, and keep
    only tokens that contain at least one ASCII letter (drops numbers and
    bare punctuation)."""
    lowered = (token.lower()
               for sentence in nltk.sent_tokenize(text)
               for token in nltk.word_tokenize(sentence))
    return [token for token in lowered if re.search('[a-zA-Z]', token)]
#not super pythonic, no, not at all.
#use extend so it's a big flat list of vocab
# Build parallel flat vocab lists: stems and the raw lower-cased tokens.
totalvocab_stemmed = []
totalvocab_tokenized = []
for i in text:
    allwords_stemmed = tokenize_and_stem(i) #for each item in 'synopses', tokenize/stem
    totalvocab_stemmed.extend(allwords_stemmed) #extend the 'totalvocab_stemmed' list
    allwords_tokenized = tokenize_only(i)
    totalvocab_tokenized.extend(allwords_tokenized)
# print totalvocab_stemmed
# print totalvocab_tokenized
# DataFrame indexed by stem, valued by the original token — lets a stem be
# mapped back to a readable word later.
vocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index = totalvocab_stemmed)
print 'there are ' + str(vocab_frame.shape[0]) + ' items in vocab_frame'
#define vectorizer parameters
# max_df/min_df prune terms in >80% or <10% of documents; 1-3 grams.
tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000,
                                 min_df=0.1, stop_words='english',
                                 use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1,3))
tfidf_matrix = tfidf_vectorizer.fit_transform(text) #fit the vectorizer to synopses
print(tfidf_matrix.shape)
terms = tfidf_vectorizer.get_feature_names()
print terms
# def main():
# 	pass
# if __name__ == '__main__':
# 	main()
| true
|
41fd58d73e329e30f3e739a3d25120b8672275e6
|
Python
|
Python3pkg/GooseMPL
|
/examples/plot-cmap.py
|
UTF-8
| 742
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.style.use(['goose','goose-latex'])
# One curve per exponent n in [0, 2]; N controls both the number of curves
# and the number of discrete colormap bins.
x = np.linspace(0, 5, 100)
N = 21
cmap = plt.get_cmap('jet',N)  # discretized jet colormap with N bins
fig,ax = plt.subplots()
# N.B. to modify the aspect ratio one could replace this line by:
# fig = plt.figure(figsize=(8,6))
# ax1 = fig.add_axes([0.10,0.10,0.70,0.85])
for i,n in enumerate(np.linspace(0,2,N)):
  y = np.sin(x)*x**n
  ax.plot(x,y,color=cmap(i))  # i-th colormap bin for the i-th curve
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
# Colorbar mapping the curve colours back to their n values.
norm = mpl.colors.Normalize(vmin=0,vmax=2)
sm = plt.cm.ScalarMappable(cmap=cmap,norm=norm)
sm.set_array([])
plt.colorbar(sm,ticks=np.linspace(0,2,N),boundaries=np.arange(-0.05,2.1,.1))
plt.savefig('plot-cmap_goose-latex.svg')
| true
|
1b2f084ac7c6e973bc91f54d558539d100f68329
|
Python
|
DimitriKnd/Skillup_courseP
|
/DZ01.py
|
UTF-8
| 509
| 4.25
| 4
|
[] |
no_license
|
# Read three integers, then either sum or multiply them based on a mode flag.
a = int(input("Enter 1st number: "))
b = int(input("Enter 2nd number: "))
c = int(input("Enter 3rd number: "))
d = int(input("input 1 to obtain the sum or 2 to obtain the product: "))
# The three cases are mutually exclusive, so a single if/elif/else chain
# (as the original's trailing comment suggested) replaces the three ifs.
if d == 1:
    print("a+b+c =", a+b+c)
elif d == 2:
    print("axbxc =", a*b*c)
else:
    print("you put a wrong value")
| true
|
e2e5fe5943f9d0c3503a3c14b4df7ade8f90fd3d
|
Python
|
0xvon/find-similar-pokemon
|
/main.py
|
UTF-8
| 2,024
| 2.78125
| 3
|
[] |
no_license
|
import cv2
import os
def calc_feature(img_path: str, detector):
    """Load *img_path* as grayscale, resize it to 100x100, and return the
    (keypoints, descriptors) pair produced by *detector*.

    Fix: the original annotated ``detector`` as ``cv2.ORB_create()``, which
    *instantiates* a throwaway ORB detector at function-definition time just
    to serve as an annotation; the parameter is simply documented instead
    (any cv2 feature detector with ``detectAndCompute`` works).
    """
    IMG_SIZE = (100, 100)
    img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    img = cv2.resize(img, IMG_SIZE)
    # compute features: (keypoints, descriptors)
    return detector.detectAndCompute(img, None)
# 表示
# display the best matches
def show_imgs_match(file):
    """Draw the 10 best ORB matches between the reference image and *file*.

    Fix: the original referenced ``plt`` although matplotlib was never
    imported anywhere in this module, so every call raised NameError.  The
    import is done lazily here to keep the module importable without
    matplotlib installed.
    """
    import matplotlib.pyplot as plt  # missing in the original module

    img1 = cv2.imread('./material_images/me.jpg')
    img2 = cv2.imread('./blog/material_images/' + file)
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    akaze = cv2.ORB_create()
    kp1, des1 = akaze.detectAndCompute(gray1, None)
    kp2, des2 = akaze.detectAndCompute(gray2, None)
    # Brute-force Hamming matcher; crossCheck keeps only mutual best matches.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)  # best (smallest) first
    img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:10], None, flags=2)
    plt.imshow(cv2.cvtColor(img3, cv2.COLOR_BGR2RGB))
    plt.show()
# Compare the target image against every image in the directory and report
# the one with the smallest average ORB descriptor distance.
target_image = 'me.jpg'
image_directory = './material_images/'
bf = cv2.BFMatcher(cv2.NORM_HAMMING)
# feature-extraction algorithm
detector = cv2.ORB_create()
# detector = cv2.AKAZE_create()
(target_kp, target_des) = calc_feature(image_directory + target_image,
                                       detector)
print('target: %s' % (target_image))
# Lower score = more similar; start above any plausible average distance.
min_value = 1000
min_path = ''
files = os.listdir(image_directory)
for file in files:
    if file == '.DS_Store' or file == target_image:
        continue
    comparing_img_path = image_directory + file
    try:
        (comparing_kp, comparing_des) = calc_feature(comparing_img_path,
                                                     detector)
        # match the two images' descriptors
        matches = bf.match(target_des, comparing_des)
        dist = [m.distance for m in matches]
        # similarity score = mean match distance
        ret = sum(dist) / len(dist)
    except cv2.error:
        # unreadable/incompatible image: give it a score that never wins
        ret = 100000
    if min_value > ret:
        min_value = ret
        min_path = file
        print(min_path, min_value)
show_imgs_match(min_path)
| true
|
d3b72e0f58a156a59dc7007bbfe12b6f1cc68909
|
Python
|
daelynj/Distributed-Particle-System
|
/src/client.py
|
UTF-8
| 3,231
| 2.734375
| 3
|
[] |
no_license
|
import grpc
import os
import pygame
import sys
import threading
import time
from queue import Queue
from pygame.locals import *
import proto.particle_system_pb2 as ps
import proto.particle_system_pb2_grpc as rpc
# Render settings and the RGB palette; COLORS maps the colour names sent by
# the server (see Client.generate_particles) to RGB tuples.
FPS = 30
WINDOW_WIDTH = 1366
WINDOW_HEIGHT = 768
WHITE = (255, 255, 255)
DARK_GREY = (105, 105, 105)
GREY = (169, 169, 169)
LIGHT_GREY = (211, 211, 211)
YELLOW = (255, 255, 0)
ORANGE = (255, 140, 0)
SKY_BLUE = (135, 206, 235)
COLORS = {
    'dark-grey': DARK_GREY,
    'grey': GREY,
    'light-grey': LIGHT_GREY,
    'yellow': YELLOW,
    'orange': ORANGE
}
class Client:
    """gRPC particle-stream client rendered with pygame.

    Starts two threads: one pulls frames from the server's
    GenerateParticles stream into ``frame_buffer``; the other pops frames
    and draws them at ``FPS``.
    """
    def __init__(self, address='localhost', port=50051):
        channel = grpc.insecure_channel(address + ':' + str(port))
        self.stub = rpc.GenerateStub(channel)
        # Thread-safe hand-off between the gRPC reader and the renderer.
        self.frame_buffer = Queue()
        self.generate_thread = threading.Thread(target=self.generate_particles)
        self.generate_thread.start()
        global CLOCK, DISPLAY
        pygame.init()
        CLOCK = pygame.time.Clock()
        DISPLAY = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
        pygame.display.set_caption('Particles!')
        self.render_thread = threading.Thread(target=self.render_particles)
        self.render_thread.start()
    def generate_particles(self):
        """Stream 300 frames from the server and queue them for rendering."""
        instructions = ps.RenderInstructions()
        instructions.frame_count = 300
        for frame in self.stub.GenerateParticles(instructions):
            # Convert proto particles to plain dicts; colour names are
            # resolved to RGB tuples through the module-level COLORS map.
            # NOTE(review): map() is lazy — the frame is materialised only
            # when the render thread iterates it.
            particles = map(
                lambda p : {'x': p.x, 'y': p.y, 'size': p.size, 'color': COLORS[p.color]},
                frame.particles
            )
            self.frame_buffer.put(particles)
    def render_particles(self):
        """Pop frames from the buffer and draw them until the stream dries up."""
        global CLOCK, DISPLAY
        while True:
            try:
                # gracefully handle quiting
                self.checkForKeyPress()
                # wait briefly for a frame in the buffer
                # (up to ~0.9s; give up and stop rendering after 10 tries)
                for i in range(10):
                    if i == 9:
                        return
                    elif self.frame_buffer.empty():
                        time.sleep(0.1)
                    else:
                        break
                frame = self.frame_buffer.get()
                # clear the previous frame
                DISPLAY.fill(SKY_BLUE)
                for particle in frame:
                    # create a surface with transparancy
                    surface = pygame.Surface((250, 250))
                    surface.set_colorkey((0, 0, 0))
                    surface.set_alpha(100)
                    # draw the smoke partile to the surface
                    pygame.draw.circle(surface, particle['color'], (125, 125), int(particle['size']))
                    # adjust the position in the screen
                    position = (particle['x'] + int(WINDOW_WIDTH / 2.6), particle['y'] + WINDOW_HEIGHT - 150)
                    # blit the surface to the display
                    DISPLAY.blit(surface, (position))
                # update the display and tick the clock
                pygame.display.update()
                CLOCK.tick(FPS)
            except Exception as e:
                # NOTE(review): any rendering error silently triggers shutdown;
                # the exception itself is never logged.
                self.terminate()
    def checkForKeyPress(self):
        """Terminate on window close, ESC or Q."""
        # NOTE(review): leftover debug print — fires every frame.
        print('here')
        if len(pygame.event.get(QUIT)) > 0:
            self.terminate()
        keyUpEvents = pygame.event.get(KEYUP)
        if len(keyUpEvents) == 0:
            return
        if keyUpEvents[0].key in [K_ESCAPE, K_q]:
            self.terminate()
    def terminate(self):
        """Shut pygame down and stop both worker threads.

        NOTE(review): this runs on the render thread itself, so
        ``self.render_thread.join()`` joins the current thread (RuntimeError),
        and ``generate_thread.join()`` can block on the open gRPC stream;
        ``sys.exit()`` inside a thread only ends that thread. Confirm the
        intended shutdown path.
        """
        pygame.quit()
        self.render_thread.join()
        self.generate_thread.join()
        sys.exit()
# Script entry point: constructing Client starts both worker threads.
if __name__ == '__main__':
    client = Client()
| true
|
bcc3f2d8aaf9c31a68ad27f36a5e4fb1237010df
|
Python
|
cylinder-lee-cn/LeetCode
|
/LeetCode/872.py
|
UTF-8
| 1,680
| 4.84375
| 5
|
[] |
no_license
|
"""
872. 叶子相似的树
请考虑一颗二叉树上所有的叶子,这些叶子的值按从左到右的顺序排列形成一个 叶值序列 。
872-1.png
举个例子,如上图所示,给定一颗叶值序列为 (6, 7, 4, 9, 8) 的树。
如果有两颗二叉树的叶值序列是相同,那么我们就认为它们是 叶相似 的。
如果给定的两个头结点分别为 root1 和 root2 的树是叶相似的,则返回 true;否则返回 false 。
提示:
给定的两颗树可能会有 1 到 100 个结点。
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def leafSimilar(self, root1, root2):
        """
        :type root1: TreeNode
        :type root2: TreeNode
        :rtype: bool

        Two trees are leaf-similar when their left-to-right leaf value
        sequences are identical.  A depth-first generator visits left
        subtrees before right subtrees, so leaves are yielded in order.
        """
        def leaves(node):
            if node is None:
                return
            if node.left is None and node.right is None:
                yield node.val  # a leaf: emit its value
            else:
                yield from leaves(node.left)
                yield from leaves(node.right)
        return list(leaves(root1)) == list(leaves(root2))
"""
此题解法(源自官网):
首先,让我们找出给定的两个树的叶值序列。之后,我们可以比较它们,看看它们是否相等。
要找出树的叶值序列,我们可以使用深度优先搜索。
如果结点是叶子,那么 dfs 函数会写入结点的值,然后递归地探索每个子结点。
这可以保证按从左到右的顺序访问每片叶子,因为在右子树结点之前完全探索了左子树结点。
"""
| true
|
8bfae8b384ad8dacbb3e695b8b51d54a4a1b3f6b
|
Python
|
jntp/ForecastSend2.0
|
/forecastsend.py
|
UTF-8
| 11,146
| 2.84375
| 3
|
[] |
no_license
|
# main.py is the main python file of ForecastSend2.0 application
import math
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.properties import ObjectProperty
from kivy.uix.dropdown import DropDown
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.popup import Popup
from database import DataBase
from send_SMS import MakeCalls
# used to store text inputted in text boxes
class SaveText:
    """Holds text captured from UI widgets so later screens can read it.

    Attributes are created lazily on first save:
      citySave -- city/region chosen in the CityWindow drop-down
      textSave -- forecast text typed into the medium-range update box
    """
    def citySelect(self, cityText):
        """Remember the selected city/region string."""
        self.citySave = cityText
    def UpdateSave(self, updateText):
        """Remember the medium-range update text."""
        self.textSave = updateText
# class for drop down menu
class CustomDropDown(DropDown):
    """Kivy DropDown subclass; behaviour is defined in main.kv (if any)."""
    pass
# prompts user to select between "one-day precipitation" or "medium-range update" forecasts
class HomeWindow(Screen):
    """First screen: choose the forecast type."""
    # when user clicks "one-day precipitation" button
    def one_day_precip(self):
        # NOTE(review): this jumps to "sent", not to the "one_day_main"
        # screen — likely a placeholder while the one-day flow (whose
        # screens below are empty) is unimplemented.  Confirm.
        sm.current = "sent" # switch to one-day precipitation screen
    # when user clicks "medium-range update" button
    def medium_range(self):
        sm.current = "city" # then switch to medium-range update screen
# prompts user to choose which city or region to send the forecast
class CityWindow(Screen):
    """Screen for choosing which city/region receives the forecast."""
    dropDownList = ObjectProperty(None)  # container widget for the drop-down button (from main.kv)
    cityString = "Select city/region"
    def __init__(self, *args, **kwargs):
        super(CityWindow, self).__init__(*args, **kwargs)
        self.drop_down = CustomDropDown()
        # create drop down menu of select cities
        dropdown = DropDown()
        cities = ["All", "San Francisco/Oakland", "Davis/Sacramento", "Santa Clara Valley", "Los Angeles/Orange County", "San Diego", "New York City", \
            "Baltimore"]
        for city in cities:
            # create button individually for each city
            # NOTE(review): '%r' renders the city WITH quote characters, so
            # btn.text (and hence sv.citySave / db.get_subscribers input)
            # contains quotes — verify against the database's city keys.
            btn = Button(text = '%r' % city, size_hint_y = None, height = 30, pos = (25, 25), on_release = lambda btn : sv.citySelect(btn.text))
            btn.bind(on_release = lambda btn : dropdown.select(btn.text)) # attach a callback which will pass the text selected as data
            dropdown.add_widget(btn)
        # create the main or default button
        mainbutton = Button(text = 'Select city/region', size_hint = (0.5, 0.5))
        mainbutton.bind(on_release = dropdown.open) # show drop down menu when released
        dropdown.bind(on_select = lambda instance, x : setattr(mainbutton, 'text', x)) # assign data to button text
        self.dropDownList.add_widget(mainbutton)
    # when user clicks the "BACK" button
    def back(self):
        sm.current = "home" # go back to home screen
    # when user clicks the "GO" button
    def go(self):
        # check if user actually selected a city/region
        if hasattr(sv,'citySave'): # if user selected anything (if object has attribute)
            db.get_subscribers(sv.citySave) # call database function to retrieve subscribers with matching city
            sm.current = "medium_range" # switch to medium-range update main screen
        else:
            errorCity() # display error pop up window
# one-day precipitation: main screen for user entering parameters
class OneDayParameterWindow(Screen):
    """Placeholder: one-day precipitation parameter screen (not implemented)."""
    pass
# one-day precipitation: for editing the forecast
class OneDayEditWindow(Screen):
    """Placeholder: one-day precipitation editing screen (not implemented)."""
    pass
# medium-range update: main window
class MediumRangeWindow(Screen):
    """Screen where the forecaster types the medium-range update text."""
    update = ObjectProperty(None)  # TextInput bound in main.kv
    character_count = ObjectProperty(None)  # counter label bound in main.kv
    # when user clicks the "BACK" button
    def back(self):
        sm.current = "city" # go back to select city screen
    # when user clicks the "GO" button
    def go(self):
        # check if text box is blank
        if self.update.text == "":
            errorMedium() # display error pop up window
        else:
            sv.UpdateSave(self.update.text) # save the text written in the text box
            sm.current = "preview" # switch to preview screen
# preview window for both types of forecasts
class PreviewWindow(Screen):
    """Preview of the forecast text plus the recipient list, with a final
    confirmation popup before the SMS is sent via Twilio."""
    previewText = ObjectProperty(None)
    characterCount = ObjectProperty(None)
    selectedNames = ObjectProperty(None)
    selectedNumbers = ObjectProperty(None)
    selectedNames2 = ObjectProperty(None) # 3rd column of recipients text
    selectedNumbers2 = ObjectProperty(None) # 4th column of recipients text
    def on_enter(self, *args):
        """Populate the preview text, character/SMS counters and the
        (up to four-column) recipient list when the screen is shown."""
        self.previewText.text = sv.textSave
        text_length = len(self.previewText.text)
        sms_count = math.ceil(text_length / 160)  # 160 chars per SMS segment
        info_string = "[b]" + "Character Count: " + str(text_length) + "\n" + "Number of Messages: " + str(sms_count) + "[/b]"
        self.characterCount.text = info_string
        # Loop through selectedNames and selectedNumbers of the database
        columnSplit = math.floor((len(db.selectedNames) - 1) / 2) # list "splits" halfway, allowing recipients to "overflow" to next 2 columns
        for index, name in enumerate(db.selectedNames):
            # check if index less than or equal to half the length of selectedNames and selectedNumbers
            if index <= columnSplit:
                # onto left 2 columns
                self.selectedNames.text = self.selectedNames.text + "\n" + db.selectedNames[index] # show in recipient box
                self.selectedNumbers.text = self.selectedNumbers.text + "\n" + db.selectedNumbers[index]
            # overflow to the next two columns if number of recipients exceeds 9
            elif index > columnSplit:
                self.selectedNames2.text = self.selectedNames2.text + "\n" + db.selectedNames[index]
                self.selectedNumbers2.text = self.selectedNumbers2.text + "\n" + db.selectedNumbers[index]
    # Create dynamically sized label for preview text
    def fontsize(self, text, height, width):
        """Return a font size string like "16sp", scaled to ~2% of the mean
        window dimension and shrunk for long messages (empirical tuning)."""
        dimAvg = (height + width) / 2 # calculate an average dimension size (between width and height)
        sp = math.ceil(dimAvg * 0.02) # two percent of the screen's dimensions
        # For messages larger than 320 characters
        if len(text) > 320:
            lenAdjust = len(text) - 160 # offset character count by 160
            spDrop = math.floor(lenAdjust / 160) # calculate "drop in sp" based on character count
            sp -= spDrop # initiate "drop in sp"
            # calculate greater "drop in sp" for large messages and large windows (will overlap with the character counter)
            if spDrop >= 3 and dimAvg > 970:
                sp -= 1 # drop the font size by another 1 sp if conditions are met
        # print #sp on font_size
        return "{}sp".format(sp)
    # Create dynamically sized label for recipients' text
    def recipientFont(self, text, height, width):
        """Return a font size string for a recipients column, shrinking as
        the column's line count and the window size grow (empirical tuning)."""
        dimAvg = (height + width) / 2
        sp = math.ceil(dimAvg * 0.02)
        count = text.count("\n") # intend to do an sp drop for count >= 7 where dimAvg == 800
        if count >= 7:
            countAdjust = count - 7
            spDrop = math.floor(countAdjust / 2) + 1 # for every two lines, drop the font size by 1 sp
            sp -= spDrop
            # drop by another sp if there are 10 or more recipients in one column
            if count >= 10:
                sp -= 1
        # calculate greater "drop in sp" for 9 lines or more and larger windows
        if count >= 9 and dimAvg >= 750:
            sp -= 1
            # drop sp again if window is even larger
            if dimAvg >= 900:
                sp -= 1
                # keep dropping the sp
                if dimAvg >= 1000:
                    sp -= 1
                    # drop sp again for larger columns
                    if count >= 10:
                        sp -= 1
        return "{}sp".format(sp)
    # when user presses the "BACK" button
    def back(self):
        sm.current = "medium_range"
        # Clear the recipients columns to prevent names from stacking onto each other
        self.selectedNames.text = ""
        self.selectedNumbers.text = ""
        self.selectedNames2.text = ""
        self.selectedNumbers2.text = ""
    # when user presses the "SEND" button
    def send(self):
        self.warnSend()
    # warning message for sending the forecast
    def warnSend(self):
        """Show a yes/no confirmation popup; YES triggers self.go()."""
        # Create box layout to accomodate two buttons and label
        box = BoxLayout(orientation = 'vertical', padding = (17))
        box.add_widget(Label(text = "You are about to send the forecast.\n This action cannot be undone.\n SMS charges will apply.\n Continue?"))
        noButton = Button(text = "NO", pos_hint = {"x": 0.30}, size_hint = (0.4, 0.15))
        yesButton = Button(text = "YES", pos_hint = {"x": 0.30}, size_hint = (0.4, 0.15))
        box.add_widget(yesButton)
        box.add_widget(noButton)
        # Create pop up
        warnPopup = Popup(title = "WARNING!", content = box, size_hint = (None, None), size = (400, 400), auto_dismiss = True)
        yesButton.bind(on_press = self.go) # will send you to the "sent" screen
        yesButton.bind(on_press = warnPopup.dismiss)
        noButton.bind(on_press = warnPopup.dismiss) # stay on the preview window
        warnPopup.open()
    # when user confirms submission of forecast
    def go(self, instance):
        """Actually send the SMS through Twilio and move to the sent screen."""
        mc.makeCall(sv.textSave, db.selectedNumbers, db.selectedNames) # make API Call
        sm.current = "sent" # switch to sent screen
        # Clear the recipients text box to prevent names from stacking if user decides to send another forecast
        self.selectedNames.text = ""
        self.selectedNumbers.text = ""
        self.selectedNames2.text = ""
        self.selectedNumbers2.text = ""
# screen for when user sends the forecast
# screen for when user sends the forecast
class SentWindow(Screen):
    """Post-send screen asking whether to send another forecast."""
    # when user presses the "YES" button
    def yes(self):
        # clear the selectedNames and selectedNumbers lists
        db.selectedNames = []
        db.selectedNumbers = []
        sm.current = "home" # go back to home screen
    # when user presses the "NO" button
    def no(self):
        App.get_running_app().stop() # exit the application
class WindowManager(ScreenManager):
    """ScreenManager subclass holding every application screen."""
    pass
## Popup Windows for Errors
# for unselected city/region in city window
def errorCity():
    """Pop up an error dialog when no city/region has been selected."""
    message = Label(text = "Error! Please select a city/region.")
    popup = Popup(title = "ERROR", content = message, size_hint = (None, None), size = (400, 400))
    popup.open()  # display the popup immediately
# for blank input box in medium-range update window
def errorMedium():
    """Pop up an error dialog when the medium-range text box is empty."""
    message = Label(text = "Error! Text box cannot be blank.")
    popup = Popup(title = "ERROR", content = message, size_hint = (None, None), size = (400, 400))
    popup.open()  # display the popup immediately
## Class instances
kv = Builder.load_file("main.kv") # load main.kv file
sm = WindowManager() # load WindowManager upon running
sv = SaveText() # access to functions for storing text
db = DataBase("data.txt") # load database
mc = MakeCalls() # allow access to Twilio
# create screens list that assigns name (ID) to each class
screens = [HomeWindow(name = "home"), CityWindow(name = "city"), OneDayParameterWindow(name = "one_day_main"), MediumRangeWindow(name = "medium_range"), \
PreviewWindow(name = "preview"), SentWindow(name = "sent")]
for screen in screens:
sm.add_widget(screen)
sm.current = "home" # by default, current screen goes to HomeWindow
# builds the kivy application
class ForecastSendApp(App):
    """Kivy application whose root widget is the shared WindowManager."""
    def build(self):
        return sm
if __name__ == "__main__":
ForecastSendApp().run()
# You left off at working on sent window. Specifically adding 'yes' and 'no' buttons to screen.
# Also don't forget to put an error message IF the forecast does not send
# Don't forget to add pop up windows for error messages
| true
|
a1501f46d7450524babb295f3369ab54d0599bf8
|
Python
|
yangjingScarlett/pythonlearning
|
/a_basicPython/1operator/compare_operator.py
|
UTF-8
| 476
| 3.546875
| 4
|
[] |
no_license
|
# coding=utf-8
# Demonstration of Python comparison operators (Python 2 print syntax).
a = 21
b = 10
c = 0  # NOTE(review): assigned but never used below
# equality / inequality
if a == b:
   print a, " == ", b
else:
   print a, " not == ", b
if a != b:
   print a, " != ", b
else:
   print a, " == ", b
# ordering comparisons
if a < b:
   print a, " < ", b
else:
   print a, " not < ", b
if a > b:
   print a, " > ", b
else:
   print a, " not > ", b
# Change the values of variables a and b
a = 5
b = 20
if a <= b:
   print a, " <= ", b
else:
   print a, " not <= ", b
if b >= a:
   print b, " >= ", a
else:
   print b, " not >= ", a
| true
|
221732c74d774907b347eebcbf1955d593de4b1a
|
Python
|
TalWeisler/Ed-Hitting-Set
|
/Sunflower.py
|
UTF-8
| 2,335
| 2.671875
| 3
|
[] |
no_license
|
import numpy as np
import math
import copy
def sunflowerAlgorithm(h):
    """Repeatedly shrink hypergraph *h* by replacing sunflower petals with
    their core, until no qualifying sunflower remains.

    Returns [] when the kernel is already small enough or no further
    reduction applies, None when a sunflower search yields an empty core.
    NOTE(review): *h* is mutated in place; exact semantics of is_dup,
    delete_edge / delete_edge_2 live in the hypergraph class (not shown).
    """
    # Merge duplicate edges (columns) — is_dup compares edge i to earlier ones.
    for i in range(h.edges - 1, -1, -1):
        if not (h.e_degree[i] == 0):
            h.is_dup(h.matrix[:, i], i - 1)
    for i in range(h.edges - 1, -1, -1):
        if h.e_degree[i] == 0 : # empty edge or edge.degree < h.d
            h.delete_edge(i)
    # Kernel keeps only edges of degree exactly h.d.
    a_kernel = copy.deepcopy(h)
    for i in range(a_kernel.edges - 1, -1, -1) :
        if not (a_kernel.e_degree[i]== h.d): #empty edge or edge.degree < h.d
            a_kernel.delete_edge_2(i)
    # Sunflower lemma bound: fewer than d! * k^d edges => no k-petal sunflower forced.
    if a_kernel.edges < math.factorial(a_kernel.d) * math.pow(a_kernel.k , a_kernel.d):
        return []
    else :
        (core, g) = sunflowerAlgorithmRec(a_kernel, np.array([0] * h.vertices), [])
        if sum(core) == 0:
            return None
        else:
            # Replace the petal edges (indices in g) with the single core edge.
            for i in range(len(g) - 1, -1, -1):
                h.delete_edge(g[i])
            h.add_edge(core)
            return sunflowerAlgorithm(h)
    # NOTE(review): unreachable — both branches of the if/else above return.
    return []
def sunflowerAlgorithmRec(a_kernel, core, g):
    """Search *a_kernel* for a sunflower with k petals; returns (core, g)
    where g lists the petal edge indices and *core* marks core vertices.

    NOTE(review): the incoming *g* argument is immediately overwritten
    below, so it is effectively an output-only parameter.
    """
    # build G: greedily collect pairwise-disjoint edges starting from the
    # first non-empty column.
    j = 0
    while sum(a_kernel.matrix[:, j]) == 0:
        j = j + 1
    g = [j]  # first group S1
    g_vec = a_kernel.matrix[:, j]  # union of the vertices used so far
    for i in range(1, a_kernel.matrix.shape[1]):
        if not (a_kernel.e_degree[i] == 0):  # col is not deleted
            temp = g_vec
            temp = np.bitwise_and(temp, a_kernel.matrix[:, i])
            if sum(temp) == 0:  # edge i shares no vertex: safe to add to G
                g_vec = g_vec + a_kernel.matrix[:, i]
                g.append(i)
            else:
                continue
    if a_kernel.k <= len(g):  # found sunflower with k-petals
        return core, g
    else:
        # Not enough disjoint edges: pin the most frequent vertex into the
        # core, restrict to edges containing it, and recurse with d-1.
        (v_index, v_edges) = findMaxV(a_kernel)
        for i in range(a_kernel.edges - 1, -1, -1):
            if not v_edges.__contains__(i):
                a_kernel.delete_edge_2(i)
        core[v_index] = 1
        a_kernel.delete_vertex(v_index)
        a_kernel.d = a_kernel.d - 1  # update d
        return sunflowerAlgorithmRec(a_kernel, core, g)
def findMaxV(a_kernel):
    """Return (v, edges) where v is the index of a maximum-degree vertex
    (first one on ties; 0 when every degree is <= 0) and edges is the list
    of edges containing v."""
    best_degree = 0
    best_vertex = 0
    for vertex, degree in enumerate(a_kernel.v_degree):
        if degree > best_degree:
            best_degree = degree
            best_vertex = vertex
    return best_vertex, a_kernel.edges_with_v(best_vertex)
| true
|
0cdc53bffe4f2ce28e20fd4e268f109c30a53392
|
Python
|
nihalmenon/BreastCancerMalignanceClassifier
|
/main.py
|
UTF-8
| 1,345
| 2.90625
| 3
|
[] |
no_license
|
import sklearn
from sklearn.utils import shuffle
import pandas as pd
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn import linear_model, preprocessing
# Train a k-NN classifier on the Wisconsin breast-cancer dataset and print
# its predictions on a held-out 10% test split.
data = pd.read_csv("breast-w_csv.csv")
le = preprocessing.LabelEncoder()
cls = le.fit_transform(list(data["Class"]))  # encode Class labels to 0/1
predict = "class"  # NOTE(review): assigned but never used below
# Feature rows: five selected cytology measurements per sample.
X = list(zip(data["Clump_Thickness"], data["Cell_Size_Uniformity"], data["Cell_Shape_Uniformity"], data["Normal_Nucleoli"], data["Mitoses"]))
y = list(cls)
x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y, test_size=0.1)
model = KNeighborsClassifier(n_neighbors=9)
model.fit(x_train, y_train)
accuracy = model.score(x_test, y_test)
# insertData can be swapped for manual samples (commented example); when it
# IS x_test the == comparisons below are True and actual labels are printed.
insertData = x_test #[(4,4,4,4,1), (1,1,5,5,1), (3,3,3,3,1)]
predicted = model.predict(insertData)
result = ["Benign", "Malignant"]  # index by encoded class
dataDescription = "Data Values (1-10): (Clump Thickness, Cell Size Uniformity, Cell Shape Uniformity, Normal Nucleoli, Mitoses)\n"
print(dataDescription)
if insertData == x_test:
    print("Accuracy: {}%\n".format(round(accuracy * 100, 2)))
for x in range(len(predicted)):
    if insertData == x_test:
        print("Predicted:", str(result[predicted[x]]) + " |", "Data:", str(x_test[x]) + " |", "Actual:", str(result[y_test[x]]))
    else:
        print("Predicted:", result[predicted[x]], "\nData:", insertData[x])
| true
|
23d3cfef3d82ffd7618834d430d693299d490c80
|
Python
|
guangyaai/DARPA
|
/mid-phase1-text-modality-NetScale/src/splitDB.py
|
UTF-8
| 4,209
| 3.25
| 3
|
[] |
no_license
|
# splitDB.py
# this code splits the dataset into subsets
# these separate dbs can then be used for training/testing
# arguments: dbName perc1 perc2 .. percN
# generates N+1 db files that split dbName into the
# corresponding percentages
# each perc argument should be [1 - 99], and they should
# sum to < 100
from OpenTable_pb2 import *
import sys
from glob import glob
from numpy import *
from random import shuffle
def copyRestaurant(inRest, outRest):
    """
    copy restaurant inRest into blank restaurant outRest
    deep copy due to GPB design

    Protocol-buffer messages cannot simply be assigned, so the id and each
    review (all five rating fields plus text) are copied field by field.
    """
    outRest.id = inRest.id
    for inRev in inRest.review:
        outRev = outRest.review.add()  # append a fresh review message
        outRev.overallRating = inRev.overallRating
        outRev.foodRating = inRev.foodRating
        outRev.ambianceRating = inRev.ambianceRating
        outRev.serviceRating = inRev.serviceRating
        outRev.noiseRating = inRev.noiseRating
        outRev.text = inRev.text
def numReviews(r):
    """
    Return the number of reviews for a given restaurant.

    Useful in map() calls.  Fix: use the builtin len() instead of numpy's
    size() (pulled in by the star import) — the protobuf repeated field is
    a plain sequence, and len() avoids numpy's array-coercion semantics.
    """
    return len(r.review)
def splitNReviews(restaurantList, indList, numRevs):
    """
    Remove restaurants until we have at least numRevs reviews.
    Return as separate list, with sublist no longer present in
    original indices of items to remove are popped from indList new
    subset list, shortened index list, num in subset are returned

    indList is a pre-shuffled list of restaurant indices; pop() takes
    from its end, so the sample order is random.
    """
    subList = RestaurantList()
    collectedRevs = 0
    # loop until we have enough reviews or nothing left
    while collectedRevs < numRevs and size(indList) > 0:
        # get next element from list
        curRest = restaurantList.restaurant[indList.pop()]
        # add it to sublist and update count
        copyRestaurant(curRest, subList.restaurant.add())
        collectedRevs = collectedRevs + numReviews(curRest)
    return subList, indList, collectedRevs
def splitDatabase(db, percArr):
"""
Takes a db name string and creates split files percArr is an array
of float values which sum to 1
"""
# open the db and parse it
with open (db) as input:
data = "".join(input.readlines())
restaurantList = RestaurantList()
restaurantList.ParseFromString(data)
# get total by summing all review counts
totalRevs = map(numReviews, restaurantList.restaurant)
totalRevs = array(map(int, totalRevs))
totalRevs = totalRevs.sum()
# random ordering of restaurant indices
indList = range(1,len(restaurantList.restaurant))
shuffle(indList)
# make a new db for this split percent
for p in percArr:
newDBName = db + "."+ str(int(p*100)) + ".db"
numRevs = int(totalRevs * p)
print newDBName, ": ", numRevs, " ideal"
# samples from list without replacement
subList, indList, actualRevs = splitNReviews(restaurantList, indList, numRevs)
print newDBName, ": ", actualRevs, " actual"
# write subset as new db
f = open(newDBName, "wb")
f.write(subList.SerializeToString())
f.close()
def splitDatabasesByArgs():
    """
    The main function. Takes the following command line arguments:
    dbName -- a db file name (may be a glob pattern; each match is split)
    perc1, perc2, ... -- percentages in [1, 99]; must sum to less than
    100. The remainder is automatically split into a final distinct
    group, so N percentages yield N+1 output files per db.
    """
    if len(sys.argv) < 3:
        print "Usage:", sys.argv[0], " dbName perc1 perc2 .."
        sys.exit(-1)
    dbName = sys.argv[1]
    # create numpy array from percent arguments
    # (a trailing 0 placeholder later becomes the remainder group)
    percList = map(float, sys.argv[2:])
    percList.append(0)
    percArr = array(percList)
    # bit of error checking
    if sum(percArr) > 99:
        print 'Error: percents given must sum to < 100. E.g. 25 50'
        exit(-1)
    # convert percentages to fractions; last entry absorbs the remainder
    percArr = percArr / 100
    percArr[-1] = 1 - sum(percArr)
    print percArr, percArr.sum()
    # for each db name, split by the given percentages
    dbs = glob(dbName)
    for db in dbs:
        splitDatabase(db, percArr)
if __name__ == "__main__":
    splitDatabasesByArgs()
| true
|
972379ad6d0f2445567252d4c63b7bac83e08e56
|
Python
|
wammar/wammar-utils
|
/prune-long-lines.py
|
UTF-8
| 958
| 2.84375
| 3
|
[] |
no_license
|
import re
import time
import io
import sys
import argparse
from collections import defaultdict

# Copy input to output, dropping every line with more than -tokens
# whitespace-separated tokens; report how many lines were pruned.

# parse/validate arguments
argParser = argparse.ArgumentParser()
argParser.add_argument("-tokens", type=str, help="prune line if it has more than this many tokens")
argParser.add_argument("-in", "--input_filename", type=str, help="input filename")
argParser.add_argument("-out", "--output_filename", type=str, help="output filename")
argParser.add_argument("-ie", "--input_encoding", type=str, default='utf8')
argParser.add_argument("-oe", "--output_encoding", type=str, default='utf8')
args = argParser.parse_args()

counter = 0                       # number of pruned (too-long) lines
max_tokens = int(args.tokens)     # convert once, not once per line
# 'with' guarantees both handles are closed (the original never closed
# the input and only closed the output on the happy path)
with io.open(args.output_filename, encoding=args.output_encoding, mode='w') as of:
    with io.open(args.input_filename, encoding=args.input_encoding, mode='r') as inf:
        for line in inf:
            if len(line.split()) <= max_tokens:
                of.write(line)
            else:
                counter += 1
# BUG FIX: was a Python-2-only print statement; the call form works on 2 and 3
print('{0} lines pruned out'.format(counter))
| true
|
b38da6187db5fd5e6733b9309517c6e8db538314
|
Python
|
Sidray-Infinity/Buffer
|
/nextRight.py
|
UTF-8
| 527
| 3.640625
| 4
|
[] |
no_license
|
class Node:
    """A binary-tree node that also carries a ``next`` sibling pointer."""

    def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
        self.val = val
        self.left = left
        self.right = right
        self.next = next


def mark(root):
    """Print every node value, right subtree first (reverse post-order)."""
    if root is None:
        return
    mark(root.right)
    mark(root.left)
    print(root.val)


if __name__ == "__main__":
    # Sample tree:      1
    #                 /   \
    #                2     3
    #               / \     \
    #              4   5     7
    root = Node(1,
                left=Node(2, left=Node(4), right=Node(5)),
                right=Node(3, right=Node(7)))
    mark(root)
| true
|
75a3f3c0db1ba0c9cd4a51b2c35eed664ef6641b
|
Python
|
art2mkl/projet_IMDB
|
/.ipynb_checkpoints/scraping-checkpoint.py
|
UTF-8
| 5,907
| 2.984375
| 3
|
[] |
no_license
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
# à intégrer dans le fichier .py
class Dbase:
    """Scrape IMDB's top-250 listing pages into a cleaned pandas DataFrame."""

    def connect_IMDB(self,i):
        """Fetch one top-250 results page starting at offset ``i`` and
        return the list of per-movie content divs."""
        url = f"https://www.imdb.com/search/title/?groups=top_250&sort=user_rating,desc&start={i}&ref_=adv_nxt"
        response = requests.get(url)
        html_parsed = BeautifulSoup(response.text, 'html.parser')
        return html_parsed.find_all(class_='lister-item-content')
    def imdb_requests(self):
        """Run the full pipeline: scrape all pages, build and return the DataFrame."""
        tabG = self.get_elements()
        return self.createDf(tabG)
    def get_elements(self):
        """Scrape every listing page and return 13 parallel column lists:
        titles, genre1-3, genre count, year, director1-3, director count,
        grade, vote count, gross."""
        # page offsets: IMDB lists 50 movies per page, 250 total
        num_requests = [1, 51, 101, 151, 201]
        titles_name = []
        movies_grade = []
        movies_year = []
        voices_count = []
        movies_director = []
        movies_genre = []
        movies_gross = []
        for i in num_requests:
            # fetch the per-movie blocks to scrape from this results page
            div_content = self.connect_IMDB(i)
            for div in div_content:
                # APPEND values to lists
                titles_name.append(div.find(class_='lister-item-header').find('a').text)
                movies_grade.append(div.find(class_='ratings-bar').find('strong').text)
                movies_genre.append((div.find(class_="genre").text.strip('\n').replace(' ', '')).split(','))
                movies_year.append(div.find(class_="lister-item-year").text.replace('(','').replace(')','').replace('I ', ''))
                voices_count.append(div.find(class_="sort-num_votes-visible").find_all('span')[1].text.replace(',', ''))
                # SELECT gross and transform missing values to Nan
                # (5 spans means the gross figure is present on the page)
                if len(div.find(class_="sort-num_votes-visible").find_all('span')) == 5:
                    movies_gross.append(div.find(class_="sort-num_votes-visible")
                                        .find_all('span')[4].text.replace('$', '')
                                        .replace('M', ''))
                else:
                    movies_gross.append(np.nan)
                # ITERATE on multiple directors: collect <a> names until the
                # <span> separator that precedes the cast list
                all_p = div.find_all('p')[2]
                first_director = []
                for i in all_p:
                    if '<a' in str(i):
                        first_director.append(i.text)
                    elif '<span' in str(i):
                        break
                movies_director.append(first_director)
        # CREATION of 3 lists (directors are padded: a movie with fewer
        # than 3 directors repeats the last one so all lists stay aligned)
        director_1 = []
        director_2 = []
        director_3 = []
        nb_directors = []
        # ITERATE on each list
        for i in movies_director:
            nb_directors.append(len(i))
            if len(i) == 1:
                director_1.append(i[0])
                director_2.append(i[0])
                director_3.append(i[0])
            elif len(i) == 2:
                director_1.append(i[0])
                director_2.append(i[1])
                director_3.append(i[1])
            elif len(i) == 3:
                director_1.append(i[0])
                director_2.append(i[1])
                director_3.append(i[2])
        #CREATION of 3 lists (same padding scheme as directors)
        genre_1 = []
        genre_2 = []
        genre_3 = []
        nb_genres = []
        #ITERATE on each list
        for i in movies_genre:
            nb_genres.append(len(i))
            if len(i) == 1:
                genre_1.append(i[0])
                genre_2.append(i[0])
                genre_3.append(i[0])
            elif len(i) == 2:
                genre_1.append(i[0])
                genre_2.append(i[1])
                genre_3.append(i[1])
            elif len(i) == 3:
                genre_1.append(i[0])
                genre_2.append(i[1])
                genre_3.append(i[2])
        return [titles_name,genre_1,genre_2,genre_3,nb_genres,movies_year,director_1,director_2,director_3,nb_directors,movies_grade,voices_count,movies_gross]
    def createDf(self,tabG):
        """Assemble the scraped column lists ``tabG`` into a typed
        DataFrame and fill in missing gross values."""
        df1 = pd.DataFrame()
        #ADDING titles list to DF
        df1['titles'] = tabG[0]
        #ADDING genre lists to DF
        #CREATION of 3 columns on the DF
        df1['genre1'] = tabG[1]
        df1['genre2'] = tabG[2]
        df1['genre3'] = tabG[3]
        df1['nb_genres'] = tabG[4]
        #MODIFICATION type to datetime
        df1['released_year'] = list(map(int, tabG[5]))
        #CREATION of 3 columns on the DF
        df1['director1'] = tabG[6]
        df1['director2'] = tabG[7]
        df1['director3'] = tabG[8]
        df1['nb_directors'] = tabG[9]
        #MODIFICATION to float
        df1['grade'] = list(map(float, tabG[10]))
        #MODIFICATION type of votes in integer
        df1['votes'] = list(map(int, tabG[11]))
        #df1['votes'] = df1['votes'].astype(dtype='int64')
        #MODIFICATION type of gross in float
        df1['gross(M$)'] = list(map(float, tabG[12]))
        # add the missing gross values (imputation by release year)
        return self.add_values(df1)
    def add_values(self,df1):
        """Impute missing gross values with the mean gross of the same
        release year, falling back to year-1 then year-2.

        NOTE(review): uses chained indexing assignment, which pandas may
        flag with SettingWithCopyWarning and may not write through on
        newer versions — verify before upgrading pandas.
        """
        df2 = df1.copy()
        for i in range(1920,2021):
            # if year i has at least one non-NaN gross, use its own mean
            if df2['gross(M$)'][df2['released_year'] == i].notnull().sum() != 0:
                df2['gross(M$)'][df2['released_year'] == i] = df2['gross(M$)'][df2['released_year'] == i].fillna(df2['gross(M$)'][df2['released_year'] == i].mean())
            # else if year i-1 has data, borrow its mean
            elif df2['gross(M$)'][df2['released_year'] == i-1].notnull().sum() != 0:
                df2['gross(M$)'][df2['released_year'] == i] = df2['gross(M$)'][df2['released_year'] == i].fillna(df2['gross(M$)'][df2['released_year'] == i-1].mean())
            # otherwise fall back to the mean of year i-2
            else:
                df2['gross(M$)'][df2['released_year'] == i] = df2['gross(M$)'][df2['released_year'] == i].fillna(df2['gross(M$)'][df2['released_year'] == i-2].mean())
        return df2
| true
|
827910685def873f54e6a63cf37f9ee00a978458
|
Python
|
sgeorgiev87/QuickBaseExercise
|
/WebDriverIO/common/page_objects.py
|
UTF-8
| 6,762
| 2.796875
| 3
|
[] |
no_license
|
from WebDriverIO.common.page_objects_selectors import *
from Configuration.BasePage import *
from selenium.webdriver.common.keys import Keys
class HomePage(BasePage):
    """Page object for the WebdriverIO landing page."""

    def __init__(self, driver, timeout=10):
        BasePage.__init__(self, driver=driver, timeout=timeout)

    def open_homepage(self, url):
        """Navigate to *url* and wait until the main container is visible."""
        self.driver.get(url)
        self.visibility_of_element(HomePageSelectors.MainContainer)
        print('--> Homepage was successfully loaded!')
class Header(BasePage):
    """Page object for the site-wide header bar."""

    def __init__(self, driver, timeout=10):
        BasePage.__init__(self, driver=driver, timeout=timeout)

    def click_api_link(self):
        """Open the API documentation from the header."""
        self.click_on_element('API Link', HeaderSelectors.APILink)

    def click_search_field(self):
        """Focus the header search field (opens the search widget)."""
        self.click_on_element('Search Field', HeaderSelectors.SearchField)

    def click_on_io_logo(self):
        """Return to the homepage via the WebdriverIO logo."""
        self.click_on_element('WebDriver IO logo', HeaderSelectors.IOLogo)
class SearchWidget(BasePage):
    """Page object for the search popup: querying, recent history and
    favourites management."""

    def __init__(self, driver, timeout=10):
        BasePage.__init__(self, driver=driver, timeout=timeout)
        # the widget is only usable once its input field has rendered
        try:
            self.visibility_of_element(SearchWidgetSelectors.SearchInputField)
            print ('--> Search Popup was successfully loaded!')
        except:
            raise Exception('#### Search popup was not loaded' + traceback.format_exc())
    def search_for_text(self, text):
        """Type *text* into the search input."""
        self.clickable_element(SearchWidgetSelectors.SearchInputField).send_keys(text)
    def click_on_specific_search_result(self, exp_search_result):
        """Click the result whose text equals *exp_search_result*; raise if absent."""
        # brief pause so the result list can refresh after typing
        time.sleep(0.2)
        iteration = 1
        all_search_results = self.visibility_of_elements(SearchWidgetSelectors.SearchResults)
        for act_search_result in all_search_results:
            if exp_search_result == act_search_result.text:
                act_search_result.click()
                print('--> Clicked on search result: ' + exp_search_result)
                break
            else:
                iteration += 1
                if iteration > len(all_search_results):
                    raise Exception('#### Could not find given search result: ' + exp_search_result + '\n' + traceback.format_exc())
    def assert_clicked_result_was_loaded(self, exp_result):
        """Verify the page opened by the clicked result has title *exp_result*."""
        assert self.visibility_of_element(GeneralSelectors.TitleHeader).text == exp_result, '#### Clicked result was not loaded'
        print('--> Clicked result ' + exp_result + ' was successfully loaded!')
    def assert_no_results_for_invalid_keyword(self, keyword):
        """Verify the widget shows the 'No results' message for *keyword*."""
        exp_result = 'No results for "' + keyword + '"'
        assert self.visibility_of_element(SearchWidgetSelectors.NoResultsSearchTitle).text == exp_result, '#### Wrong result for invalid keyword'
        print('--> Valid error message for not found results is shown!')
    def verify_recent_history_results_are_saved(self, exp_recent_results):
        """Check every entry of *exp_recent_results* appears in the Recent list."""
        all_recent_results = self.visibility_of_elements(SearchWidgetSelectors.RecentSearches)
        all_recent_results_text = []
        for recent_result in all_recent_results:
            all_recent_results_text.append(recent_result.text)
        # report ALL misses before failing, not just the first one
        error_count = 0
        for exp_result in exp_recent_results:
            if exp_result not in all_recent_results_text:
                print ('#### Expected search result ' + exp_result + ' is not in Recent list')
                error_count += 1
        if error_count != 0:
            raise Exception('#### There is difference between expected and actual Recent results. Check the logs!')
        else:
            print('--> All expected search results are in the Recent list')
    def save_recent_result_in_favourite(self, result_to_save):
        """Click the favourite (star) button on the given recent result."""
        self.click_on_element('Favourite button for result ' + result_to_save, SearchWidgetSelectors.favourite_button_specific_recent_result(result_to_save))
    def verify_result_saved_in_favourite(self, saved_result):
        """Verify *saved_result* now appears in the Favourites section."""
        try:
            self.visibility_of_element(SearchWidgetSelectors.result_in_favourites(saved_result))
            print('--> Result ' + saved_result + ' successfully saved in Favorites')
        except:
            raise Exception('#### Result ' + saved_result + ' not saved in Favourites! \n' + traceback.format_exc())
    def delete_specific_recent_result(self, result_to_delete):
        """Click the remove button on the given recent result."""
        self.click_on_element('Delete button for result ' + result_to_delete, SearchWidgetSelectors.remove_button_specific_recent_result(result_to_delete))
    def verify_deleted_result_not_in_recent_history(self, deleted_result):
        """Poll up to ~5s (10 x 0.5s) until the deleted entry disappears."""
        counter = 0
        while counter < 10:
            if self.is_element_displayed(SearchWidgetSelectors.remove_button_specific_recent_result(deleted_result)):
                counter +=1
                time.sleep(0.5)
            else:
                print('--> Element ' + deleted_result + ' successfully deleted from Recent history')
                break
        if counter == 10:
            raise Exception('#### Element not deleted from Recent history for 5 seconds!')
    def close_search_widget(self):
        """Dismiss the widget by sending the Escape key."""
        ActionChains(self.driver).send_keys(Keys.ESCAPE).perform()
        print('--> Escape key pressed with Search widget on screen!')
class APIDocumentation(BasePage):
    """Page object for the API documentation pages (sidebar sections)."""

    def __init__(self, driver, timeout=10):
        BasePage.__init__(self, driver=driver, timeout=timeout)
    def expand_protocols_section(self):
        """Expand the 'Protocols' sidebar section."""
        self.click_on_element('Protocols section', APIDocumentationSelectors.ProtocolsSection)
    def verify_elements_in_section_list(self, section, exp_list):
        """Compare the expanded *section*'s entries against *exp_list*,
        failing on missing OR unexpected extra entries (both directions)."""
        error_count = 0
        act_list_elements = self.visibility_of_elements(APIDocumentationSelectors.all_elements_in_given_section(section))
        act_list_strings = []
        for element in act_list_elements:
            act_list_strings.append(element.text) # getting the strings for all web elements in the opened section
        for exp_list_element in exp_list:
            if exp_list_element in act_list_strings:
                act_list_strings.remove(exp_list_element) # removing checked element from actual list
            else:
                print('#### Expected element: ' + exp_list_element + ' is not in the ' + section + ' section!!!')
                error_count += 1
        if len(act_list_strings) != 0: # checking if any new elements were added into Protocols section on website
            print('#### There are elements in Actual ' + section + ' section list that are not in the expected one: ' + str(
                act_list_strings))
            error_count += 1
        if error_count == 0:
            print('--> All expected elements in ' + section + ' section match with actual elements!')
        else:
            raise Exception('#### There are differences between expected and actual lists in ' + section + ' section. Please check the log!!!')
| true
|
81b5f9182c225ab11e3f38294193e367b4eb8b68
|
Python
|
CharlesMontgomery2/Python-Class-Exercises
|
/Files and Exceptions/welcome_guest.py
|
UTF-8
| 1,006
| 4.75
| 5
|
[] |
no_license
|
# 2) Write a while loop that prompts users for their name.
# When they enter their name, print a greeting to the screen and add a line recording their visit in a file called guest_book.txt.
# Make sure each entry appears on a new line in the file.

guest_book_path = "guest_book.txt"  # renamed: 'file' shadowed the builtin

print("Enter 'quit' when you are finished.")  # tell the user how to stop
while True:
    name = input("\nWhat's your name? ")
    if name.lower() == 'quit':  # case-insensitive sentinel ends the loop
        break
    # append mode preserves guests from earlier runs; 'with' closes the file
    with open(guest_book_path, 'a') as f:
        f.write(name + "\n")  # one guest per line
    print(f"Hi {name.title()}, you've been added to the guest book.")

# re-open for reading; the original left this handle open forever
with open(guest_book_path) as f:
    print("Names on the guest list are: \n", f.read())
| true
|
24245eabcd7a1a36b3597fbc56be664b93fc2688
|
Python
|
jonvaljean/pidev14
|
/lwcycler.py
|
UTF-8
| 1,472
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/python
# Simple LED-Warrior14 script: cycle DALI on/off commands from a file.
from __future__ import print_function
import sys
import time
import smbus #use smbus for i2c
from time import sleep
from lwheadmodule import *
#modify this model according to requirements of setting
NR_ARGS = 4
#run the programm
if __name__ == "__main__":
    DaliBus_Bar1 = lw14()
    # expected arguments: cycle file name, on value, off value, sleep seconds
    if len(sys.argv) == NR_ARGS+1:
        filename = sys.argv[1]
        onval = sys.argv[2]
        offval = sys.argv[3]
        # BUG FIX: time.sleep() needs a number; the raw argv string
        # raised TypeError on the first cycle
        sleeptime = float(sys.argv[4])
    # If the argument count is wrong, report and stop
    else:
        print("Wrong number of parameters")
        sys.exit(0)
    # read the whole command file once and close the handle
    # (the original left it open for the program's lifetime)
    with open(filename) as F:
        cmd_list = F.read().splitlines()
    print("cmd_list is ", cmd_list)
    while True:
        for line in cmd_list:
            print("line is ", line)
            for cmd in line.split(','):
                print("cmd is ", cmd)
                # NOTE(review): cmd[0]/cmd[1] index single CHARACTERS of
                # the command token — confirm the cycle-file format
                dali_bus = I2C_values[net_dict[cmd[0]]]
                DaliBus_Bar1.SetI2cBus(dali_bus)
                dali_device = grp_dict[cmd[0]]
                if cmd[1] == "on": dali_value = onval
                if cmd[1] == "off": dali_value = offval
                DaliBus_Bar1.SetDaliAddress(dali_device, LW14_ADR_GROUP, LW14_MODE_DACP) #Set the dali address for send data, in this case single device and DACP bit
                DaliBus_Bar1.SendData(dali_value) #Send data into the dali bus
                DaliBus_Bar1.WaitForReady() #Wait until DALI is ready. DON'T FORGET IT!!!!!
            sleep(sleeptime)
| true
|
00d690f06d89dfd7156ac4b599023d5ae9f8c85a
|
Python
|
L200183043/Praktikum-Algopro
|
/Kegiatan 1.py
|
UTF-8
| 374
| 3.140625
| 3
|
[] |
no_license
|
# Print a numbered table of plane figures and their area formulas.
# (Labels are Indonesian: Nama Bangun = figure name, Rumus Luas = area
# formula.) NOTE: Python 2 syntax — print statements, %-formatting.
x = {"Segitiga":"L = 0.5 * a * t" ,
     "Persegi":"L = s * * 2" ,
     "Persegi panjang":"L = p * l" ,
     "lingkaran":"L = pi * r * * 2" ,
     "Jajaran genjang":"L = a * t" }
# header row and separator, padded to fixed column widths
print "|%-4s||%-17s||%-17s"%("No", "Nama Bangun", "Rumus Luas")
print "|%-4s||%-17s||%-17s"%("-"*4, "-"*7, "-"*17)
# one numbered row per dictionary entry
a = 1
for i in x:
    print"|%-4s||%-17s||%-17s"%(a, i, x[i])
    a += 1
| true
|
aa5bee1997a1ae3f9bd249acc000e3661460e1c8
|
Python
|
sarvex/commons
|
/src/python/twitter/common/metrics/gauge.py
|
UTF-8
| 3,774
| 3.09375
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from twitter.common.lang import Compatibility
# Duck-typing helpers
def gaugelike(obj):
  """True when *obj* quacks like a gauge: it has a callable read()."""
  return callable(getattr(obj, 'read', None))
def namable(obj):
  """True when *obj* exposes a callable name()."""
  return callable(getattr(obj, 'name', None))
def namablegauge(obj):
  """True when *obj* is both gauge-like and namable."""
  return gaugelike(obj) and namable(obj)
# Typed gauges.
class Gauge(object):
  """
  Read-only holder for a single exported value.

  Subclasses override read() when the value must be computed on demand.
  """
  def __init__(self, value):
    # the exported value lives in a private slot; read() returns it as-is
    self._value = value

  def read(self):
    """Return the current value of this gauge."""
    return self._value
class NamedGauge(Gauge):
  """
  A Gauge that also carries a string name (exports a name() method).
  """
  def __init__(self, name, value=None):
    # metric names must be plain strings; reject anything else early
    if not isinstance(name, str):
      raise TypeError('NamedGauge must be named by a string, got %s' % type(name))
    self._name = name
    Gauge.__init__(self, value)

  def name(self):
    """Return this gauge's name."""
    return self._name
class MutableGauge(Gauge):
  """
  A Gauge whose value may be replaced after construction.

  Access is guarded by an internal threading.Lock so concurrent
  read()/write() calls are safe.
  """
  def __init__(self, value=None):
    import threading
    self._lock = threading.Lock()
    Gauge.__init__(self, value)

  def lock(self):
    """Expose the gauge's lock (subclasses use it for atomic updates)."""
    return self._lock

  def read(self):
    """Return the current value, taken under the lock."""
    with self.lock():
      return self._value

  def write(self, value):
    """Store *value* under the lock, then return the stored value."""
    with self.lock():
      self._value = value
    return self._value
class Label(NamedGauge):
  """
  An immutable name/value pair. Any value is accepted, though string
  values are the intended use.
  """
  def __init__(self, name, value):
    NamedGauge.__init__(self, name, value)
class LambdaGauge(NamedGauge):
  """
  A named gauge whose value is produced by invoking a user-supplied
  zero-argument callable on every read().
  """
  def __init__(self, name, fn):
    import threading
    # the callable itself is stored as the gauge "value"
    if not callable(fn):
      raise TypeError("A LambdaGauge must be supplied with a callable, got %s" % type(fn))
    NamedGauge.__init__(self, name, fn)
    self._lock = threading.Lock()

  def read(self):
    """Call the wrapped function under the lock and return its result."""
    with self._lock:
      return self._value()
class MutatorGauge(NamedGauge, MutableGauge):
  """
  A named, writable gauge: combines NamedGauge's name() with
  MutableGauge's lock-protected read()/write().
  """
  def __init__(self, name, value=None):
    # initialize both bases explicitly; this hierarchy does not use
    # cooperative super() calls
    NamedGauge.__init__(self, name)
    MutableGauge.__init__(self, value)
class AtomicGauge(NamedGauge, MutableGauge):
  """
  Something akin to AtomicLong. Basically a MutableGauge but with
  atomic add, increment, decrement.
  """
  def __init__(self, name, initial_value=0):
    # Compatibility.integer covers both int and long on Python 2
    if not isinstance(initial_value, Compatibility.integer):
      raise TypeError('AtomicGauge must be initialized with an integer.')
    NamedGauge.__init__(self, name)
    MutableGauge.__init__(self, initial_value)
  def add(self, delta):
    """
    Add delta to metric and return updated metric.
    Raises TypeError if delta is not an integer.
    """
    if not isinstance(delta, Compatibility.integer):
      raise TypeError('AtomicGauge.add must be called with an integer.')
    # read-modify-write under the MutableGauge lock so concurrent adds
    # cannot lose updates
    with self.lock():
      self._value += delta
      return self._value
  def increment(self):
    """
    Increment metric and return updated metric.
    """
    return self.add(1)
  def decrement(self):
    """
    Decrement metric and return updated metric.
    """
    return self.add(-1)
| true
|
3312b616a7e3cc08f43c50464ee74d63a8ae3808
|
Python
|
stanleychilton/portfolio
|
/exmaple files/testcode/painter1.py
|
UTF-8
| 9,458
| 2.734375
| 3
|
[] |
no_license
|
import pygame
pygame.init()
# ---- palette (RGB); each colour has a "light" hover variant used by the
# colour-picker buttons. NOTE(review): light_red/light_blue are actually
# DARKER than their base colours — presumably intentional hover shading.
white = (255,255,255)
red = (255,0,0)
light_red = (220,0,0)
green = (0,220,0)
light_green = (0,255,0)
blue = (0,0,255)
light_blue = (0,0,220)
black = (0,0,0)
grey = (220,220,220)
orange = (229,160,11)
light_orange = (234,165,16)
pink = (231,62,238)
light_pink = (236,67,242)
yellow = (238,228,62)
light_yellow = (242,232,67)
# ---- drawing state shared with the button callbacks
but_size = 25          # side length of the square UI buttons
pensize = 3            # current stroke width (clamped to 2..15 elsewhere)
pen_type = "line"      # current tool: "line", "close" or "squ"
pen_colour = black
# ---- fonts used by text_objects()
tinyfont = pygame.font.SysFont("comicsansms", 15)
smallfont = pygame.font.SysFont("comicsansms", 25)
medfont = pygame.font.SysFont("comicsansms", 50)
largefont = pygame.font.SysFont("comicsansms", 80)
screen_size = (1000, 800)
# base colours and their hover variants for the 3x3 picker grid
colourchart = [red, green, blue, black, white, orange, grey, pink, yellow]
colchart_light = [light_red, light_green, light_blue, black, white, light_orange, grey, light_pink, light_yellow]
screen = pygame.display.set_mode(screen_size)
pygame.display.set_caption("Stan's painter")
screen.fill(white)
pygame.display.flip()
clock = pygame.time.Clock()
fps = 500
running = True
# ---- RGB slider handle positions (pixel x of each handle, track at x=10..85)
x_scroll1 = 10
x_scroll2 = 10
x_scroll3 = 10
bar = 10               # slider track x origin
bar_y = 200            # y of the first slider track
sep = 35               # vertical spacing between slider tracks
points_list = []       # points of the stroke currently being drawn
lines_list = []        # finished strokes: [type, width, colour, points]
colour = [0,0,0]       # colour mixed from the RGB sliders
fill_col = white
def button(text, x, y, width, height, inactive_colour, active_colour, action = None):
    """Draw a labelled command/colour button and, while hovered and
    left-clicked, run *action*: "quit", "wipe", "+"/"-" (pen size),
    "undo", "fill", or a colour tuple to select as pen colour.
    Mutates the module-level drawing state."""
    global pen_colour, pensize, lines_list, fill_col, pen_type
    cur = pygame.mouse.get_pos()
    click = pygame.mouse.get_pressed()
    # hover test: strict bounds on all four sides
    if x + width > cur[0] > x and y + height > cur[1] > y:
        pygame.draw.rect(screen, active_colour, (x,y,width,height))
        if click[0] == 1 and action != None:
            if action == "quit":
                pygame.quit()
                quit()
            elif action == "wipe":
                lines_list = []
            elif action == "+":
                if pensize < 15:
                    pensize += 1
            elif action == "-":
                if pensize > 2:
                    pensize -= 1
            elif action == "undo":
                if lines_list != []:
                    del lines_list[-1]
            elif action == "fill":
                # recorded as a stroke so undo can remove the fill too
                lines_list.append(["fill", pen_colour])
            # any other non-None action is a colour tuple to select
            elif action != None and action != "quit":
                pen_colour = action
    else:
        pygame.draw.rect(screen, inactive_colour, (x,y,width,height))
    text_to_button(text,black,x,y,width,height)
def button2(text, x, y, width, height, inactive_colour, active_colour, action = None):
    """Draw a tool-selector button; clicking it switches the active pen type."""
    global pen_type
    mouse_xy = pygame.mouse.get_pos()
    pressed = pygame.mouse.get_pressed()
    hovered = (x < mouse_xy[0] < x + width) and (y < mouse_xy[1] < y + height)
    if hovered:
        pygame.draw.rect(screen, active_colour, (x, y, width, height))
        if pressed[0] == 1 and action is not None:
            pen_type = action
    else:
        pygame.draw.rect(screen, inactive_colour, (x, y, width, height))
    text_to_button(text, black, x, y, width, height)
def button1(x, y, width, height, inactive_colour, active_colour, action = None):
    """Draw a slider track; dragging on it moves the matching scroll handle."""
    global x_scroll1, x_scroll2, x_scroll3
    cursor = pygame.mouse.get_pos()
    pressed = pygame.mouse.get_pressed()
    # the clickable zone extends 12px above and below the thin track line
    inside = (x < cursor[0] < x + width) and (y - 12 < cursor[1] < y + height + 12)
    if not inside:
        pygame.draw.rect(screen, inactive_colour, (x, y, width, height))
        return
    pygame.draw.rect(screen, active_colour, (x, y, width, height))
    if pressed[0] == 1 and action is not None:
        if action == "scroll1":
            x_scroll1 = cursor[0]
        elif action == "scroll2":
            x_scroll2 = cursor[0]
        elif action == "scroll3":
            x_scroll3 = cursor[0]
def text_objects(text, colour, size):
    """
    Render *text* in one of the module fonts.

    size -- one of "tiny", "small", "med", "large"
    Returns (surface, rect) for the rendered text.
    Raises ValueError for an unknown size; the original fell through and
    crashed with UnboundLocalError instead.
    """
    fonts = {
        "tiny": tinyfont,
        "small": smallfont,
        "med": medfont,
        "large": largefont,
    }
    if size not in fonts:
        raise ValueError("unknown text size: %r" % (size,))
    textsurface = fonts[size].render(text, True, colour)
    return textsurface, textsurface.get_rect()
def message_to_screen(msg, colour, x_displace = 0, y_displace = 0, size = "small"):
    """Blit *msg* with its top-left corner at (x_displace, y_displace)."""
    surface, _rect = text_objects(msg, colour, size)
    # blit() accepts a bare (x, y) position, so the rendered rect is unused
    screen.blit(surface, (x_displace, y_displace))
def text_to_button(msg,colour,buttonx, buttony, buttonwidth, buttonhieght, size="small"):
    """Render *msg* centred inside the given button rectangle."""
    surface, rect = text_objects(msg, colour, size)
    rect.center = (buttonx + (buttonwidth / 2), buttony + (buttonhieght / 2))
    screen.blit(surface, rect)
# Main event/draw loop: redraw canvas + UI every frame, handle input.
while running:
    # clear the frame: canvas border, toolbar strip, canvas background
    pygame.draw.rect(screen, grey, (100, 0, 900, 800), 1)
    pygame.draw.rect(screen, white, (0, 0, 100, 800))
    pygame.draw.rect(screen, white, (100, 0, 900, 800))
    # replay any recorded canvas fills first (they sit under the strokes)
    for line in lines_list:
        if line[0] == "fill":
            pygame.draw.rect(screen, line[1], (100, 0, 900, 800))
            pygame.draw.rect(screen, grey, (100, 0, 900, 800), 1)
    # replay every finished stroke: [type, width, colour, points]
    for lines in lines_list:
        if lines[0] == "line":
            pygame.draw.lines(screen, lines[2], False, lines[3], lines[1])
        elif lines[0] == "close":
            # draw a closing segment from the first point to each point,
            # then the open outline itself
            for points in lines[3]:
                pygame.draw.lines(screen, lines[2], True, (lines[3][0],points), lines[1])
            pygame.draw.lines(screen, lines[2], False, lines[3], lines[1])
        elif lines[0] == "squ":
            # rectangle spans from the first to the last recorded point
            pygame.draw.rect(screen, lines[2], (lines[3][0], (lines[3][-1][0]-lines[3][0][0],lines[3][-1][1]-lines[3][0][1])), lines[1])
    pos = pygame.mouse.get_pos()
    click = pygame.mouse.get_pressed()
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.KEYDOWN:
            keys = pygame.key.get_pressed()
            # ',' / '.' shrink / grow the pen within [2, 15]
            if keys[pygame.K_COMMA]:
                if pensize > 2:
                    pensize -= 1
            if keys[pygame.K_PERIOD]:
                if pensize < 15:
                    pensize += 1
            # Ctrl+Z undoes the most recent stroke
            if keys[pygame.K_LCTRL] and keys[pygame.K_z] and lines_list != []:
                del lines_list[-1]
    # cursor inside the canvas area?
    if 100 < pos[0] < 1000 and 1 < pos[1] < 800:
        # left button held: extend / preview the stroke in progress
        if click[0] == 1:
            position = (pos[0], pos[1])
            if len(points_list) == 0:
                points_list.append((pos[0], pos[1]))
            if pen_type == "line" or pen_type == "close":
                points_list.append(position)
            if pen_type == "line":
                pygame.draw.lines(screen, pen_colour, False, points_list, pensize)
                pygame.draw.circle(screen, pen_colour, position, pensize//3)
            elif pen_type == "close":
                pygame.draw.lines(screen, pen_colour, True, points_list, pensize)
            elif pen_type == "squ":
                pygame.draw.rect(screen, pen_colour, (points_list[0],(pos[0]-points_list[0][0], pos[1]-points_list[0][1])), pensize)
        # button released: commit the in-progress stroke to lines_list
        if click[0] == 0:
            save = False
            if points_list != []:
                if pen_type == "squ":
                    points_list.append((pos[0],pos[1]))
                lines_list.append([pen_type, pensize, pen_colour, points_list])
                points_list = []
    # toolbar: command buttons
    button("Clear", 10, 0, 75, but_size, white, grey, action="wipe")
    button("-", 10, 25, but_size, but_size, white, grey, action="-")
    button("+", 60, 25, but_size, but_size, white, grey, action="+")
    button("Undo", 10, 50, 75, but_size, white, grey, action="undo")
    button("Fill", 10, bar_y + (but_size * 5), 75, but_size, white, grey, action="fill")
    # tool selectors
    button2("line", 10, bar_y + (but_size * 6), 75, but_size, white, grey, action="line")
    button2("closed", 10, bar_y + (but_size * 7), 75, but_size, white, grey, action="close")
    button2("square", 10, bar_y + (but_size * 8), 75, but_size, white, grey, action="squ")
    # 3x3 colour picker grid
    col = 0
    for x in range(3):
        for i in range(3):
            button(None, 10 + (but_size * x), 90 + (but_size * i), but_size, but_size, colourchart[col],
                   colchart_light[col], action=colourchart[col])
            col += 1
    # blank cell next to the +/- buttons where the pen size is printed
    pygame.draw.rect(screen, white, (40, 25, 25, 25))
    # RGB slider tracks and their handles
    button1(bar, bar_y, 75, 2, grey, grey, action="scroll1")
    button1(bar, bar_y + sep, 75, 2, grey, grey, action="scroll2")
    button1(bar, bar_y + sep*2, 75, 2, grey, grey, action="scroll3")
    pygame.draw.rect(screen, grey, [x_scroll1 - 5, bar_y - 12, 10, 24])
    pygame.draw.rect(screen, grey, [x_scroll2 - 5, (bar_y + sep) - 12, 10, 24])
    pygame.draw.rect(screen, grey, [x_scroll3 - 5, (bar_y + sep*2) - 12, 10, 24])
    # map handle x positions (track width 75px) to 0..255 channel values
    red_scroll = (255 / 75) * (x_scroll1 - 10)
    green_scroll = (255 / 75) * (x_scroll2 - 10)
    blue_scroll = (255 / 75) * (x_scroll3 - 10)
    colour = (int(red_scroll), int(green_scroll), int(blue_scroll))
    # gradient strips above each slider track
    for i in range(10, 85):
        col_calc = (255 / 85) * (i - 10)
        pygame.draw.rect(screen, (col_calc,0,0), [i, bar_y - 21, 1, 6])
        pygame.draw.rect(screen, (0,col_calc,0), [i, (bar_y + sep) - 21, 1, 6])
        pygame.draw.rect(screen, (0,0,col_calc), [i, (bar_y + sep*2) - 21, 1, 6])
    # swatch button that selects the mixed slider colour
    button("", bar,(bar_y+sep*3)-18, 75, but_size, colour, colour, action=colour)
    print(lines_list)  # debug trace: dumps all strokes every frame
    message_to_screen(str(pensize), black, 38,26, "tiny")
    pygame.display.flip()
    clock.tick(fps)
| true
|
b0c08bbdd4b99356480d9d930ad5d12b3e697c26
|
Python
|
Sophie-Williams/GameAI-DoomBot
|
/TF_DoomBot_LDQN/ReplayMemory.py
|
UTF-8
| 2,435
| 2.546875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 22 18:56:04 2018
@author: jk
Paul Murray
This file holds to replymemory class to enable minibatch optimazation
"""
from __future__ import division
from __future__ import print_function
from vizdoom import *
import itertools as it
from random import sample, randint, random, seed
from time import time, sleep
import numpy as np
import skimage.color, skimage.transform
import tensorflow as tf
import matplotlib.pyplot as plt
from tqdm import trange
#my other files
from DoomMain import *
from LinearDeepQNetwork import *
from settings import *
from LearningStep import *
#replay memory class to allow for the use of mini batches
class ReplayMemory:
    """Fixed-capacity circular buffer of transitions for minibatch DQN
    training. ``pos`` is the next write slot; ``size`` grows until it
    reaches ``capacity``, after which old entries are overwritten."""
    #initializes memory to replaymemory size
    def __init__(self, capacity):
        channels = 1
        # resolution comes from settings; assumed (height, width) — TODO confirm
        state_shape = (capacity, resolution[0], resolution[1], channels)
        self.s1 = np.zeros(state_shape, dtype=np.float32)       # states before action
        self.s2 = np.zeros(state_shape, dtype=np.float32)       # states after action
        self.a = np.zeros(capacity, dtype=np.int32)             # action indices
        self.r = np.zeros(capacity, dtype=np.float32)           # rewards
        self.isterminal = np.zeros(capacity, dtype=np.float32)  # 1.0 when episode ended
        self.healths1 = np.zeros(capacity, dtype=np.float32)
        self.healths2 = np.zeros(capacity, dtype=np.float32)
        self.capacity = capacity
        self.size = 0
        self.pos = 0
    #add a single transition to the replay memory consisting of a begining state
    # the action taken, the end state after the action, is this is a terminal action
    #the reward from the action and the players health
    def add_transition(self, s1, action, s2, isterminal, reward,healths1, healths2):
        self.s1[self.pos, :, :, 0] = s1
        self.a[self.pos] = action
        # terminal transitions have no successor state; s2 slot keeps its zeros
        if not isterminal:
            self.s2[self.pos, :, :, 0] = s2
        self.isterminal[self.pos] = isterminal
        self.r[self.pos] = reward
        self.healths1[self.pos] = healths1;
        self.healths2[self.pos] = healths2;
        # advance the ring-buffer cursor, wrapping at capacity
        self.pos = (self.pos + 1) % self.capacity
        self.size = min(self.size + 1, self.capacity)
    #return a memory sample to use in mini batch
    # (sampled without replacement; health arrays are not returned)
    def get_sample(self, sample_size):
        i = sample(range(0, self.size), sample_size)
        return self.s1[i], self.a[i], self.s2[i], self.isterminal[i], self.r[i]
    def get_last_health(self):
        #if(healths1 != healths2)
        #not currently in use — stub always returns 1
        return 1;
| true
|
eefe5df76f4125a48d22044328b67fae79f40751
|
Python
|
tottaz/Basic-Python-RESTful-Server
|
/env/lib/python2.7/site-packages/luminoso_api/jstime.py
|
UTF-8
| 464
| 3.265625
| 3
|
[] |
no_license
|
# Load timestamp methods
from datetime import datetime
from time import mktime
def datetime2epoch(dt):
    """Convert a local datetime into integer milliseconds since the epoch."""
    seconds = mktime(dt.timetuple())
    return int(seconds * 1000)
def epoch2datetime(t):
    """Convert milliseconds since the epoch into a local datetime."""
    return datetime.fromtimestamp(t / 1000.0)
def epoch():
    """Return the current local time as milliseconds since the epoch."""
    return datetime2epoch(datetime.now())
| true
|
e59d4bd1b37ab1d3cb3cae16d88739aa302f6c53
|
Python
|
SoftwareDeveloper007/Automate-Functional-Tests-for-Web-App-and-Chrome-Extension
|
/Steps/C011.py
|
UTF-8
| 7,060
| 2.796875
| 3
|
[] |
no_license
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
import time
''' Delete Collection '''
class C011():
    """Selenium UI scenario C011: log in to the Kumbu staging site and delete
    a collection.

    Each numbered step prints a status line; on any failure the WebDriver is
    quit and the scenario aborts early.
    """
    def __init__(self, url, email, password, collection_txt, img_txt, tag_txt):
        ''' --- Initialize URL, Email, Password --- '''
        # The target URL is always forced to HTTPS.
        self.url = 'https://' + url
        self.email = email
        self.password = password
        # Display text of the collection card to open and delete.
        self.collection_txt = collection_txt
        # NOTE(review): img_txt and tag_txt are stored but never used in this
        # scenario — presumably kept for interface parity with other steps.
        self.img_txt = img_txt
        self.tag_txt = tag_txt
    def startSteps(self):
        """Run the whole scripted scenario: login, open collection, delete it."""
        pTxt = "\n-------- Step 'C011' started!!! --------------------------------------------------------------------"
        print(pTxt)
        self.driver = webdriver.Firefox()
        self.driver.maximize_window()
        ''' 1. Navigate to staging.getkumbu.com '''
        pTxt = "\n1. Navigate to staging.getkumbu.com\n"
        print(pTxt)
        try:
            self.driver.get(self.url)
            pTxt = "\t\t(Success)\tLoad webpage successfully"
            print(pTxt)
        except:
            pTxt = "\t\t(Error)\tFailed to load webpage"
            print(pTxt)
            self.driver.quit()
            return
        ''' 2. Input email adress: kumbutest@mailinator.com '''
        pTxt = "\n2. Input email adress: kumbutest@mailinator.com\n"
        print(pTxt)
        try:
            # Both login fields share the same CSS class; index 0 = email,
            # index 1 = password (see step 3).
            inputs = WebDriverWait(self.driver, 50).until(
                EC.presence_of_all_elements_located((By.CSS_SELECTOR, "input.kumbu-input"))
            )
        except:
            pTxt = "\t\t(Error)\tCan't find 'Email' and 'Password' Inputs"
            print(pTxt)
            self.driver.quit()
            return
        try:
            email_input = inputs[0]
            email_input.send_keys(self.email)
            pTxt = "\t\t(Success)\tInput email successfully"
            print(pTxt)
        except:
            pTxt = "\t\t(Error)\tFailed to input 'email'"
            print(pTxt)
            self.driver.quit()
            return
        ''' 3. Input password: “kumbu is cool” '''
        pTxt = "\n3. Input password: 'kumbu is cool'\n"
        print(pTxt)
        try:
            pwd_input = inputs[1]
            pwd_input.send_keys(self.password)
            pTxt = "\t\t(Success)\tInputted 'Password' successfully"
            print(pTxt)
        except:
            pTxt = "\t\t(Error)\tFailed to input 'Password'"
            print(pTxt)
            self.driver.quit()
            return
        ''' 4. Click Sign in '''
        pTxt = "\n4. Click Sign in\n"
        print(pTxt)
        try:
            submit_btn = WebDriverWait(self.driver, 50).until(
                EC.element_to_be_clickable((By.CSS_SELECTOR, "input#login-submit"))
            )
            submit_btn.click()
            pTxt = "\t\t(Success)\tClicked 'Sign in' button. Logged in successfully"
            print(pTxt)
        except:
            pTxt = "\t\t(Error)\tFailed to click 'Sign in' button"
            print(pTxt)
            self.driver.quit()
            return
        ''' 5. Click on 'Collection for Test $TEST_NUMBER' '''
        pTxt = "\n5. Click on 'Collection for Test $TEST_NUMBER'\n"
        print(pTxt)
        try:
            collections = WebDriverWait(self.driver, 50).until(
                EC.visibility_of_all_elements_located(
                    (By.CSS_SELECTOR, "div.collection.columns.small-12.medium-3.text-center"))
            )
            # Linear scan for the card whose text contains the target name.
            flag = False
            for collection in collections:
                if self.collection_txt in collection.text.strip():
                    flag = True
                    break
            if flag:
                try:
                    # Relies on the loop variable `collection` still holding
                    # the matched card after `break`.
                    collection.click()
                    pTxt = "\t\t(Success)\tClicked Successfully"
                    print(pTxt)
                except:
                    pTxt = "\t\t(Error)\tFailed to click"
                    print(pTxt)
                    self.driver.quit()
                    return
            else:
                pTxt = "\t\t(Failure)\tFailed to click"
                print(pTxt)
                self.driver.quit()
                return
        except:
            pTxt = "\t\t(Error)\tFailed to click"
            print(pTxt)
            self.driver.quit()
            return
        # Informational only: report the collection's meta text. Not wrapped
        # in try/except, so a timeout here would raise out of startSteps.
        memory_txt = WebDriverWait(self.driver, 50).until(
            EC.presence_of_element_located(
                (By.CSS_SELECTOR, "p.collection-meta.item-info"))
        )
        pTxt = "\nNote: '{}' is displayed\n".format(memory_txt.text.strip())
        print(pTxt)
        ''' 6. Click the rightmost menu item '''
        pTxt = "\n6. Click the rightmost menu item\n"
        print(pTxt)
        try:
            dropdown_btns = WebDriverWait(self.driver, 50).until(
                EC.visibility_of_all_elements_located((By.CSS_SELECTOR, "li.is-dropdown-submenu-parent.opens-left"))
            )
            # NOTE(review): the same move-and-click is performed twice —
            # presumably to focus then open the dropdown; confirm intent.
            actions = ActionChains(self.driver)
            actions.move_to_element(dropdown_btns[1]).click().perform()
            actions = ActionChains(self.driver)
            actions.move_to_element(dropdown_btns[1]).click().perform()
            #hover = ActionChains(self.driver).move_to_element(dropdown_btns[1])
            #hover.perform()
            pTxt = "\t\t(Success)\t"
            print(pTxt)
        except:
            pTxt = "\t\t(Error)\t"
            print(pTxt)
            self.driver.quit()
            return
        ''' 7. Click Delete Collection '''
        pTxt = "\n7. Click Delete Collection\n"
        print(pTxt)
        try:
            # Search scoped to the opened dropdown, not the whole page.
            delete_collection_btn = WebDriverWait(dropdown_btns[1], 50).until(
                EC.element_to_be_clickable((By.CSS_SELECTOR, "ul.menu.submenu.is-dropdown-submenu.first-sub.vertical > li > a"))
            )
            delete_collection_btn.click()
            pTxt = "\t\t(Success)\t"
            print(pTxt)
        except:
            pTxt = "\t\t(Error)\t"
            print(pTxt)
            self.driver.quit()
            return
        ''' 8. Click Delete Collection '''
        pTxt = "\n8. Click Delete Collection\n"
        print(pTxt)
        try:
            # Confirmation button inside the delete dialog.
            delete_collection_btn = WebDriverWait(self.driver, 50).until(
                EC.presence_of_element_located(
                    (By.CSS_SELECTOR, "div.collection-delete-options > a"))
            )
            delete_collection_btn.click()
            pTxt = "\t\t(Success)\t"
            print(pTxt)
        except:
            pTxt = "\t\t(Error)\t"
            print(pTxt)
            self.driver.quit()
            return
        self.driver.quit()
        return
if __name__ == '__main__':
    # Smoke-run the scenario against staging with the test account baked in.
    app = C011(url='staging.getkumbu.com', email='kumbutest@mailinator.com', password='kumbu is cool',
               collection_txt='New Collection', img_txt='Test Image 6', tag_txt='Test Tag 6')
    app.startSteps()
| true
|
3fc5d494421745e5d6b428271db091a77d05f1c0
|
Python
|
jessicagainesbmi203/Final_Project_Skeleton
|
/scripts/NN.py
|
UTF-8
| 5,363
| 2.671875
| 3
|
[] |
no_license
|
import numpy as np
class NeuralNetwork:
    """Fully-connected feed-forward network trained with batch backprop.

    Layer sizes come from ``shape`` (e.g. (8, 3, 8) for an autoencoder);
    weights, biases, activations and corrections are kept in dicts keyed by
    1-based layer index. Heavy ``print`` tracing is left in for debugging.
    """
    #def __init__(self, setup=[[68,25,"sigmoid",0],[25,1,"sigmoid",0]],lr=.05,seed=1,error_rate=0,bias=1,iter=500,lamba=.00001,simple=0):
    def __init__(self,inputs,outputs,activation='sigmoid',lr=0.05,bias=1,iter=500,lamda=0.00001,shape=(8,3,8)):
        # NOTE(review): the `activation` argument is ignored — 'sigmoid' is
        # hard-coded here; confirm whether that is intentional.
        self.activation = 'sigmoid'
        self.lr = lr                      # learning rate
        self.init_bias = bias             # initial value for every bias entry
        self.shape=shape                  # neurons per layer, input to output
        self.weights = dict()             # layer -> weight matrix
        self.weight_correction = dict()   # accumulated weight gradients
        self.biases = dict()              # layer -> bias matrix
        self.bias_correction = dict()     # accumulated bias gradients
        self.lamda = lamda                # regularization coefficient
        self.z = dict()                   # pre-activations per layer
        self.a = dict()                   # activations per layer (a[1] = inputs)
        self.inputs = inputs
        self.outputs = outputs
        self.iter = iter                  # number of training iterations
    def make_weights(self):
        """Allocate and randomly initialize weights, biases and buffers."""
        self.a[1] = self.inputs
        for layer in range(1,len(self.shape),1):
            # initialize weights to random values for each layer
            weight_matrix = np.zeros((self.shape[layer-1],self.shape[layer]))
            weight_correction_matrix = np.zeros((self.shape[layer-1],self.shape[layer]))
            for i in range(weight_matrix.shape[0]):
                for j in range(weight_matrix.shape[1]):
                    # Small random weights in [0, 0.1).
                    weight_matrix[i,j] = np.random.random() * 0.1
            self.weights[layer] = weight_matrix
            self.weight_correction[layer] = weight_correction_matrix
            # initialize biases to the input value
            self.a[layer+1] = np.zeros((self.a.get(layer).shape[0],weight_matrix.shape[1]))
            bias_matrix = np.full((self.a.get(layer).shape[0],weight_matrix.shape[1]),fill_value=self.init_bias)
            bias_correction_matrix = np.zeros((self.a.get(layer).shape[0],weight_matrix.shape[1]))
            self.biases[layer] = bias_matrix
            self.bias_correction[layer] = bias_correction_matrix
        print('starting weights')
        print(self.weights)
    def feedforward(self):
        """Propagate self.a[1] through every layer, filling self.z and self.a."""
        for layer in range(1,len(self.shape),1):
            # z = a . W + b for this layer.
            z = np.dot(self.a.get(layer),self.weights.get(layer)) + self.biases.get(layer)
            self.z[layer+1] = z
            a = np.zeros((z.shape[0],z.shape[1]))
            for i in range(a.shape[0]):
                for j in range(a.shape[1]):
                    # Element-wise nonlinearity (module-level `activation`).
                    a[i,j] = activation(z[i,j], self.activation)
            self.a[layer+1] = a
        print('a')
        print(self.a)
    def backprop(self):
        """One backprop pass: compute layer deltas, then update weights/biases."""
        deltas = dict()
        last_layer = len(self.shape)
        f_prime_z = np.zeros((self.z.get(last_layer).shape[0],self.z.get(last_layer).shape[1]))
        # NOTE(review): this z is W^T . a^T of the previous layer, which is
        # not the same quantity as self.z[last_layer] computed in
        # feedforward — confirm the intended derivative input.
        z = np.dot(np.transpose(self.weights.get(last_layer-1)),np.transpose(self.a.get(last_layer-1)))
        for i in range(f_prime_z.shape[0]):
            for j in range(f_prime_z.shape[1]):
                f_prime_z[i,j] = der_activation(z[i,j],self.activation)
        print('error')
        print(self.a.get(last_layer) - self.outputs)
        # Output-layer delta = (prediction - target)^T * f'(z).
        delta_output = np.multiply(np.transpose(self.a.get(last_layer) - self.outputs),f_prime_z)
        print('weighted error')
        print(delta_output)
        deltas[last_layer] = delta_output
        # gradient of cost function for hidden layers (all but first and last)
        for layer in range(len(self.shape)-1,1,-1):
            print(layer)
            z = np.dot(np.transpose(self.weights.get(layer-1)), np.transpose(self.a.get(layer-1)))
            f_prime_z = np.zeros((z.shape[0],z.shape[1]))
            for i in range(f_prime_z.shape[0]):
                for j in range(f_prime_z.shape[1]):
                    f_prime_z[i,j] = der_activation(z[i,j],self.activation)
            delta_layer = np.multiply((np.dot(self.weights.get(layer),deltas[layer+1])),f_prime_z)
            deltas[layer] = delta_layer
            print('delta hidden layer')
            print(delta_layer)
        for layer in range(1,len(self.shape),1):
            gradient_W = np.transpose(np.dot(deltas.get(layer+1),self.a.get(layer)))
            print('gradient_W')
            print(gradient_W)
            gradient_b = np.transpose(deltas[layer+1])
            print('gradient_b')
            print(gradient_b)
            # Corrections accumulate across calls (never reset).
            self.weight_correction[layer] = self.weight_correction.get(layer) + gradient_W
            self.bias_correction[layer] = self.bias_correction.get(layer) + gradient_b
            print('weight_correction')
            print(self.weight_correction)
            print('bias_correction')
            print(self.bias_correction)
            m = self.inputs.shape[0]
            # NOTE(review): the update ADDS lr * gradient (and adds the
            # regularization term) — for gradient descent on a squared error
            # this sign looks inverted; confirm against the derivation.
            new_weights = self.weights.get(layer) + self.lr * (((1/m) * self.weight_correction[layer])+ self.lamda * self.weights[layer])
            new_biases = self.biases[layer] + self.lr * ((1/m) * self.bias_correction[layer])
            self.weights[layer] = new_weights
            self.biases[layer] = new_biases
    def fit(self):
        """Initialize weights, then alternate feedforward/backprop for iter rounds."""
        self.make_weights()
        for i in range(self.iter):
            self.feedforward()
            self.backprop()
        print('final weights')
        print(self.weights)
    def predict(self):
        """Run a forward pass on the stored inputs and print the activations."""
        self.feedforward()
        print('predicted outputs')
        print(self.a)
def activation(x, type):
    """Apply the named activation function to a scalar input.

    Parameters
    ----------
    x : float
        Pre-activation value.
    type : str
        Activation name; only 'sigmoid' is implemented. (Parameter name kept
        for interface compatibility, although it shadows the builtin.)

    Returns
    -------
    float
        The activated value.

    Raises
    ------
    ValueError
        For an unsupported activation name. The original silently returned
        None in that case, which only surfaced later as an obscure TypeError.
    """
    if type == 'sigmoid':
        # Logistic sigmoid: 1 / (1 + e^-x)
        return (1 / (1 + np.exp(-x)))
    raise ValueError('unsupported activation: {}'.format(type))
def der_activation(x, type):
    """Derivative of the named activation function evaluated at x."""
    if type == 'sigmoid':
        s = 1 / (1 + np.exp(-x))
        # sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x))
        return s * (1 - s)
| true
|
0af8fcbb15cec7f5661675e4fafc96948418e7e5
|
Python
|
pints-team/pints
|
/pints/toy/_beeler_reuter_model.py
|
UTF-8
| 8,168
| 2.875
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
#
# Beeler-Reuter model for mammalian ventricular action potential.
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import numpy as np
import pints
import scipy.integrate
from . import ToyModel
class ActionPotentialModel(pints.ForwardModel, ToyModel):
    """
    The 1977 Beeler-Reuter model of the mammalian ventricular action potential
    (AP).
    This model is written as an ODE with 8 states and several intermediary
    variables: for the full model equations, please see the original paper
    [1]_.
    The model contains 5 ionic currents, each described by a sub-model with
    several kinetic parameters, and a maximum conductance parameter that
    determines its magnitude.
    Only the 5 conductance parameters are varied in this :class:`ToyModel`, all
    other parameters are fixed and assumed to be known.
    To aid in inference, a parameter transformation is used: instead of
    specifying the maximum conductances directly, their natural logarithm
    should be used.
    In other words, the parameter vector passed to :meth:`simulate()` should
    contain the logarithm of the five conductances.
    As outputs, we use the AP and the calcium transient, as these are the only
    two states (out of the total of eight) with a physically observable
    counterpart.
    This makes this a fairly hard problem.
    Extends :class:`pints.ForwardModel`, :class:`pints.toy.ToyModel`.
    Parameters
    ----------
    y0
        The initial state of the observables ``V`` and ``Ca_i``, where
        ``Ca_i`` must be 0 or greater.
        If not given, the defaults are -84.622 and 2e-7.
    References
    ----------
    .. [1] Reconstruction of the action potential of ventricular myocardial
           fibres. Beeler, Reuter (1977) Journal of Physiology
           https://doi.org/10.1113/jphysiol.1977.sp011853
    """
    def __init__(self, y0=None):
        if y0 is None:
            self.set_initial_conditions([-84.622, 2e-7])
        else:
            self.set_initial_conditions(y0)
        # Initial condition for non-observable states
        # (m, h, j: sodium gates; d, f: calcium gates; x1: potassium gate).
        self._m0 = 0.01
        self._h0 = 0.99
        self._j0 = 0.98
        self._d0 = 0.003
        self._f0 = 0.99
        self._x10 = 0.0004
        # membrane capacitance, in uF/cm^2
        self._C_m = 1.0
        # Nernst reversal potentials, in mV
        self._E_Na = 50.0
        # Stimulus current: amplitude, period and pulse length (see _rhs).
        self._I_Stim_amp = 25.0
        self._I_Stim_period = 1000.0
        self._I_Stim_length = 2.0
        # Solver tolerances
        self.set_solver_tolerances()
    def initial_conditions(self):
        """
        Returns the initial conditions of this model.
        """
        return [self._v0, self._cai0]
    def n_outputs(self):
        """ See :meth:`pints.ForwardModel.n_outputs()`. """
        # membrane voltage and calcium concentration
        return 2
    def n_parameters(self):
        """ See :meth:`pints.ForwardModel.n_parameters()`. """
        # 5 conductance values
        return 5
    def _rhs(self, states, time, parameters):
        """
        Right-hand side equation of the ode to solve.
        """
        # Set-up
        V, Cai, m, h, j, d, f, x1 = states
        # Parameters arrive log-transformed; undo that here.
        gNaBar, gNaC, gCaBar, gK1Bar, gx1Bar = np.exp(parameters)
        # Equations
        # INa (fast sodium current and its m/h/j gate kinetics)
        INa = (gNaBar * m**3 * h * j + gNaC) * (V - self._E_Na)
        alpha = (V + 47) / (1 - np.exp(-0.1 * (V + 47)))
        beta = 40 * np.exp(-0.056 * (V + 72))
        dmdt = alpha * (1 - m) - beta * m
        alpha = 0.126 * np.exp(-0.25 * (V + 77))
        beta = 1.7 / (1 + np.exp(-0.082 * (V + 22.5)))
        dhdt = alpha * (1 - h) - beta * h
        alpha = 0.055 * np.exp(-0.25 * (V + 78)) \
            / (1 + np.exp(-0.2 * (V + 78)))
        beta = 0.3 / (1 + np.exp(-0.1 * (V + 32)))
        djdt = alpha * (1 - j) - beta * j
        # ICa (slow inward calcium current; E_Ca depends on internal calcium)
        E_Ca = -82.3 - 13.0287 * np.log(Cai)
        ICa = gCaBar * d * f * (V - E_Ca)
        alpha = 0.095 * np.exp(-0.01 * (V + -5)) \
            / (np.exp(-0.072 * (V + -5)) + 1)
        beta = 0.07 * np.exp(-0.017 * (V + 44)) \
            / (np.exp(0.05 * (V + 44)) + 1)
        dddt = alpha * (1 - d) - beta * d
        alpha = 0.012 * np.exp(-0.008 * (V + 28)) \
            / (np.exp(0.15 * (V + 28)) + 1)
        beta = 0.0065 * np.exp(-0.02 * (V + 30)) \
            / (np.exp(-0.2 * (V + 30)) + 1)
        dfdt = alpha * (1 - f) - beta * f
        # Cai (intracellular calcium uptake/release balance)
        dCaidt = -1e-7 * ICa + 0.07 * (1e-7 - Cai)
        # IK1 (time-independent potassium current)
        IK1 = gK1Bar * (
            4 * (np.exp(0.04 * (V + 85)) - 1)
            / (np.exp(0.08 * (V + 53)) + np.exp(0.04 * (V + 53)))
            + 0.2 * (V + 23)
            / (1 - np.exp(-0.04 * (V + 23)))
        )
        # IX1 (time-activated outward current with x1 gate)
        Ix1 = gx1Bar * x1 * (np.exp(0.04 * (V + 77)) - 1) \
            / np.exp(0.04 * (V + 35))
        alpha = 0.0005 * np.exp(0.083 * (V + 50)) \
            / (np.exp(0.057 * (V + 50)) + 1)
        beta = 0.0013 * np.exp(-0.06 * (V + 20)) \
            / (np.exp(-0.04 * (V + 333)) + 1)
        dx1dt = alpha * (1 - x1) - beta * x1
        # I_Stim: periodic square-pulse stimulus.
        if (time % self._I_Stim_period) < self._I_Stim_length:
            IStim = self._I_Stim_amp
        else:
            IStim = 0
        # V: membrane equation summing all currents.
        dVdt = -(1 / self._C_m) * (IK1 + Ix1 + INa + ICa - IStim)
        # Output
        output = np.array([dVdt,
                           dCaidt,
                           dmdt,
                           dhdt,
                           djdt,
                           dddt,
                           dfdt,
                           dx1dt])
        return output
    def set_initial_conditions(self, y0):
        """
        Changes the initial conditions for this model.
        """
        if y0[1] < 0:
            raise ValueError('Initial condition of ``cai`` cannot be'
                             ' negative.')
        self._v0 = y0[0]
        self._cai0 = y0[1]
    def set_solver_tolerances(self, rtol=1e-4, atol=1e-6):
        """
        Updates the solver tolerances.
        See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.odeint.html
        """ # noqa
        self._rtol = float(rtol)
        self._atol = float(atol)
    def simulate(self, parameters, times):
        """ See :meth:`pints.ForwardModel.simulate()`. """
        y0 = [self._v0,
              self._cai0,
              self._m0,
              self._h0,
              self._j0,
              self._d0,
              self._f0,
              self._x10]
        # hmax caps the step size so the solver cannot skip the short
        # stimulus pulse entirely.
        solved_states = scipy.integrate.odeint(
            self._rhs, y0, times, args=(parameters,), hmax=self._I_Stim_length,
            rtol=self._rtol, atol=self._atol)
        # Only return the observable (V, Cai)
        return solved_states[:, 0:2]
    def simulate_all_states(self, parameters, times):
        """
        Runs a simulation and returns all state variables, including the ones
        that do no have a physically observable counterpart.
        """
        y0 = [self._v0,
              self._cai0,
              self._m0,
              self._h0,
              self._j0,
              self._d0,
              self._f0,
              self._x10]
        solved_states = scipy.integrate.odeint(
            self._rhs, y0, times, args=(parameters,), hmax=self._I_Stim_length,
            rtol=self._rtol, atol=self._atol)
        # Return all states
        return solved_states
    def suggested_parameters(self):
        """
        Returns suggested parameters for this model.
        The returned vector is already log-transformed, and can be passed
        directly to :meth:`simulate`.
        See :meth:`pints.toy.ToyModel.suggested_parameters()`.
        """
        # maximum conducances, in mS/cm^2
        g_Na = 4.0
        g_NaC = 0.003
        g_Ca = 0.09
        g_K1 = 0.35
        g_x1 = 0.8
        return np.log([g_Na, g_NaC, g_Ca, g_K1, g_x1])
    def suggested_times(self):
        """ See :meth:`pints.toy.ToyModel.suggested_times()`. """
        return np.arange(0, 400, 0.5)
| true
|
0b6ea6aa676345a0fd148c998945b8ba71955f1a
|
Python
|
xuefenga616/mygit
|
/ML_stu/Kaggle/CatVsDog/numpy_test.py
|
UTF-8
| 613
| 3.296875
| 3
|
[] |
no_license
|
import tensorflow as tf
import numpy as np
# a = tf.random_normal((100, 100))
# b = tf.random_normal((100, 500))
# c = tf.matmul(a, b)
# sess = tf.InteractiveSession()
# print(sess.run(c))
import matplotlib.pyplot as plt
# Demo of a dual-y-axis plot: two curves sharing one x-axis via twinx().
x = np.arange(0., np.e, 0.01)
y1 = np.exp(-x)
# NOTE(review): x starts at 0.0, so np.log(x) produces -inf for the first
# sample (with a runtime warning) — presumably acceptable for this demo.
y2 = np.log(x)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(x, y1, "b", label="Training score")
ax1.set_ylabel('Score')
ax1.set_title("Learning Curves (Tensorflow )")
ax2 = ax1.twinx() # this is the important function: second y-axis, shared x
ax2.plot(x, y2, 'r', label="Cross-validation score")
ax2.set_xlim([0, np.e])
# legend(loc="best") here only covers the most recent axes (ax2).
plt.legend(loc="best")
plt.show()
| true
|
fc5a455d6749c0d8bce1776acb53968fc3bc2071
|
Python
|
lanstonpeng/Squirrel
|
/lanstonpeng/Distinct_Subsequences_sub_problem.py
|
UTF-8
| 553
| 2.921875
| 3
|
[] |
no_license
|
import pdb
S = "rabbbit"   # source string whose subsequences are enumerated
T = "rabbit"    # target string from the original problem; appears unused below
result = []     # accumulator filled by combination()
def combination(s, n):
    # NOTE(review): this function looks unfinished — every recursive call
    # returns "" or a concatenation of ""s, so `result` only ever collects
    # empty strings; confirm intent (the call at the bottom of the file is
    # commented out, suggesting it was abandoned for combination2).
    temp = ""
    for i in range(1, len(s)):
        temp = temp + combination(s[i:], n - i)
    if n == 0:
        # Budget exhausted: record the accumulated string globally.
        result.append(temp)
        return ""
    else:
        return temp
t = []  # current partial selection (stack of characters) used by combination2
r = []  # collected output strings of combination2
def combination2(start, end):
    """Backtracking enumerator: extend the global stack `t` with characters
    of S taken from positions [start, end), recording each completed branch
    as a joined string in the global list `r`."""
    if start == end:
        # Branch complete: snapshot the current stack as one string.
        r.append("".join(t))
        return
    for idx in range(start, end):
        t.append(S[idx])
        combination2(start + 1, end)
        t.pop()  # backtrack
#combination(S,len(S) - 1)
# Enumerate over the full length of S and print the collected strings.
combination2(0,len(S))
# Python 2 print statement; this file targets Python 2.
print r
| true
|
194de15022e3d667b125bc2a38641e773bb38c17
|
Python
|
wachira90/python-ssl
|
/check-ssl-expire.py
|
UTF-8
| 681
| 2.640625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import OpenSSL
import ssl, socket
import argparse
# get domain (single positional CLI argument)
parser = argparse.ArgumentParser()
parser.add_argument("domain")
args = parser.parse_args()
domain = args.domain
# get SSL Cert info: fetch the server's PEM certificate over port 443.
cert = ssl.get_server_certificate((domain, 443))
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
# notAfter is returned as bytes, presumably in ASN.1 time form
# b'YYYYMMDDhhmmssZ' — the slices below pick year/month/day from it.
x509info = x509.get_notAfter()
exp_day = x509info[6:8].decode('utf-8')
exp_month = x509info[4:6].decode('utf-8')
exp_year = x509info[:4].decode('utf-8')
exp_date = str(exp_day) + "-" + str(exp_month) + "-" + str(exp_year)
print("SSL Certificate for domain", domain, "will be expired on (DD-MM-YYYY)", exp_date)
| true
|
bf91e997b76e577f6f8c536c87e2b90ef371ce3a
|
Python
|
pandeconscious/leetcode
|
/course_schedule/course_schedule.py
|
UTF-8
| 1,015
| 3.234375
| 3
|
[] |
no_license
|
class Solution(object):
    """LeetCode 207 'Course Schedule': all courses can be finished iff the
    prerequisite digraph contains no cycle (DFS three-color check).

    `xrange` (Python-2-only) is replaced with `range`, which iterates
    identically on Python 2 and also works on Python 3.
    """

    def _cycle(self, node):
        """Return True if a cycle is reachable from `node`.

        visited states: 0 = unvisited, 1 = on the current DFS stack (gray),
        2 = fully explored (black).
        """
        if self.visited[node] == 0:
            self.visited[node] = 1
            for nbr in self.adj_list[node]:
                if self.visited[nbr] == 0:
                    if self._cycle(nbr):
                        return True
                elif self.visited[nbr] == 1:
                    # Back edge to a node on the DFS stack -> cycle.
                    return True
            self.visited[node] = 2
        return False

    def canFinish(self, numCourses, prerequisites):
        """
        :type numCourses: int
        :type prerequisites: List[List[int]]
        :rtype: bool
        """
        # Adjacency lists: edge prerequisite -> dependent course.
        self.adj_list = [[] for _ in range(numCourses)]
        self.visited = [0] * numCourses
        self.n = numCourses
        for pair in prerequisites:
            self.adj_list[pair[1]].append(pair[0])
        # Finishable iff no DFS root discovers a cycle.
        for node in range(self.n):
            if self.visited[node] == 0:
                if self._cycle(node):
                    return False
        return True
| true
|
79056ae5955608b392a6a92b9fb23993e9393e43
|
Python
|
charleschestnut/PracticasAII
|
/EXAMEN-WHOOSH/Lisa-Code.py
|
UTF-8
| 5,089
| 2.6875
| 3
|
[] |
no_license
|
'''
Created on 19.11.2018
@author: Lisa
'''
'''
Created on 19.11.2018
@author: Lisa
'''
from tkinter import *
from tkinter import messagebox
import os
from whoosh.index import create_in,open_dir
from whoosh.fields import Schema, TEXT, KEYWORD, DATETIME, NUMERIC
from whoosh.qparser import QueryParser
import urllib.request
from bs4 import BeautifulSoup
import datetime
from _datetime import date
dirindextemas="Index"  # directory where the Whoosh index is stored
#return Schema
def get_schema():
    """Build the Whoosh schema for indexed items (all fields stored)."""
    schema = Schema(titulo=TEXT(stored=True), description=TEXT(stored=True),
                    categoria=TEXT(stored=True), fecha=DATETIME(stored=True))
    return schema
def extractXML():
    """Stub: XML extraction is not implemented yet; callers receive None."""
    return None
#Crea un indice desde los documentos contenidos en dirdocs
#El indice lo crea en un directorio (dirindex)
def index():
    """Create the Whoosh index directory and fill it with documents.

    Currently only a single hard-coded 'test' document is written; the real
    extraction (extractXML) is still a stub.
    """
    if not os.path.exists(dirindextemas):
        os.mkdir(dirindextemas)
    ix = create_in(dirindextemas, schema=get_schema())
    writer = ix.writer()
    # Extract the data using BeautifulSoup (not implemented yet).
    #Missing
    l = extractXML()
    i=0  # counter of indexed items, reported in the message box below
    #Todo: Extract the attributes
    #for item in l:
    #writer.add_document(titulo = titulo, antetitulo = antetitulo, link = link, description= description, fecha = fecha)
    #i+=1
    # Placeholder document so the index is never empty.
    writer.add_document(titulo = "test", description = "test", categoria = "test cag", fecha = datetime.datetime.now())
    messagebox.showinfo("Temas indexados", "Se han indexado "+str(i)+ " temas")
    writer.commit()
def searchByDate():
    """Open a Tk window that queries the index on the 'fecha' field.

    NOTE(review): the window title and label talk about an author
    ('Búsqueda por autor') while the query targets 'fecha' — presumably a
    copy-paste leftover; confirm which field is intended.
    """
    def mostrar_lista(event):
        lb.delete(0,END) # clear the whole listbox before refilling it
        ix=open_dir(dirindextemas)
        with ix.searcher() as searcher:
            #Change names of attribute in which you will search
            query = QueryParser("fecha", ix.schema).parse(str(en.get()))
            results = searcher.search(query)
            for r in results:
                lb.insert(END,r['titulo'])
                lb.insert(END,r['fecha'])
                lb.insert(END,'')
    v = Toplevel()
    v.title("Búsqueda por autor")
    f =Frame(v)
    f.pack(side=TOP)
    l = Label(f, text="Introduzca un autor:")
    l.pack(side=LEFT)
    en = Entry(f)
    # Pressing Return in the entry triggers the search.
    en.bind("<Return>", mostrar_lista)
    en.pack(side=LEFT)
    sc = Scrollbar(v)
    sc.pack(side=RIGHT, fill=Y)
    lb = Listbox(v, yscrollcommand=sc.set)
    lb.pack(side=BOTTOM, fill = BOTH)
    sc.config(command = lb.yview)
def searchByTitle():
    """Open a Tk window that queries the index on the 'titulo' field."""
    def mostrar_lista(event):
        lb.delete(0,END) # clear the whole listbox before refilling it
        ix=open_dir(dirindextemas)
        with ix.searcher() as searcher:
            # '%' wildcards wrap the user input for a contains-style match.
            query = QueryParser("titulo", ix.schema).parse("%"+str(en.get())+"%")
            results = searcher.search(query)
            for r in results:
                #Change names of attributes
                lb.insert(END,r['titulo'])
                lb.insert(END,r['categoria'])
                lb.insert(END,'')
    v = Toplevel()
    v.title("Búsqueda por títulos")
    f =Frame(v)
    f.pack(side=TOP)
    l = Label(f, text="Introduzca una palabra:")
    l.pack(side=LEFT)
    en = Entry(f)
    # Pressing Return in the entry triggers the search.
    en.bind("<Return>", mostrar_lista)
    en.pack(side=LEFT)
    sc = Scrollbar(v)
    sc.pack(side=RIGHT, fill=Y)
    lb = Listbox(v, yscrollcommand=sc.set)
    lb.pack(side=BOTTOM, fill = BOTH)
    sc.config(command = lb.yview)
def searchByCategory():
    """Open a Tk window that queries the index on the 'categoria' field,
    choosing the category from a Spinbox (currently empty — see TODO)."""
    def mostrar_lista(event):
        lb.delete(0,END) # clear the whole listbox before refilling it
        ix=open_dir(dirindextemas)
        with ix.searcher() as searcher:
            query = QueryParser("categoria", ix.schema).parse("%"+str(w.get())+"%")
            results = searcher.search(query)
            for r in results:
                #Change names of attributes
                lb.insert(END,r['titulo'])
                lb.insert(END,r['fecha'])
                lb.insert(END,'')
    v = Toplevel()
    v.title("Búsqueda por títulos")
    f =Frame(v)
    f.pack(side=TOP)
    l = Label(f, text="Introduzca una palabra:")
    l.pack(side=LEFT)
    sc = Scrollbar(v)
    sc.pack(side=RIGHT, fill=Y)
    lb = Listbox(v, yscrollcommand=sc.set)
    lb.pack(side=BOTTOM, fill = BOTH)
    sc.config(command = lb.yview)
    values=[]
    #Todo fill Spinbox (iterating "" leaves the value list empty)
    for i in "":
        values.append(i)
    w = Spinbox(v, values=(values))
    # NOTE(review): the button callback takes no event, but mostrar_lista
    # expects one — clicking 'Search!' would raise a TypeError; confirm.
    button = Button(v, text="Search!", command=mostrar_lista)
    w.pack(side=LEFT)
    button.pack(side=LEFT)
def create():
    """Build the main Tk window with the 'Datos' and 'Buscar' menus and run
    the event loop."""
    root = Tk()
    menubar = Menu(root)
    # 'Datos' menu: load/index the data, or quit.
    firstmenu = Menu(menubar, tearoff=0)
    firstmenu.add_command(label="Cargar", command=index)
    firstmenu.add_command(label="Exit", command=root.destroy)
    menubar.add_cascade(label="Datos", menu=firstmenu)
    # 'Buscar' menu: the three search dialogs.
    seccondmenu = Menu(menubar, tearoff=0)
    seccondmenu.add_command(label="Titulo y Descripcion", command=searchByTitle)
    seccondmenu.add_command(label="Fecha", command= searchByDate)
    seccondmenu.add_command(label="Categoria", command=searchByCategory)
    menubar.add_cascade(label="Buscar", menu=seccondmenu)
    root.config(menu=menubar)
    root.mainloop()
if __name__ == '__main__':
    # Launch the GUI when run as a script.
    create()
| true
|
4adc7865cf5223b1cee9b7c2aceb28871247b3a1
|
Python
|
Sridhar-R/Python-basic-programs
|
/ques4.py
|
UTF-8
| 233
| 3.515625
| 4
|
[] |
no_license
|
# NOTE(review): Python 2 input() evaluates the typed text (int here); on
# Python 3 it returns a str, which breaks the `a < 21` comparison below.
a = input("Enter the number between 1 and 20 : " )
b = 0  # unused in the rest of the script
def diction():
    """Print the squares of 1..20, one per line, collecting them in a dict.

    The Python-2-only statement ``print d[i]`` is replaced with the
    parenthesised form ``print(d[i])``, which behaves identically on
    Python 2 and also works on Python 3.
    """
    d = dict()
    for i in range(1, 21):
        d[i] = i ** 2
        print(d[i])
# Guard: only print the squares when the entered number is below 21.
if (a < 21):
    diction()
else:
    # Python 2 print statement; this file targets Python 2.
    print "Please Enter the number between 1 and 20 "
| true
|
cfc6f5ce716347b700be0a9c2c7b22d354cbada2
|
Python
|
alexandraback/datacollection
|
/solutions_2453486_0/Python/ymgve/prog.py
|
UTF-8
| 1,604
| 2.828125
| 3
|
[] |
no_license
|
import sys
import psyco; psyco.full()
def main():
    """Google Code Jam 'Tic-Tac-Toe-Tomek' judge (Python 2).

    Reads the input file named on the command line: a case count, then for
    each case a 4x4 board (X, O, T = joker, . = empty) followed by a blank
    line, and prints the verdict per case.
    """
    f = open(sys.argv[1], "rb")
    ncases = int(f.readline())
    for i in xrange(ncases):
        # Flatten the 4 board rows into one 16-character string.
        s = ""
        for j in xrange(4):
            s += f.readline().strip()
        f.readline()
        if len(s) != 16:
            # Python-2-only string raise; kept as-is.
            raise "WTF not proper board"
        # Any empty cell means the game may still be running.
        if "." in s:
            complete = False
        else:
            complete = True
        # (start, stride) pairs for the 10 lines to check:
        # 4 rows (stride 4... as laid out below), 4 columns, 2 diagonals.
        arr = [0,4, 1,4, 2,4, 3,4, 0,1, 4,1, 8,1, 12,1, 0,5, 3,3]
        xwon = False
        owon = False
        for j in xrange(0, 10*2, 2):
            xcount = 0
            ocount = 0
            tcount = 0
            pos = arr[j]
            for k in xrange(4):
                if s[pos] == "X":
                    xcount += 1
                elif s[pos] == "O":
                    ocount += 1
                elif s[pos] == "T":
                    tcount += 1
                pos += arr[j+1]
            # The T joker counts for either player.
            if xcount + tcount == 4:
                xwon = True
            if ocount + tcount == 4:
                owon = True
        if complete:
            outs = "Draw"
        else:
            outs = "Game has not completed"
        if xwon and not owon:
            outs = "X won"
        if owon and not xwon:
            outs = "O won"
        # Both winning simultaneously is treated as a draw.
        if xwon and owon:
            outs = "Draw"
        print "Case #%d: %s" % (i+1, outs)
main()  # run immediately; the original has no __main__ guard
| true
|
d498a81c79188de4bc4eb4e6682f25215dad2fff
|
Python
|
emilberwald/mathematics
|
/tests/number_theory/test_combinatorics.py
|
UTF-8
| 7,636
| 3.09375
| 3
|
[] |
no_license
|
import itertools
from math import gamma
import numpy as np
import pytest
from pytest import raises
from mathematics.number_theory.combinatorics import *
from mathematics.tools.decorators import timeout
from .. import name_func
class TestRiffleShuffles:
    """Unit tests for riffle_shuffles from the combinatorics module."""
    def test_riffle_shuffles(self):
        """
        https://en.wikipedia.org/wiki/Shuffle_algebra#Shuffle_product
        """
        def expected_nof_shuffles(m, n):
            # Number of riffle shuffles of two words is the binomial
            # coefficient C(len(m)+len(n), len(m)), written via gamma.
            return int(gamma(len(m) + len(n) + 1) / (gamma(len(m) + 1) * gamma(len(n) + 1)))
        m = "ab"
        n = "xy"
        m_w_n = riffle_shuffles(m, n)
        # All six interleavings of "ab" with "xy" must be present.
        assert "abxy" in m_w_n
        assert "axby" in m_w_n
        assert "xaby" in m_w_n
        assert "axyb" in m_w_n
        assert "xayb" in m_w_n
        assert "xyab" in m_w_n
        assert len(m_w_n) == expected_nof_shuffles(m, n)
        m = "012"
        n = "34"
        m_w_n = riffle_shuffles(m, n)
        assert len(m_w_n) == expected_nof_shuffles(m, n)
class TestPermutation:
    """Unit tests for the permutation utilities (parity, inversion counts,
    cycle and transposition decompositions). Parametrized tables follow the
    worked examples on the Wikipedia 'Inversion' page."""
    @timeout(handler=lambda: pytest.skip("timeout"), seconds=1)
    def test_equivalence_of_parity_methods(self):
        """All parity implementations must agree on a random permutation."""
        permutation = np.random.permutation(range(0, 10))
        parities = [parity(permutation, method) for method in list(ParityMethod)]
        assert len(set(parities)) == 1
    @pytest.mark.parametrize(
        "method,permutation,desired",
        [
            (method, permutation, desired)
            for method in list(ParityMethod)
            for permutation, desired in zip([range(0, 10), [3, 4, 5, 2, 1]], [1, -1])
        ],
        ids=name_func,
    )
    @timeout(handler=lambda: pytest.skip("timeout"), seconds=1)
    def test_parity_methods(self, method, permutation, desired):
        """Known even (+1) and odd (-1) permutations, for every method."""
        assert parity(permutation, method) == desired
    @pytest.mark.parametrize(
        "permutation,desired",
        [
            ([1, 2, 3, 4], [0, 0, 0, 0]),
            ([2, 1, 3, 4], [1, 0, 0, 0]),
            ([1, 3, 2, 4], [0, 1, 0, 0]),
            ([3, 1, 2, 4], [1, 1, 0, 0]),
            ([2, 3, 1, 4], [2, 0, 0, 0]),
            ([3, 2, 1, 4], [2, 1, 0, 0]),
            ([1, 2, 4, 3], [0, 0, 1, 0]),
            ([2, 1, 4, 3], [1, 0, 1, 0]),
            ([1, 4, 2, 3], [0, 1, 1, 0]),
            ([4, 1, 2, 3], [1, 1, 1, 0]),
            ([2, 4, 1, 3], [2, 0, 1, 0]),
            ([4, 2, 1, 3], [2, 1, 1, 0]),
            ([1, 3, 4, 2], [0, 2, 0, 0]),
            ([3, 1, 4, 2], [1, 2, 0, 0]),
            ([1, 4, 3, 2], [0, 2, 1, 0]),
            ([4, 1, 3, 2], [1, 2, 1, 0]),
            ([3, 4, 1, 2], [2, 2, 0, 0]),
            ([4, 3, 1, 2], [2, 2, 1, 0]),
            ([2, 3, 4, 1], [3, 0, 0, 0]),
            ([3, 2, 4, 1], [3, 1, 0, 0]),
            ([2, 4, 3, 1], [3, 0, 1, 0]),
            ([4, 2, 3, 1], [3, 1, 1, 0]),
            ([3, 4, 2, 1], [3, 2, 0, 0]),
            ([4, 3, 2, 1], [3, 2, 1, 0]),
        ],
        ids=name_func,
    )  # https://en.wikipedia.org/wiki/Inversion_(discrete_mathematics)
    @timeout(handler=lambda: pytest.skip("timeout"), seconds=1)
    def test_inversion_vector(self, permutation, desired):
        """Full S4 table of inversion vectors."""
        assert inversion_vector(permutation) == desired
    @pytest.mark.parametrize(
        "permutation,desired",
        [
            ([1, 2, 3, 4], [0, 0, 0, 0]),
            ([2, 1, 3, 4], [0, 1, 0, 0]),
            ([1, 3, 2, 4], [0, 0, 1, 0]),
            ([3, 1, 2, 4], [0, 1, 1, 0]),
            ([2, 3, 1, 4], [0, 0, 2, 0]),
            ([3, 2, 1, 4], [0, 1, 2, 0]),
            ([1, 2, 4, 3], [0, 0, 0, 1]),
            ([2, 1, 4, 3], [0, 1, 0, 1]),
            ([1, 4, 2, 3], [0, 0, 1, 1]),
            ([4, 1, 2, 3], [0, 1, 1, 1]),
            ([2, 4, 1, 3], [0, 0, 2, 1]),
            ([4, 2, 1, 3], [0, 1, 2, 1]),
            ([1, 3, 4, 2], [0, 0, 0, 2]),
            ([3, 1, 4, 2], [0, 1, 0, 2]),
            ([1, 4, 3, 2], [0, 0, 1, 2]),
            ([4, 1, 3, 2], [0, 1, 1, 2]),
            ([3, 4, 1, 2], [0, 0, 2, 2]),
            ([4, 3, 1, 2], [0, 1, 2, 2]),
            ([2, 3, 4, 1], [0, 0, 0, 3]),
            ([3, 2, 4, 1], [0, 1, 0, 3]),
            ([2, 4, 3, 1], [0, 0, 1, 3]),
            ([4, 2, 3, 1], [0, 1, 1, 3]),
            ([3, 4, 2, 1], [0, 0, 2, 3]),
            ([4, 3, 2, 1], [0, 1, 2, 3]),
        ],
        ids=name_func,
    )  # https://en.wikipedia.org/wiki/Inversion_(discrete_mathematics)
    @timeout(handler=lambda: pytest.skip("timeout"), seconds=1)
    def test_left_inversion_count(self, permutation, desired):
        """Full S4 table of left inversion counts."""
        assert left_inversion_count(permutation) == desired
    @pytest.mark.parametrize(
        "permutation,desired",
        [
            ([1, 2, 3, 4], [0, 0, 0, 0]),
            ([2, 1, 3, 4], [1, 0, 0, 0]),
            ([1, 3, 2, 4], [0, 1, 0, 0]),
            ([3, 1, 2, 4], [2, 0, 0, 0]),
            ([2, 3, 1, 4], [1, 1, 0, 0]),
            ([3, 2, 1, 4], [2, 1, 0, 0]),
            ([1, 2, 4, 3], [0, 0, 1, 0]),
            ([2, 1, 4, 3], [1, 0, 1, 0]),
            ([1, 4, 2, 3], [0, 2, 0, 0]),
            ([4, 1, 2, 3], [3, 0, 0, 0]),
            ([2, 4, 1, 3], [1, 2, 0, 0]),
            ([4, 2, 1, 3], [3, 1, 0, 0]),
            ([1, 3, 4, 2], [0, 1, 1, 0]),
            ([3, 1, 4, 2], [2, 0, 1, 0]),
            ([1, 4, 3, 2], [0, 2, 1, 0]),
            ([4, 1, 3, 2], [3, 0, 1, 0]),
            ([3, 4, 1, 2], [2, 2, 0, 0]),
            ([4, 3, 1, 2], [3, 2, 0, 0]),
            ([2, 3, 4, 1], [1, 1, 1, 0]),
            ([3, 2, 4, 1], [2, 1, 1, 0]),
            ([2, 4, 3, 1], [1, 2, 1, 0]),
            ([4, 2, 3, 1], [3, 1, 1, 0]),
            ([3, 4, 2, 1], [2, 2, 1, 0]),
            ([4, 3, 2, 1], [3, 2, 1, 0]),
        ],
        ids=name_func,
    )  # https://en.wikipedia.org/wiki/Inversion_(discrete_mathematics)
    @timeout(handler=lambda: pytest.skip("timeout"), seconds=1)
    def test_right_inversion_count(self, permutation, desired):
        """Full S4 table of right inversion counts."""
        assert right_inversion_count(permutation) == desired
    @pytest.mark.parametrize(
        "method,not_permutation",
        itertools.product(
            [permutation_to_adjacent_transpositions, permutation_to_cycles, permutation_to_transpositions,],
            [[7, 13, 4, 5]],
        ),
        ids=name_func,
    )
    @timeout(handler=lambda: pytest.skip("timeout"), seconds=1)
    def test_nontcontiguous_sequence_raises_exception(self, method, not_permutation):
        """A non-contiguous sequence is not a permutation and must raise."""
        with raises(IndexError):
            method(not_permutation)
    @pytest.mark.parametrize(
        "permutation, desired",
        [
            ([2, 5, 4, 3, 1], {(1, 2, 5), (3, 4)}),
            ([4, 2, 7, 6, 5, 8, 1, 3], {(1, 4, 6, 8, 3, 7), (2,), (5,)}),
            ([4, 5, 7, 6, 8, 2, 1, 3], {(1, 4, 6, 2, 5, 8, 3, 7)}),
        ],
        ids=name_func,
    )
    @timeout(handler=lambda: pytest.skip("timeout"), seconds=1)
    def test_permutation_to_cycle(self, permutation, desired):
        # assumes canonical order with min first is used
        actual = permutation_to_cycles(permutation)
        assert actual == desired
    @pytest.mark.parametrize(
        "permutation,desired",
        [
            ([1, 2, 3, 4], set()),
            ([2, 1, 3, 4], {(1, 2)}),
            ([3, 2, 1, 4], {(1, 3)}),
            ([4, 2, 3, 1], {(1, 4)}),
            ([1, 3, 2, 4], {(2, 3)}),
            ([1, 4, 3, 2], {(2, 4)}),
            ([1, 2, 4, 3], {(3, 4)}),
            ([2, 1, 4, 3], {(1, 2), (3, 4)}),
        ],
        ids=name_func,
    )
    @timeout(handler=lambda: pytest.skip("timeout"), seconds=1)
    def test_permutation_to_transpositions(self, permutation, desired):
        # assumes canonical order with min first is used
        actual = permutation_to_transpositions(permutation)
        assert actual == desired
| true
|
ba71163091e266132dcfd586e6b7ce8f5b514385
|
Python
|
alextimofeev272/rosalind
|
/PROT.py
|
UTF-8
| 1,205
| 2.609375
| 3
|
[] |
no_license
|
#coding: utf_8
# Given an RNA chain (mRNA, messenger RNA),
# build the protein string it encodes.
# Parallel lookup tables: codon a[j] translates to amino acid b[j];
# '0' marks a stop codon and is replaced with blank lines at the end.
a = ('UUU','CUU','AUU','GUU','UUC','CUC','AUC','GUC','UUA','CUA','AUA','GUA')
a = a + ('UUG','CUG','AUG','GUG','UCU','CCU','ACU','GCU','UCC','CCC','ACC','GCC')
a = a + ('UCA','CCA','ACA','GCA','UCG','CCG','ACG','GCG','UAU','CAU','AAU','GAU')
a = a + ('UAC','CAC','AAC','GAC','UAA','CAA','AAA','GAA','UAG','CAG','AAG','GAG')
a = a + ('UGU','CGU','AGU','GGU','UGC','CGC','AGC','GGC','UGA','CGA','AGA','GGA')
a = a + ('UGG','CGG','AGG','GGG')
b = ('F','L','I','V','F','L','I','V','L','L','I','V')
b = b + ('L','L','M','V','S','P','T','A','S','P','T','A')
b = b + ('S','P','T','A','S','P','T','A','Y','H','N','D')
b = b + ('Y','H','N','D','0','Q','K','E','0','Q','K','E')
b = b + ('C','R','S','G','C','R','S','G','0','R','R','G')
b = b + ('W','R','R','G')
# NOTE(review): `file` shadows the Python 2 builtin, and the hard-coded
# Windows paths use unescaped backslashes — works here only because \p and
# \i / \o are not recognized escape sequences.
file = open('D:\python\input.txt')
file2 = open('D:\python\output.txt','w')
text = file.read()
n = len(text)
d = ''
# Translate the sequence codon by codon (linear scan of the 64-entry table).
for i in range(0,n,3):
    f = text[i:i+3]
    for j in range(64):
        if f == a[j]:
            d = d + b[j]
# Stop codons become paragraph breaks in the output.
d = d.replace('0','\n\n')
print d
file2.write(d)
print n
file.close()
file2.close()
| true
|
cab2311fe58ae58fc507296d0865284183ddb8a7
|
Python
|
L-ingqin12/LanQiaoCup-Python
|
/src/2 基础练习/基础练习 十六进制转十进制.py
|
UTF-8
| 361
| 3.21875
| 3
|
[] |
no_license
|
# Approach 1: one-liner using built-ins (kept disabled below)
'''
print(int(eval('0x'+input())))
'''
# Approach 2: hand-written base-conversion algorithm (this was also accepted)
def sixteen2ten(n):
    """Convert a hexadecimal string to its decimal integer value.

    Bug fix: the original mapped letters via ord(c) - 55, which is only
    correct for uppercase A-F; lowercase digits now work too.
    """
    ten = 0
    for i in range(len(n)):
        c = n[-i-1]  # scan from the least-significant digit
        if c.isalpha():
            c = ord(c.upper()) - 55  # 'A' -> 10 ... 'F' -> 15
        else:
            c = int(c)
        ten += c * 16 ** i
    return ten
print(sixteen2ten(input()))  # read a hex string from stdin, print its decimal value
| true
|
ba427af0d390f07dcd9313b63e402f1e1097731a
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02861/s399988311.py
|
UTF-8
| 233
| 2.921875
| 3
|
[] |
no_license
|
import math

# Read N points, sum the Euclidean distance over every ordered pair
# (including i == j, which contributes 0), then print that sum divided by N.
N = int(input())
points = []
for _ in range(N):
    px, py = map(int, input().split())
    points.append((px, py))
s = 0
for x1, y1 in points:
    for x2, y2 in points:
        s += math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
print(s * (1 / N))
| true
|
d3438687f1fc937617f3f218dcf0e80bfdd42fa5
|
Python
|
Nexosis/nexosisclient-py
|
/nexosisapi/vocabulary_summary.py
|
UTF-8
| 2,029
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
from nexosisapi.data_source_type import DataSourceType
import dateutil.parser
class VocabularySummary(object):
    """Summary information about a Vocabulary"""

    def __init__(self, data_dict=None):
        """Build a summary from an API response dict; every key is optional."""
        if data_dict is None:
            data_dict = {}
        self._id = data_dict.get('id', None)
        self._data_source_name = data_dict.get('dataSourceName', None)
        self._column_name = data_dict.get('columnName', None)
        self._data_source_type = DataSourceType[data_dict.get('dataSourceType', 'dataSet')]
        # Bug fix: the original called dateutil.parser.parse(...) unconditionally,
        # which raises TypeError when 'createdOnDate' is absent (e.g. when
        # data_dict defaults to {}).  Parse only when a value is present.
        created_on = data_dict.get('createdOnDate', None)
        self._created_on_date = dateutil.parser.parse(created_on) if created_on else None
        self._created_by_session_id = data_dict.get('createdBySessionId', None)

    @property
    def id(self):
        """The id of the Vocabulary
        :return: the vocabulary id
        :rtype: string
        """
        return self._id

    @property
    def data_source_name(self):
        """The name of the data source from which the vocabulary was built
        :return: the data source name
        :rtype: string
        """
        return self._data_source_name

    @property
    def column_name(self):
        """The name of the column in the data source from which the vocabulary was built
        :return: the column name
        :rtype: string
        """
        return self._column_name

    @property
    def data_source_type(self):
        """The type of the data source (DataSource or View) from which the vocabulary was built
        :return: the data source type
        :rtype: DataSourceType
        """
        return self._data_source_type

    @property
    def created_on_date(self):
        """The datetime that the vocabulary was created
        :return: the created on date (None if the response omitted it)
        :rtype: string
        """
        return self._created_on_date

    @property
    def created_by_session_id(self):
        """The session id that generated the vocabulary
        :return: the session id
        :rtype: string
        """
        return self._created_by_session_id
| true
|
28f69959a6d9b1a7ef1f0d94fb0d640b31ebc3d5
|
Python
|
siyujiang7/NBC_competition
|
/try.py
|
UTF-8
| 38
| 2.78125
| 3
|
[] |
no_license
|
# NOTE(review): 'd' is not in the list, so .index() raises ValueError before
# print is ever reached -- this scratch script demonstrates that exception.
n = ['a','b','c'].index('d')
print(n)
| true
|
88a05c27da11084101abef237fac1fbeefacb8fd
|
Python
|
diane630/Diane-LeetCode
|
/133. Clone Graph.py
|
UTF-8
| 1,191
| 3.46875
| 3
|
[] |
no_license
|
"""
# Definition for a Node.
class Node:
def __init__(self, val = 0, neighbors = None):
self.val = val
self.neighbors = neighbors if neighbors is not None else []
"""
class Solution:
    def __init__(self):
        # Memo of original node -> its clone, shared across recursive calls
        # so cycles in the graph terminate.
        self.visited = {}

    def cloneGraph(self, node: 'Node') -> 'Node':
        """Return a deep copy of the connected, undirected graph reachable
        from `node`, using recursive DFS with memoization."""
        if not node:
            return node
        if node in self.visited:
            return self.visited[node]
        cloned_node = Node(node.val, [])
        # Register the clone *before* recursing so back-edges resolve.
        self.visited[node] = cloned_node
        for nei_node in node.neighbors:
            cloned_node.neighbors.append(self.cloneGraph(nei_node))
        return cloned_node

        # Alternative iterative BFS solution, kept for reference.  In the
        # original file this code sat unreachably after the return above and
        # referenced `deque` without importing it (from collections).
        #
        # if not node:
        #     return node
        # visited = {node: Node(node.val, [])}
        # deq = deque([node])
        # while deq:
        #     cur_node = deq.popleft()
        #     for nei in cur_node.neighbors:
        #         if nei not in visited:
        #             visited[nei] = Node(nei.val, [])
        #             deq.append(nei)
        #         visited[cur_node].neighbors.append(visited[nei])
        # return visited[node]
| true
|
d9977159e18cae200eb566118b1407819fd83a91
|
Python
|
WesleyCastilho/CodeSignalChallenges
|
/arrays/rotate_image/rotate_image.py
|
UTF-8
| 101
| 2.5625
| 3
|
[] |
no_license
|
class RotateImage:
    def rotate_image(self):
        # Rotate the matrix 90 degrees clockwise: reverse the row order,
        # then transpose (equivalent to transposing and reversing each row).
        return [list(column) for column in zip(*self[::-1])]
| true
|
9ee97dc7b6de36ab5db8b904941dac258bdaadb2
|
Python
|
jcagumbay/python-sorting
|
/test/test_sorting.py
|
UTF-8
| 382
| 3.171875
| 3
|
[] |
no_license
|
import pytest
from src.quick import Quick
from src.selection import Selection
from src.bubble import Bubble
class TestSorting:
    @pytest.mark.parametrize("instance", [Selection(), Bubble(), Quick()])
    def test_sort(self, instance):
        """Every sorter implementation must order the same sample input."""
        data = [1, 3, 2, 7, 8, 5]
        assert instance.sort(data) == [1, 2, 3, 5, 7, 8]
| true
|
95e329554c6db8df97c632f577f51b3c148bcb5c
|
Python
|
LiamAlexis/programacion-en-python
|
/clase 1/_5_Estructuras_selectivas.py
|
UTF-8
| 1,550
| 4.5625
| 5
|
[] |
no_license
|
"""
Estructuras selectivas
"""
# operadores de comparacion
# > mayor que
# >= mayor o igual que
# < menor que
# <= menor o igual que
# == igual que
# != distinto que
# condiciones and, or y not
# if 2 > 1:
# print("2 es mayor que 1")
# if 2 >= 1:
# print("2 es mayor o igual que 1")
# if 2 < 3:
# print("2 es menor que 3")
# if 2 <= 3:
# print("2 es menor o iguel que 3")
# if 3 == 3:
# print("3 es igual a 3")
# if 3 != 4:
# print("3 es ditinto a 4")
# if not 3 == 4:
# print("3 no es igual a 4")
# if 2 < 3 and 4 > 3:
# print("Ambas condiciones son true.")
# if 2 > 3 or 4 > 3:
# print("Una o ambas condiciones son verdaderas.")
""" Tabla de verdad """
""" condicion and """
# condicion-A condicion-B Resultado
# T T T
# T F F
# F F F
# F T F
""" condicion or """
# condicion-A condicion-B Resultado
# T T T
# T F T
# F F F
# F T T
""" Selectiva if else
"""
# if 2 >= 5:
# print("La condición es verdadera")
# else:
# print("La condición es falsa!")
""" Selectiva if elif else
Con este se puede simular un switch
"""
# Only one branch runs: 2 > 3 is False and 2 < 3 is True, so the second
# message is printed.  (An if/elif/else chain can emulate a switch.)
if 2 > 3:
    print("Se cumple la primera condición.")
elif 2 < 3:
    print("Se cumple la segunda condición.")
else:
    print("No se cumplio ninguna de las condiciones.")
| true
|
5a885230c92420610bfc14b9bc0d35c6e79b9aa3
|
Python
|
Anthncara/MEMO-PersonnalChallenges
|
/Python Challenges/SecondsToMinutesConverter/TimeConverteribrahim.py
|
UTF-8
| 883
| 3.890625
| 4
|
[] |
no_license
|
def convertMillis(millis):
    """Split a millisecond count into [hours, minutes, seconds].

    Hours wrap at 24, exactly like the original modulo arithmetic.
    """
    total_seconds = millis // 1000
    total_minutes, seconds = divmod(total_seconds, 60)
    total_hours, minutes = divmod(total_minutes, 60)
    return [total_hours % 24, minutes, seconds]
# Interactive driver: repeatedly read a value; numeric input is converted,
# 'exit' (any capitalisation) terminates, anything else re-prompts.
print("### This program converts milliseconds into hours, minutes, and seconds ###")
print("To exit the program, please type 'exit'")
print("Please enter the milliseconds (should be greater than zero) :")
a = 0  # loop flag: set to 1 when the user asks to exit
while a == 0:
    x = input ()
    b = x.isnumeric()
    if b == True:
        x = int(x)
        print(convertMillis(x))
    elif b == False :
        x = str(x)
        x = x.title()  # normalises 'EXIT'/'exit'/'Exit' to 'Exit'
        if x == "Exit":
            print("Exiting the program... Good Bye")
            a = 1
            break  # redundant with a = 1, kept as in the original
        else:
            print ("Not Valid Input!!!")
            print("To exit the program, please type 'exit'")
            print("Please enter the milliseconds (should be greater than zero) :")
| true
|
64608e8e0efc2816e92c07cdce2fbb336a57b776
|
Python
|
richardtguy/catanex
|
/app/orderbook.py
|
UTF-8
| 2,299
| 2.765625
| 3
|
[] |
no_license
|
import datetime
from operator import attrgetter
from queue import Queue
import logging
from app import models, db, app
import config
class Messenger():
    """
    Add messages about executed trades to queue to send to clients by websocket connections
    """
    def __init__(self, queue):
        # queue: a Queue (or compatible) shared with the websocket sender side
        self.q = queue

    def send_message(self, message):
        # Producer side only: enqueue the notification and return immediately.
        self.q.put(message)
class Exchange():
    """
    Match orders in the database and handle trades
    """
    def __init__(self, traded_stocks, messenger):
        # traded_stocks: the stock symbols handled by this exchange
        # messenger: Messenger used to notify clients of executed trades
        self.traded_stocks = traded_stocks
        self.messenger = messenger

    def trade(self, stock):
        """
        Attempt to match best bid/ best ask pairs and complete trades. Return None if
        no matching pair found.
        """
        # get best bid & best ask for selected stock
        orders = models.Order.query.filter_by(stock=stock)
        bid = orders.filter_by(side='bid').order_by(models.Order.limit.desc()).first()
        ask = orders.filter_by(side='ask').order_by(models.Order.limit).first()
        # match best bid/ best ask for trade if possible
        if (bid and ask) and (bid.limit >= ask.limit):
            app.logger.info('Found matching bid/ask pair')
            # Trade volume is the smaller of the two order volumes ...
            order_pair = sorted([bid, ask], key=attrgetter('volume'))
            volume = order_pair[0].volume
            # ... and the price is the limit of the earlier-placed order.
            order_pair.sort(key=attrgetter('timestamp'))
            price = order_pair[0].limit
            # check in case bidder has insufficient funds
            if bid.owner.balance < (price * volume):
                # cancel trade and try again
                # NOTE(review): both orders are left untouched here, so a
                # caller that retries while this returns True may spin on the
                # same unaffordable pair -- confirm caller behaviour.
                return True
            else:
                app.logger.info('Executing trade')
                # execute trade: move funds between the two owners
                bid.owner.balance = bid.owner.balance - (volume * price)
                ask.owner.balance = ask.owner.balance + (volume * price)
                # reduce both orders by the traded volume; fully-filled
                # orders are removed from the book
                for order in (bid, ask):
                    order.volume = order.volume - volume
                    if order.volume == 0:
                        db.session.delete(order)
                db.session.commit()
                # send messages to buyer and seller
                buy_msg = "{} bought {} {} at ${} each!".format(bid.owner.name,
                    volume, bid.stock, price)
                sell_msg = "{} sold {} {} at ${} each!".format(ask.owner.name,
                    volume, ask.stock, price)
                self.messenger.send_message(buy_msg)
                self.messenger.send_message(sell_msg)
                # log trade in ticker
                t = models.Trade(stock=bid.stock, volume=volume, price=price)
                db.session.add(t)
                db.session.commit()
                return True
        # no more matched orders, return
        return None
| true
|
71c34a2954d646f2dc0544b42fc18750bac0e548
|
Python
|
astrax/FormationPythonHAP2019-2020
|
/docs/.src/cours2/solution/ex7.py
|
UTF-8
| 58
| 3.421875
| 3
|
[] |
no_license
|
# Squares-plus-one for n = 0..20, built with a list comprehension.
x = [n ** 2 + 1 for n in range(21)]
print(x)
| true
|
fd05fbc0df8e74f4863b2b6fa6c367d74c1f7471
|
Python
|
libo999/DataFrameJK
|
/p5.py
|
UTF-8
| 2,202
| 3.578125
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 24 14:26:47 2020
@author: Libo
作业题第5题
"""
def greedy_algorithm_p5(task_num, start_time, lasting_time):
    """Greedy scheduler: each time step, run one unit of the released task
    with the least remaining work; return the sum of every task's
    completion time.  Mutates `lasting_time` in place.

    NOTE(review): `start_time.index(start_time[i])` and
    `lasting_time.index(temp)` return the *first* matching position, so
    duplicate start times or durations can select the wrong task, and an
    empty `option_tasks` would make min() raise -- confirm input
    assumptions before reusing this.
    """
    time_now = 0  # current time step
    task_finished_num = 0  # number of tasks already finished
    tasks_list = []  # task index chosen at each time step (-999 = idle)
    max_start_time = max(start_time)
    while task_finished_num != task_num:
        if time_now < max_start_time:  # collect tasks released by now
            options_index = []
            for i in range(len(start_time)):
                if start_time[i] <= time_now:
                    options_index.append(start_time.index(start_time[i]))
        else:  # past the last release: every task is available
            options_index = [start_time.index(i) for i in start_time]
        if len(options_index) > 0:  # run the released task with least remaining work
            option_tasks = [
                lasting_time[i] for i in options_index if lasting_time[i] > 0
            ]
            temp = min(option_tasks)
            temp_index = lasting_time.index(temp)
            lasting_time[temp_index] = temp - 1
            tasks_list.append(temp_index)
        else:
            tasks_list.append(-999)  # idle this step
        task_finished_num = len([i for i in lasting_time if i == 0])
        time_now += 1
    # Total = sum over tasks of the (1-based) step at which each finished.
    total_time = 0
    tasks = [start_time.index(i) for i in start_time]
    for task in tasks:
        max_time = max(
            [idx + 1 for idx, val in enumerate(tasks_list) if val == task])
        total_time = total_time + max_time
    return total_time
if __name__ == "__main__":
    # Read the task count, start times and durations from stdin.
    tasks_num = int(input())  # number of tasks
    start_time = input()  # space-separated start times
    lasting_time = input()  # space-separated task durations
    start_time = [int(i) for i in start_time.split(' ')]
    lasting_time = [int(i) for i in lasting_time.split(' ')]
    # Run the greedy scheduler and print the total completion time.
    total_time = greedy_algorithm_p5(tasks_num, start_time, lasting_time)
    print(total_time)
| true
|
000f2dcbe1b9f3886cd7392b44c28464cf20d4e0
|
Python
|
enderyildirim/python_demo
|
/utils.py
|
UTF-8
| 262
| 2.71875
| 3
|
[] |
no_license
|
import yaml
class ConfigParser:
    """Thin wrapper around yaml.safe_load for reading a config file."""

    @staticmethod
    def parse(path=None):
        """Parse `path` (default "config.yaml").  On a YAML parse failure
        the error is printed and None is returned; open() errors propagate,
        as before."""
        target = path or "config.yaml"
        with open(target, 'r') as stream:
            try:
                return yaml.safe_load(stream)
            except Exception as err:
                print(err)
| true
|
9e83018645c7ac55055d00e9463363ea3c13108b
|
Python
|
wisec/restler-fuzzer
|
/restler/test_servers/test_socket.py
|
UTF-8
| 1,967
| 2.9375
| 3
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
""" Mock TCP socket that forwards requests/responses to/from a test server """
from test_servers.unit_test_server.unit_test_server import *
from engine.transport_layer.response import HttpResponse
class TestSocket(object):
    """ Mock TCP socket that routes requests and responses through a
    test server instead of a real network connection. """
    __test__ = False

    def __init__(self, server_type: str):
        """ Create the backing test server.

        @param server_type: The test server ID that identifies which test
                            server to be used.
        """
        # Guard clause: reject unknown server types up front.
        if server_type != 'unit_test':
            err_msg = f"Invalid test server specified: {server_type}"
            print(err_msg)
            raise Exception(err_msg)
        self._server = UnitTestServer()

    def connect(self, address):
        """ Socket-connect stub; delegates to the test server.
        @return: None
        """
        self._server.connect()

    def sendall(self, message):
        """ Forward @message to the test server for parsing, as a real
        socket's sendall would transmit it.

        @param message: The message that was to be sent to the server
        @type  message: Str
        @return: None
        """
        self._server.parse_message(message)

    def recv(self) -> HttpResponse:
        """ Return the test server's response, standing in for recv.
        @return: The test server's response
        """
        return self._server.response

    def close(self):
        """ Close the backing test server.
        @return: None
        """
        self._server.close()
| true
|
c4ffe5e7f229ee9ff1728c37b6e3813265c4d2f0
|
Python
|
eanikolaev/funnelsort
|
/gen_data.py
|
UTF-8
| 195
| 2.796875
| 3
|
[] |
no_license
|
import random
FILENAME = "data"
MAX = 100500

if __name__ == '__main__':
    # `with` guarantees the file is closed even if a write fails
    # (the original left the handle open on error).
    with open(FILENAME, 'w') as f:
        # Write MAX random integers in [0, MAX], space-separated.
        for _ in range(MAX):
            f.write(str(random.randint(0, MAX)) + ' ')
| true
|
f0dc9c08f6222924e7d80842a41af9824ba6ff4d
|
Python
|
narimiran/advent_of_code_2016
|
/python/day_02.py
|
UTF-8
| 1,551
| 3.484375
| 3
|
[] |
no_license
|
# Advent of Code 2016, day 2 -- follow U/D/L/R instructions across a keypad
# and record the key under the cursor at the end of each instruction line.
with open('./inputs/02.txt', 'r') as infile:
    puzzle = infile.readlines()

# Moves as complex numbers: the real axis is columns, the imaginary axis rows.
DIRECTIONS = {
    'R': 1,
    'L': -1,
    'D': 1j,
    'U': -1j,
}

# Part 1: the ordinary 3x3 keypad.
KEYPAD_1 = [
    [1, 2, 3],
    [4, 5, 6],
    [7, 8, 9]
]

# Part 2: the diamond-shaped keypad; 0 entries are unreachable padding.
KEYPAD_2 = [
    [0, 0, 1, 0, 0],
    [0, 2, 3, 4, 0],
    [5, 6, 7, 8, 9],
    [0, 'A','B','C', 0],
    [0, 0, 'D', 0, 0]
]


def find_solutions(second_part=False):
    """Return the bathroom code for part 1 (default) or part 2."""

    def is_inside(pos):
        # Part 1: stay within the 3x3 square centred on the origin.
        # Part 2: stay within Manhattan distance 2 (the diamond).
        if not second_part:
            return all(abs(coord) <= 1 for coord in {pos.real, pos.imag})
        else:
            return abs(pos.real) + abs(pos.imag) <= 2

    def get_key(pos):
        # Translate an (offset-shifted) complex position into the key label.
        return str(keypad[int(pos.imag)][int(pos.real)])

    if not second_part:
        keypad = KEYPAD_1
        offset = 1+1j  # the center of the keypad
        pos = 0+0j  # start from the 5, in the center
    else:
        keypad = KEYPAD_2
        offset = 2+2j  # the center of the keypad
        pos = -2+0j  # start from the 5, two left from the center

    key_positions = []
    for line in puzzle:
        for direction in line.strip():
            new = pos + DIRECTIONS[direction]
            # Moves that would leave the keypad are simply ignored.
            pos = new if is_inside(new) else pos
        key_positions.append(pos)

    return ''.join(get_key(pos+offset) for pos in key_positions)


print("Ok, I've memorized the bathroom code:", find_solutions())
print('....')
print("Hmmm, this well-designed keypad is not the one I was expecting.")
print("But let me try to open it with the same instuctions as before.")
print("Here's the new code:", find_solutions(second_part=True))
| true
|
382a5a3c1e91c1612281ff8c6e8602a525f23b91
|
Python
|
karsyboy/dc29_badge_generator
|
/badge_code_gen.py
|
UTF-8
| 1,209
| 3.078125
| 3
|
[] |
no_license
|
# PySerial is required to run this script make sure to install it with python3 and as sudo
import serial,io

# Brute-forces DEF CON 29 badge IDs by writing candidate IDs, built from two
# fixed fragments plus every 4-digit hex value, over the badge's serial console.
ser = serial.Serial('/dev/ttyACM0', 9600, timeout=1) # Make sure the serial device is set to the proper port
sio = io.TextIOWrapper(io.BufferedRWPair(ser, ser))

badge_str_1 = "CHANGE-ME" # First for characters of a badge response for your badge
badge_str_2 = "CHANGE-ME" # Characters 9 through 32 from the same badge response used for badge_str_1

# Generates a list of all hex combinations to test with
def gen_all_hex():
    # Yields '0000' .. 'FFFF' (all 16**4 four-digit uppercase hex strings).
    i = 0
    while i < 16**4:
        yield "{:04X}".format(i)
        i += 1

badge_id = []
for s in gen_all_hex():
    badge_id.append(s)

# Starts session with defcon badge
sio.write(str("\n\r"))
sio.flush()

badge_count = input("Enter current badge count: ")  # resume point for interrupted runs
badge_count = int(badge_count)

# While loop to run through all possible badge combinations
# NOTE(review): the loop stops at 65500 although badge_id holds 65536
# entries -- the last 36 candidates are never tried; confirm this is intended.
while badge_count <= 65500:
    sio.write(str("5"))  # badge menu option that accepts an ID
    badge_full_id = badge_str_1 + badge_id[badge_count] + badge_str_2
    print(badge_full_id)
    sio.write(str(badge_full_id))
    sio.write("\n\r")
    sio.write("\n\r")
    sio.flush()
    badge_count += 1
    print(badge_count)

print("Job Done!!!")
ser.close()
| true
|
59a291c07bf24e5bdc90a1c3b7bf34fb185376e6
|
Python
|
alexlu07/AI_from_scratch
|
/NeuralNetwork/number_network.py
|
UTF-8
| 795
| 2.625
| 3
|
[] |
no_license
|
from network import Network
import numpy as np
import pandas as pd
class NumberNetwork(Network):
    """MNIST digit classifier: 784 inputs, two 128-unit hidden layers, 10 outputs."""

    def __init__(self):
        super().__init__(784, 128, 128, 10)

    def train_with_dataset(self):
        # Flatten 28x28 images to 784-vectors and scale pixels into [0, 1].
        ip = np.load('mnist/train-images.npy')
        ip = ip.reshape([ip.shape[0], -1])
        ip = ip.astype("float64")
        ip /= 255
        # One-hot encode the integer labels.
        actual = pd.get_dummies(np.load('mnist/train-labels.npy'))
        actual = actual.to_numpy()
        self.train(ip, actual)

    def test_with_dataset(self):
        # NOTE(review): this loads the *training* split again -- it looks like
        # a copy-paste bug and the test split should presumably be evaluated
        # here.  Confirm the intended file names before changing.
        ip = np.load('mnist/train-images.npy')
        ip = ip.reshape([ip.shape[0], -1])
        ip = ip.astype("float64")
        ip = ip / 255
        actual = pd.get_dummies(np.load('mnist/train-labels.npy'))
        actual = actual.to_numpy()
        return self.test(ip, actual)
| true
|
6aea1f96edadbc3e1f5bc311388487aafc8fbbf7
|
Python
|
mujizi/algo
|
/src/predict/bayes.py
|
UTF-8
| 977
| 2.5625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# @Time : 2018/9/3 下午12:11
# @Author : Benqi
import numpy as np
from base import Base
import util.load_data as load_data
class Bayes(Base):
    """bunch.metrix: like list, every list element is a sample data
    predicted: 1d numpy.array,like a list
    """
    def __init__(self, dic_config={}):
        # NOTE(review): mutable default argument is shared across calls; safe
        # only while nothing mutates it -- consider dic_config=None instead.
        Base.__init__(self, dic_config)

    def load_data(self):
        # Load the pickled bunch named by dic_config['data_path'] and keep
        # its feature matrix for prediction.
        self.logging.info(self.dic_config['data_path'])
        self.bunch = load_data.read_bunch(self.dic_config['data_path'])
        self.metrix = self.bunch.metrix

    def predict(self):
        # NOTE(review): this first predict() call is recomputed below and
        # self.result is not read in this class -- possibly redundant work;
        # confirm whether Base relies on self.result before removing it.
        self.result = self.model.predict(self.metrix)
        if self.dic_config['format'] == 'proba':
            # probability of the first class for every sample
            self.predict_result = self.model.predict_proba(self.metrix)[:, 0]
        else:
            self.predict_result = self.model.predict(self.metrix)

    def dump(self):
        # Write the predictions to dic_config['predict_path'], one per line.
        np.savetxt(self.dic_config['predict_path'], self.predict_result, fmt='%s', delimiter=',')
| true
|
9bf4fa24446e052b78d3dd85594b3720ffa7666c
|
Python
|
gschen/where2go-python-test
|
/1906101038江来洪/day20191112/Test_2_10.py
|
UTF-8
| 249
| 3.109375
| 3
|
[] |
no_license
|
# Print a Fahrenheit -> Celsius conversion table between two bounds.
l,u = map(int,input('请输入两个数:').split())  # lower and upper Fahrenheit bounds
if l>u:
    print('Invalid')
else:
    print('fahr celsius')
    f = l
    while l<=f<=u:
        c = 5*(f-32)/9  # Fahrenheit to Celsius
        # adjacent string literals concatenate: ' ' + the formatted value
        print(f,' ''%.1f'% c)
        f += 2  # table step of 2 degrees
| true
|
fd13ca4f237ea6d5d15b51ba50dcaa991a342900
|
Python
|
gunal89/python-Interview_workout
|
/class_static_mtd.py
|
UTF-8
| 1,130
| 2.859375
| 3
|
[] |
no_license
|
# Python 2 demo class contrasting instance, class and static methods.
class clsstatic():
    # Class attributes, shared by all instances until shadowed.
    var1 = 'PARAMAGURU'
    var2 = 'JAVA'

    def set_ins(self):
        # Creates an *instance* attribute that shadows the class-level var1
        # on this object only.
        self.var1 = "MUTHU"

    def print_ins(self):
        print "varl : ",self.var1

    @classmethod
    def set_cls(cls):
        # Rebinding through cls changes the class attribute for every instance.
        cls.var2 = 'PYTHON'

    @classmethod
    def print_cls(cls):
        print "Var2 :", cls.var2

    @staticmethod
    def stst_mtd(buildver):
        # Static method: no access to instance or class state.
        if buildver == '3.7.0.22':
            print "Current build version {}".format(buildver)
        else :
            print "{} is not Current build version".format(buildver)
# Demonstration: instance attributes shadow per-object, class attributes
# change for all instances, static methods touch neither.
print "\n ### Instance Method ###\n"
obj_c = clsstatic()
print "1st check (ins_mtd) obj_c ::", obj_c.var1
obj_c.print_ins()
obj_set = clsstatic()
obj_set.set_ins()
# obj_c still sees the class attribute -- only obj_set was shadowed.
print "Recheck (ins_mtd) obj_c ::", obj_c.var1
print "\n ### Class metod ###\n"
obj_x = clsstatic()
print "1st check(cls method) the obj_x ::",obj_x.var2
obj_y=clsstatic()
obj_y.set_cls()
print obj_y.var2
# The class-level change is visible through obj_x as well.
print "Recheck (cls method) the obj_x ::",obj_x.var2
print " \n### Static Method ### \n"
obj_s = clsstatic()
obj_s.stst_mtd('3.7.0.21')
| true
|
900c2d8ed30eccc38f96339375d12fcdcfc9fd6f
|
Python
|
1ucian0/qiskit-terra
|
/qiskit/exceptions.py
|
UTF-8
| 3,582
| 2.703125
| 3
|
[
"Apache-2.0"
] |
permissive
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
===============================================
Top-level exceptions (:mod:`qiskit.exceptions`)
===============================================
All Qiskit-related errors raised by Qiskit are subclasses of the base:
.. autoexception:: QiskitError
.. note::
Errors that are just general programming errors, such as incorrect typing, may still raise
standard Python errors such as ``TypeError``. :exc:`QiskitError` is generally for errors raised
in usage that is particular to Qiskit.
Many of the Qiskit subpackages define their own more granular error, to help in catching only the
subset of errors you care about. For example, :mod:`qiskit.circuit` almost exclusively uses
:exc:`.CircuitError`, while both :exc:`.QASM2ExportError` and :exc:`.QASM2ParseError` derive from
:exc:`.QASM2Error` in :mod:`qiskit.qasm2`, which is in turn a type of :exc:`.QiskitError`.
Qiskit has several optional features that depend on other packages that are not required for a
minimal install. You can read more about those, and ways to check for their presence, in
:mod:`qiskit.utils.optionals`. Trying to use a feature that requires an optional extra will raise a
particular error, which subclasses both :exc:`QiskitError` and the Python built-in ``ImportError``.
.. autoexception:: MissingOptionalLibraryError
Two more uncommon errors relate to failures in reading user-configuration files, or specifying a
filename that cannot be used:
.. autoexception:: QiskitUserConfigError
.. autoexception:: InvalidFileError
"""
from typing import Optional
class QiskitError(Exception):
    """Base class for errors raised by Qiskit."""

    def __init__(self, *message):
        """Set the error message."""
        # Join the message parts once and reuse the result
        # (the original performed the same join twice).
        joined = " ".join(message)
        super().__init__(joined)
        self.message = joined

    def __str__(self):
        """Return the message."""
        return repr(self.message)
class QiskitUserConfigError(QiskitError):
    """Raised when an error is encountered reading a user config file."""

    # Default text; note this is a *class* attribute, distinct from the
    # instance attribute QiskitError.__init__ assigns.
    message = "User config invalid"
class MissingOptionalLibraryError(QiskitError, ImportError):
    """Raised when an optional library is missing."""

    def __init__(
        self, libname: str, name: str, pip_install: Optional[str] = None, msg: Optional[str] = None
    ) -> None:
        """Set the error message.
        Args:
            libname: Name of missing library
            name: Name of class, function, module that uses this library
            pip_install: pip install command, if any
            msg: Descriptive message, if any
        """
        message = [f"The '{libname}' library is required to use '{name}'."]
        if pip_install:
            message.append(f"You can install it with '{pip_install}'.")
        if msg:
            message.append(f" {msg}.")
        # Join the parts once and reuse the result
        # (the original performed the same join twice).
        joined = " ".join(message)
        super().__init__(joined)
        self.message = joined

    def __str__(self) -> str:
        """Return the message."""
        return repr(self.message)
class InvalidFileError(QiskitError):
    """Raised when the file provided is not valid for the specific task."""
| true
|
49944fa1d504075d1cf1df2a6dcde8e5db594800
|
Python
|
Gasan66/Coursera
|
/The Basics of Python Programming/solution2.py
|
UTF-8
| 150
| 3.328125
| 3
|
[] |
no_license
|
def sum(a, b):
    """Return a + b by repeated increment (Peano-style exercise).

    Converted from recursion to iteration so a large b no longer hits the
    recursion limit; for b <= 0, a is returned unchanged, exactly as
    before.  (Intentionally shadows the built-in `sum`, as in the
    original exercise.)
    """
    while b > 0:
        b -= 1
        a += 1
    return a
# Read two integers from stdin and print their sum.
x, y = int(input()), int(input())
print(sum(x, y))
| true
|
b11266823c60a4725fee173d50490058572ec073
|
Python
|
0000duck/METR4202-T6
|
/robot_ws/catkin_ws/src/servo_node/scripts/servo_control.py~
|
UTF-8
| 3,108
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import rospy
import RPi.GPIO as GPIO
import time
from std_msgs.msg import String
from control_logic_node.msg import CurrentJointState, DesJointState
from sensor_msgs.msg import JointState
ROBOT_FREQ = 10
class Servo_Controller:
    """Bridges ROS joint-state topics to four Dynamixel joints and drives an
    SG90 gripper servo via 50 Hz PWM on Raspberry Pi GPIO pin 13."""

    # Desired joint states for all the servos
    def cb_desired_js(self, data):
        # data.thetas[0..3] are the Dynamixel joint targets.
        self.pos.name = ["joint_1", "joint_2", "joint_3", "joint_4"]
        self.pos.position = [data.thetas[0], data.thetas[1], data.thetas[2], data.thetas[3]]
        self.pos.velocity = [0.50, 1.00, 1.00, 1.00]
        # Sets the position of the SG90 servo
        self.SG90 = data.thetas[4]

    # Current dynamixel joint states
    def cb_current_js(self, data):
        joint1 = data.position[0]
        joint2 = data.position[1]
        joint3 = data.position[2]
        joint4 = data.position[3]
        # NOTE(review): reads self.SG90, which __init__ assigns only after the
        # subscriber is registered -- an early callback could raise
        # AttributeError; confirm the intended ordering.
        self.currentJointPos = [joint1, joint2, joint3, joint4, self.SG90]

    # Closes the gripper when the threshold pi/2 is passed
    def gripperControl(self, rad):
        # Map a gripper angle (radians) to one of the two PWM duty cycles.
        if rad < 1.57:
            self.duty_cycle = self.openPose
        else:
            self.duty_cycle = self.closePose
        return self.duty_cycle

    def test(self):
        # NOTE(review): j1..j5 are undefined -- calling this raises NameError;
        # it looks like leftover scaffolding.
        self.testPos = JointState()
        self.testPos.name = ["joint_1", "joint_2", "joint_3", "joint_4"]
        self.testPos.position = [j1, j2, j3, j4]
        self.testPos.velocity = [0.50, 1.00, 1.00, 1.00]
        # Sets the position of the SG90 servo
        self.testSG90 = j5

    def __init__(self):
        rospy.init_node('actuator_controller', anonymous=False)
        rate = rospy.Rate(ROBOT_FREQ)
        self.pos = JointState()
        self.currentJointPos = CurrentJointState()
        # Comms with Dynamixels
        self.dynamixel_pub = rospy.Publisher("desired_joint_state", JointState, queue_size=10)
        self.dynamixel_sub = rospy.Subscriber("joint_states", JointState, self.cb_current_js)
        # Comms with Trajectory Planner
        rospy.Subscriber("Trajectory_DesJS", DesJointState, self.cb_desired_js)
        # Comms with Control Logic and Forward Kinematics
        self.current_joint_pub = rospy.Publisher("Actuator_CurrentJS", CurrentJointState, queue_size=10)
        # Duty cycle to close the gripper
        self.openPose = 8.1
        # Duty cycle to open the gripper
        self.closePose = 7.2
        # NOTE(review): the names and the two comments above disagree
        # (openPose is labelled the *close* duty cycle), and SG90 is seeded
        # with a duty cycle although gripperControl treats it as an angle in
        # radians -- confirm which value opens the gripper.
        self.SG90 = self.openPose
        # Configure the Pi to use pin names (i.e. BCM) and allocate I/O
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(13, GPIO.OUT)
        # Create PWM channel on pin 13 with a frequency of 50Hz
        pwm_servo = GPIO.PWM(13, 50)
        pwm_servo.start(self.closePose) # Duty cycle of 7.2 is the gripper open pose
        # Main loop: publish joint targets, drive the gripper PWM and
        # republish the latest joint state until ROS shuts down.
        while not rospy.is_shutdown():
            self.dynamixel_pub.publish(self.pos)
            pwm_servo.ChangeDutyCycle(self.gripperControl(self.SG90))
            self.current_joint_pub.publish(self.currentJointPos)
            rate.sleep()
if __name__ == "__main__":
    try:
        Servo_Controller()  # the constructor runs the control loop until shutdown
        rospy.spin()
    except rospy.ROSInterruptException:
        GPIO.cleanup()  # release GPIO pins if ROS interrupts the node
        pass
| true
|
ec2f134b00548fad3bb54f5a85dc78bf7f062256
|
Python
|
danipj/unicamp-mc906
|
/project_3/tcgdataset/tcgcrop.py
|
UTF-8
| 848
| 2.53125
| 3
|
[] |
no_license
|
import os
import cv2
root_folder = "./dataset"
folders_path = os.listdir(root_folder)
target_size = (300, 238)
standard_size = (240, 330)

# Walk every folder under the dataset root and rewrite each card image:
# resize to a standard card size, cut out the artwork region, resize that
# crop, then pad top/bottom with white so all outputs share one shape.
for folder in folders_path:
    folder_full_path = os.path.join(root_folder, folder)
    for image_name in os.listdir(folder_full_path):
        image_path = os.path.join(folder_full_path, image_name)
        print("Cropping " + image_path)
        original = cv2.imread(image_path, cv2.IMREAD_COLOR)
        standardized = cv2.resize(original, standard_size)
        artwork = standardized[38:170, 26:214]
        resized_artwork = cv2.resize(artwork, target_size)
        padded = cv2.copyMakeBorder(resized_artwork, 31, 31, 0, 0, cv2.BORDER_CONSTANT, value=(255, 255, 255))
        cv2.imwrite(image_path, padded)
| true
|
7e8f79b28116b1a33da78a79caf295f6a6dc4e55
|
Python
|
jonasanso/surveys
|
/surveys/tests.py
|
UTF-8
| 1,093
| 2.765625
| 3
|
[] |
no_license
|
from django.test import TestCase
from surveys.models import Survey, SurveyResponse
from surveys.exceptions import NoMoreAvailablePlacesError
class SurveyResponseModelTests(TestCase):
    def test_reduce_available_places_after_creating_survey_response(self):
        """
        Creating a survey response must reduce by one the number of available places.
        """
        survey = Survey(name="first", available_places=10, user_id=1)
        survey.save()
        SurveyResponse(survey=survey, user_id=2).save()
        refreshed = Survey.objects.get(pk=survey.pk)
        self.assertIs(refreshed.available_places, 9)

    def test_raise_no_more_available_places_error_when_places_is_zero(self):
        """
        Creating a survey response must raise no more available error when places is zero.
        """
        with self.assertRaises(NoMoreAvailablePlacesError):
            full_survey = Survey(name="no more", available_places=0, user_id=1)
            full_survey.save()
            SurveyResponse(survey=full_survey, user_id=2).save()
| true
|
f8f1e28ee4d088be10c7b9174d5df6fb855d97b6
|
Python
|
andrew-hsiao/SDC
|
/T1/lenet.py
|
UTF-8
| 4,878
| 3.46875
| 3
|
[] |
no_license
|
"""
LeNet Architecture
HINTS for layers:
Convolutional layers:
tf.nn.conv2d
tf.nn.max_pool
For preparing the convolutional layer output for the
fully connected layers.
tf.contrib.flatten
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.layers import flatten
EPOCHS = 10
BATCH_SIZE = 50
# LeNet architecture:
# INPUT -> CONV -> ACT -> POOL -> CONV -> ACT -> POOL -> FLATTEN -> FC -> ACT -> FC
#
# Don't worry about anything else in the file too much, all you have to do is
# create the LeNet and return the result of the last fully connected layer.
def LeNet(x):
    """Build the LeNet graph on flattened 28x28 inputs and return the logits.

    Pipeline: INPUT -> CONV -> ACT -> POOL -> CONV -> ACT -> POOL
              -> FLATTEN -> FC -> ACT -> FC
    """
    # Reshape from 2D to 4D. This prepares the data for
    # convolutional and pooling layers.
    x = tf.reshape(x, (-1, 28, 28, 1))
    # Pad 0s to 32x32. Centers the digit further.
    # Add 2 rows/columns on each side for height and width dimensions.
    x = tf.pad(x, [[0, 0], [2, 2], [2, 2], [0, 0]], mode="CONSTANT")

    # NOTE(review): truncated_normal is used with the default stddev (1.0);
    # a smaller stddev is customary for conv/FC weight init -- confirm intent.
    weights = {
        "layer_1":tf.Variable(tf.truncated_normal([5, 5, 1, 6])),   # conv1 filters
        "layer_2":tf.Variable(tf.truncated_normal([5, 5, 6, 16])),  # conv2 filters
        "layer_3":tf.Variable(tf.truncated_normal([400, 120])),     # fc1
        "layer_4":tf.Variable(tf.truncated_normal([120, 10]))       # fc2 (logits)
    }
    bias = {
        "layer_1":tf.Variable(tf.zeros(6)),
        "layer_2":tf.Variable(tf.zeros(16)),
        "layer_3":tf.Variable(tf.zeros(120)),
        "layer_4":tf.Variable(tf.zeros(10))
    }

    # (32,32,1) -conv-> (28,28,6) -pool-> (14,14,6)
    act_1 = tf.nn.conv2d(x, weights['layer_1'], strides=[1, 1, 1, 1], padding="VALID") + bias['layer_1']
    act_1 = tf.nn.relu(act_1)
    act_1 = tf.nn.max_pool(act_1, [1, 2, 2, 1], [1, 2, 2, 1], "VALID")

    # (14,14,6) -conv-> (10,10,16) -pool-> (5,5,16)
    act_2 = tf.nn.conv2d(act_1, weights['layer_2'], strides=[1, 1, 1, 1], padding="VALID") + bias['layer_2']
    act_2 = tf.nn.relu(act_2)
    act_2 = tf.nn.max_pool(act_2, [1, 2, 2, 1], [1, 2, 2, 1], "VALID")

    # (5, 5, 16) -flatten-> 5*5*16=400
    act_2 = flatten(act_2)

    # 400 -FC-> 120
    act_3 = tf.add(tf.matmul(act_2, weights['layer_3']), bias['layer_3'])
    act_3 = tf.nn.relu(act_3)

    # 120 -FC-> 10 (raw logits; softmax is applied in the loss)
    act_4 = tf.add(tf.matmul(act_3, weights['layer_4']), bias['layer_4'])
    return act_4
# MNIST consists of 28x28x1, grayscale images
x = tf.placeholder(tf.float32, (None, 784))
# Classify over 10 digits 0-9
y = tf.placeholder(tf.float32, (None, 10))
fc2 = LeNet(x)

# Softmax cross-entropy over the raw logits, averaged over the batch.
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(fc2, y))
opt = tf.train.AdamOptimizer()
train_op = opt.minimize(loss_op)
# Accuracy: fraction of samples whose argmax matches the one-hot label.
correct_prediction = tf.equal(tf.argmax(fc2, 1), tf.argmax(y, 1))
accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
def eval_data(dataset):
    """
    Given a dataset as input returns the loss and accuracy.

    Partial batches are discarded: only steps_per_epoch * BATCH_SIZE
    examples are evaluated (e.g. 55000 // 64 * 64 = 54976 of 55000).
    Uses the module-level graph ops (loss_op, accuracy_op, x, y) and the
    default TF session.
    """
    num_batches = dataset.num_examples // BATCH_SIZE
    examples_seen = num_batches * BATCH_SIZE
    loss_sum, acc_sum = 0, 0
    session = tf.get_default_session()
    for _ in range(num_batches):
        xs, ys = dataset.next_batch(BATCH_SIZE)
        batch_loss, batch_acc = session.run(
            [loss_op, accuracy_op], feed_dict={x: xs, y: ys})
        # Weight each batch by its size so the means are exact.
        loss_sum += batch_loss * xs.shape[0]
        acc_sum += batch_acc * xs.shape[0]
    return loss_sum / examples_seen, acc_sum / examples_seen
if __name__ == '__main__':
    # Load data
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Drop the final partial batch, mirroring eval_data's accounting.
        steps_per_epoch = mnist.train.num_examples // BATCH_SIZE
        num_examples = steps_per_epoch * BATCH_SIZE
        # Train model
        for i in range(EPOCHS):
            for step in range(steps_per_epoch):
                batch_x, batch_y = mnist.train.next_batch(BATCH_SIZE)
                # NOTE(review): running only the train op returns None,
                # so `loss` does not hold the loss value here — confirm
                # whether [train_op, loss_op] was intended.
                loss = sess.run(train_op, feed_dict={x: batch_x, y: batch_y})
            # Report validation metrics once per epoch.
            val_loss, val_acc = eval_data(mnist.validation)
            print("EPOCH {} ...".format(i+1))
            print("Validation loss = {:.3f}".format(val_loss))
            print("Validation accuracy = {:.3f}".format(val_acc))
            print()
        # Evaluate on the test data
        test_loss, test_acc = eval_data(mnist.test)
        print("Test loss = {:.3f}".format(test_loss))
        print("Test accuracy = {:.3f}".format(test_acc))
| true
|
23786af68b57cd9cea0bce8c2d898dbd02907eb9
|
Python
|
David-Byrne/CART-ML
|
/driver.py
|
UTF-8
| 1,268
| 3.46875
| 3
|
[] |
no_license
|
import random
from statistics import mean, stdev
from cart import Cart
def main():
    """Build a CART tree on the owl dataset and report 10-round accuracy."""
    # Read and preprocess the data: numeric attributes, class label last.
    with open("owls15.csv") as source:
        lines = source.readlines()
    data = []
    for line in lines:
        fields = line.rstrip("\n").split(",")
        row = [float(value) for value in fields[0:-1]]
        # add back in the class label as the final column
        row.append(fields[-1])
        data.append(row)
    # Build and print the CART decision tree over the whole dataset.
    Cart(data).visualise_model()
    # Test accuracy 10 times on shuffled 2/3 train, 1/3 test splits.
    acc_scores = []
    for round_no in range(10):
        random.shuffle(data)
        cut = len(data) * 2 // 3
        training_data, test_data = data[0:cut], data[cut:]
        cart = Cart(training_data)
        accuracy = cart.test_accuracy(test_data)
        print("Round {}, Accuracy is {:.4f}".format(round_no + 1, accuracy))
        acc_scores.append(accuracy)
        cart.save_actual_vs_predicted_results(test_data)
    print("")
    print("CART classifier has an accuracy of {:.2f}%, +/- {:.2f}%"
          .format(mean(acc_scores) * 100, stdev(acc_scores) * 2 * 100))
    # Giving +/- 2 standard deviations to show confidence interval of result
if __name__ == '__main__':
    main()
| true
|
2df2295e596ddf4014b1fc5af0ce973c78a36c64
|
Python
|
keskinselim/Numbers
|
/Numbers/Numbers.py
|
UTF-8
| 367
| 4.25
| 4
|
[] |
no_license
|
# A bare expression is evaluated but its result is discarded when a script
# runs; wrap it in print() to actually see it.
25 * 25
print(25 * 25)
# The result can also be stored in a variable first.
number = 25 * 25
print(number)
# Basic arithmetic operators: +, -, * and /.
print(25 + 25)
print(25 - 5)
print(12 * 12)
print(12 / 4)
# type() reports the type of a value: an int here...
type(34657)
# ...but a str once the digits are wrapped in quotes.
type("34657")
| true
|
92d666ae828ba6f885514652177c6ac1f616e323
|
Python
|
legauchy/tp1TLI
|
/trace.py
|
UTF-8
| 4,473
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from math import *
def trace(function, xmin, xmax, nstep, output):
    """Plot *function* of x over [xmin, xmax] as PostScript code.

    Args:
        function: Expression in the variable ``x`` (e.g. ``"x*x"``).
            It is eval'd, so only trusted input must ever be passed.
        xmin, xmax: Bounds of the sampling interval.
        nstep: Number of sampling steps (nstep + 1 points are evaluated).
        output: Writable file-like object receiving the PostScript text.
    """
    #output.write("x, %s\n" % function)
    output.write("%!\n")
    func_save = function
    # NOTE(review): eval of a user-supplied expression — acceptable for a
    # local plotting tool, never expose to untrusted callers.
    function = eval("lambda x:" + function)
    x_values = []
    y_values = []
    # Fix: sys.maxint is Python 2 only; sys.maxsize exists on both 2 and 3
    # and serves the same "initial extreme" purpose for the running min/max.
    ymin = sys.maxsize
    ymax = -sys.maxsize
    step = 1.*(xmax-xmin)/nstep
    for i in range(nstep+1):
        x = xmin + i*step
        try:
            y = function(x)
            x_values.append(x)
            y_values.append(y)
            if ymin > y:
                ymin = y
            if ymax < y:
                ymax = y
        # Fix: bare except also swallowed KeyboardInterrupt/SystemExit;
        # narrow to Exception (math domain errors, ZeroDivision, ...).
        except Exception:
            sys.stderr.write("%s is not a valid function for x=%s\n" % (func_save, x) )
            continue
    ### Light-gray dashed grid lines with axis labels ###
    output.write("gsave\n")
    output.write("/Times-Roman findfont\n")
    output.write("18 scalefont\n")
    output.write("setfont\n")
    output.write("newpath\n")
    output.write("0.5 setgray\n")
    output.write("[2] 1 setdash\n")
    # Seven horizontal grid lines spanning the y range.
    step = (ymax - ymin) / 6.0
    for i in range(0, 7):
        x = 106
        y = 196 + 400 * ((ymin+i*step) - ymin) / (ymax - ymin)
        output.write("%s %s moveto\n" % (x, y))
        output.write("%s %s lineto\n" % (x+400, y))
        output.write("%s %s moveto\n" % (x-40, y-4))
        output.write("(%s) show\n" % str(int((ymin+i*step)*100)/100.0))
    # Seven vertical grid lines spanning the x range.
    step = (xmax - xmin) / 6.0
    for i in range(0, 7):
        x = 106 + 400 * ((xmin+i*step) - xmin) / (xmax - xmin)
        y = 196
        output.write("%s %s moveto\n" % (x, y))
        output.write("%s %s lineto\n" % (x, y+400))
        output.write("%s %s moveto\n" % (x-20, y-16))
        output.write("(%s) show\n" % str(int((xmin+i*step)*100)/100.0))
    output.write("stroke\n")
    output.write("grestore\n")
    ### Plot frame and axes (400x400 box anchored at 106,196) ###
    output.write("newpath\n")
    output.write("%s %s moveto\n" % (306, 396))
    output.write("%s %s moveto\n" % (306-200, 396))
    output.write("%s %s lineto\n" % (306+200, 396))
    output.write("%s %s moveto\n" % (306, 396-200))
    output.write("%s %s lineto\n" % (306, 396+200))
    output.write("%s %s lineto\n" % (106, 396+200))
    output.write("%s %s lineto\n" % (106, 396-200))
    output.write("%s %s lineto\n" % (506, 396-200))
    output.write("%s %s lineto\n" % (506, 396+200))
    output.write("%s %s lineto\n" % (306, 396+200))
    output.write("stroke\n")
    ### The function curve itself, scaled into the frame ###
    if len(x_values) > 0:
        x = 106 + 400 * (x_values[0] - xmin) / (xmax - xmin)
        y = 196 + 400 * (y_values[0] - ymin) / (ymax - ymin)
        output.write("newpath\n")
        output.write("%s %s moveto\n" % (x, y))
        for i in range(len(x_values)):
            x = 106 + 400 * (x_values[i] - xmin) / (xmax - xmin)
            y = 196 + 400 * (y_values[i] - ymin) / (ymax - ymin)
            output.write("%s %s lineto\n" % (x, y))
        output.write("stroke\n")
    output.write("showpage\n")
def main(argv=None):
    """Parse command line options and plot the requested function.

    Options: -o/--output file, -m/--xmin, -M/--xmax; one positional
    argument holding the function expression in x. Returns via sys.exit
    on usage errors.
    """
    if argv is None:
        argv = sys.argv
    import getopt
    try:
        options, argv = getopt.getopt(argv[1:], "o:hm:M:", ["output=","help","xmin=","xmax="])
    except getopt.GetoptError as message:
        sys.stderr.write("%s\n" % message)
        sys.exit(1)
    def print_usage():
        # One place for the usage text instead of two duplicated copies.
        sys.stderr.write("Usage : ./trace.py ([output=]) (o:) \"func(x)\" \n")
        sys.stderr.write("-o, --output : specifier un fichier pour la sortie \n")
        sys.stderr.write("-m, --xmin : borne inférieure de l'interval de définition \n")
        sys.stderr.write("-M, --xmax : borne supérieure de l'interval de définition \n")
        sys.stderr.write("func(x) définition de la fonction à tracer en fontion de 'x' \n")
    # Affiche le message d'aide (show the help message)
    for option, value in options:
        if option in ["-h", "--help"]:
            print_usage()
            sys.exit(1)
    if len(argv) != 1:
        print_usage()
        sys.exit(1)
    function = argv[0]
    if len(function) == 0:
        sys.stderr.write("function is empty \n")
        sys.exit(1)
    output = sys.stdout
    xmin, xmax = 0.0, 1.0
    for option, value in options:
        if option in ["-o", "--output"]:
            # Fix: the Python-2-only builtin file() is gone in Python 3;
            # open() behaves identically on both.
            output = open(value, "w")
        elif option in ["-m", "--xmin"]:
            xmin = float(value)
        elif option in ["-M", "--xmax"]:
            xmax = float(value)
        else:
            assert False, "option " + option + "non définie"
    if xmax < xmin:
        sys.stderr.write("xmax : %s ne doit pas être plus petit que xmin : %s\n" %(xmax, xmin) )
        sys.exit(1)
    trace(function, xmin, xmax, 100, output)
if __name__ == "__main__":
    sys.exit(main())
| true
|
981e4e3bd53e0566118aa5369d5ac1cdcf9a4ef3
|
Python
|
sevenlabs/pjabberd
|
/pjabberd.py
|
UTF-8
| 6,147
| 2.640625
| 3
|
[] |
no_license
|
"""Main module for starting the server"""
import os, sys
import pjs.conf.conf
import logging
from pjs.db import DB, sqlite
class PJSLauncher:
    """The one and only instance of the server. This controls all other
    components.
    """
    def __init__(self):
        """Initializes the server data"""
        self.servers = []
        # Standard XMPP ports: 5222 client-to-server, 5269 server-to-server.
        self.c2sport = 5222
        self.s2sport = 5269
        self.hostname = 'localhost'
        self._c2s, self._s2s = (None, None)
    def run(self):
        """Creates one C2S and one S2S server and runs them.
        Also initializes the threadpools for each server.
        """
        # imports happen here, because otherwise we could create a cycle
        from pjs.server import C2SServer, S2SServer
        self._c2s = C2SServer(self.hostname, self.c2sport, self)
        self.servers.append(self._c2s)
        self._s2s = S2SServer(self.hostname, self.s2sport, self)
        self.servers.append(self._s2s)
        from pjs.connection import LocalTriggerConnection
        # see connection.LocalTriggerConnection.__doc__
        self.triggerConn = LocalTriggerConnection(self.hostname, self.c2sport)
        def notifyFunc():
            """Function that gets executed when a job in a threadpool
            completes.
            """
            # Poke the trigger connection so the async loop wakes up.
            self.triggerConn.send(' ')
        # Five worker threads per server; completion nudges the event loop.
        self._c2s.createThreadpool(5, notifyFunc)
        self._s2s.createThreadpool(5, notifyFunc)
    def stop(self):
        """Shuts down the servers"""
        self.triggerConn.handle_close()
        self._c2s.handle_close(True)
        self._s2s.handle_close(True)
    def getC2SServer(self):
        """Returns the C2S server"""
        return self._c2s
    def getS2SServer(self):
        """Returns the S2S server"""
        return self._s2s
def populateDB():
    """Creates a sample database.

    Idempotent: tables use IF NOT EXISTS and inserts use OR IGNORE; any
    leftover "already exists" OperationalError is swallowed as a belt-and-
    braces measure.
    """
    con = DB()
    c = con.cursor()
    try:
        c.execute("CREATE TABLE IF NOT EXISTS jids (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\
                    jid TEXT NOT NULL,\
                    password TEXT NOT NULL,\
                    UNIQUE(jid))")
        c.execute("CREATE TABLE IF NOT EXISTS roster (userid INTEGER REFERENCES jids NOT NULL,\
                    contactid INTEGER REFERENCES jids NOT NULL,\
                    name TEXT,\
                    subscription INTEGER DEFAULT 0,\
                    PRIMARY KEY (userid, contactid)\
                    )")
        c.execute("CREATE TABLE IF NOT EXISTS offline (fromid INTEGER REFERENCES jids NOT NULL,\
                    toid INTEGER REFERENCES jids NOT NULL,\
                    time TIMESTAMP,\
                    content TEXT\
                    )")
        c.execute("CREATE TABLE IF NOT EXISTS rostergroups (groupid INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\
                    userid INTEGER REFERENCES jids NOT NULL,\
                    name TEXT NOT NULL,\
                    UNIQUE(userid, name)\
                    )")
        c.execute("CREATE TABLE IF NOT EXISTS rostergroupitems\
                    (groupid INTEGER REFERENCES rostergroup NOT NULL,\
                    contactid INTEGER REFERENCES jids NOT NULL,\
                    PRIMARY KEY (groupid, contactid))")
        c.execute("INSERT OR IGNORE INTO jids (jid, password) VALUES ('foo@localhost', 'foo')")
        c.execute("INSERT OR IGNORE INTO jids (jid, password) VALUES ('bar@localhost', 'bar')")
        c.execute("INSERT OR IGNORE INTO jids (jid, password) VALUES ('test@localhost', 'test')")
        c.execute("INSERT OR IGNORE INTO jids (jid, password) VALUES ('admin@localhost', 'admin')")
        con.commit()
        # c.execute("INSERT INTO roster (userid, contactid, subscription) VALUES (1, 2, 8)")
        # c.execute("INSERT INTO roster (userid, contactid, subscription) VALUES (2, 1, 8)")
        # c.execute("INSERT INTO rostergroups (userid, name) VALUES (1, 'friends')")
        # c.execute("INSERT INTO rostergroups (userid, name) VALUES (1, 'weirdos')")
        # c.execute("INSERT INTO rostergroupitems (groupid, contactid) VALUES (1, 2)")
    # Fix: "except X, e" is Python-2-only syntax; "as e" works on 2.6+ and 3.
    except sqlite.OperationalError as e:
        # str(e) replaces the deprecated/removed e.message attribute.
        if str(e).find('already exists') >= 0: pass
        else: raise e
    c.close()
if __name__ == '__main__':
    # Python 2 idiom: reload(sys) restores the hidden setdefaultencoding so
    # implicit str<->unicode conversions use UTF-8 process-wide.
    reload(sys)
    sys.setdefaultencoding('utf-8')
    launcher = PJSLauncher()
    # Expose the launcher globally so other pjs modules can reach it.
    pjs.conf.conf.launcher = launcher
    # TODO: move all of this into a config file + parser
#    logFileName = 'server-log'
#    logDir = 'log'
#    logLoc = os.path.join(logDir, logFileName)
#    logLevel = logging.DEBUG
#    def configLogging(filename=logFileName, level=logLevel,
#                      format='%(asctime)s %(levelname)-8s %(message)s'):
#        try:
#            logging.basicConfig(filename=filename, level=level, format=format)
#        except IOError:
#            print >> sys.stderr, 'Could not create a log file. Logging to stderr.'
#            logging.basicConfig(level=level, format=format)
#    if os.path.exists('log'):
#        if os.path.isdir('log') and os.access('log', os.W_OK):
#            configLogging(logLoc)
#        else:
#            print >> sys.stderr, 'Logging directory is not accessible'
#            configLogging()
#    else:
#        try:
#            os.mkdir('log')
#            configLogging(logLoc)
#        except IOError:
#            print >> sys.stderr, 'Could not create logging directory'
#            configLogging()
    # For now, log everything at DEBUG to stderr (file logging commented
    # out above, pending the config-file TODO).
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s')
    populateDB()
    launcher.run()
    logging.info('server started')
    import pjs.async.core
    try:
        # Blocks in the asyncore-style event loop until interrupted.
        pjs.async.core.loop()
    except KeyboardInterrupt:
        # clean up
        logging.info("KeyboardInterrupt sent. Shutting down...")
        logging.shutdown()
| true
|
731781526c31f51761469c0e0d0204492c510d3a
|
Python
|
mobinrg/rpi_spark_drives
|
/JMRPiSpark/Drives/Display/RPiDisplay.py
|
UTF-8
| 5,466
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2018 Kunpeng Zhang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# #########################################################
#
# Base Display
# by Kunpeng Zhang
# v1.0.0 2018.3.20
#
import RPi.GPIO as GPIO
class RPiDiaplay:
    """!
    RPiDiaplay is a hardware abstraction of the display.
    You need to create a subclass that inherits from it and use the new
    subclass to implement the initialization and operations of the
    display chip.
    """
    # NOTE(review): the class name misspells "RPiDisplay"; renaming would
    # break existing subclasses/imports, so it is kept as-is.

    # Display dimensions in pixels (set by _init_config).
    width = None
    height = None

    # SPI interface: bus object and BCM pin numbers (None until configured).
    _spi = None
    _spi_mosi = None
    _spi_dc = None
    _spi_cs = None
    _spi_reset = None
    _spi_clk = None

    # display buffer
    _buffer = None

    def _command(self, commands):
        """!
        Send a command to the hardware bus of the display chip (I2C, SPI, others).
        Waiting for subclasses to implement.
        """
        # """Send command to spi bus of display chip, most DC pin need set to LOW """
        # if self._spi == None: raise "Do not setting SPI"
        # GPIO.output( self._spi_dc, 0 )
        # self._spi.writebytes( commands )
        raise NotImplementedError

    def _data(self, data):
        """!
        Send data to the hardware bus of the display chip (I2C, SPI, others).
        Waiting for subclasses to implement.
        """
        # """Send data to spi bus of display chip, most DC pin need set to HIGH """
        # if self._spi == None: raise "Do not setting SPI"
        # GPIO.output( self._spi_dc, 1 )
        # self._spi.writebytes( data )
        raise NotImplementedError

    def _init_config(self, width, height, spi=None, spiMosi= None, spiDC=None, spiCS=None, spiReset=None, spiClk=None):
        """!
        SPI hardware and display width, height initialization.
        Only stores the configuration; no GPIO access happens here.
        """
        self._spi = spi
        self._spi_mosi = spiMosi
        self._spi_dc = spiDC
        self._spi_cs = spiCS
        self._spi_reset = spiReset
        self._spi_clk = spiClk
        self.width = width
        self.height = height

    def _init_io(self):
        """!
        GPIO initialization.
        Set GPIO into BCM mode and configure the output pins.
        """
        GPIO.setwarnings(False)
        GPIO.setmode( GPIO.BCM )
        # Only the data/command (DC) pin is configured as an output here;
        # subclasses add any further pins they drive.
        pins = [ self._spi_dc ]
        for pin in pins:
            GPIO.setup( pin, GPIO.OUT )

    def _init_display(self):
        """!
        Display hardware initialization.
        Waiting for subclasses to implement.
        """
        raise NotImplementedError

    def __init__ ( self, width, height, spi=None, spiMosi= None, spiDC=None, spiCS=None, spiReset=None, spiClk=None ):
        """!
        Initialize the RPiDiaplay object instance and store the GPIO/SPI
        configuration. Note: does NOT call _init_io or _init_display —
        subclasses/callers drive the actual hardware bring-up.
        """
        self._init_config(width, height, spi, spiMosi, spiDC, spiCS, spiReset, spiClk)

    def clear(self, fill = 0x00):
        """!
        Clear buffer data and other data.
        The base class only resets the in-memory buffer; it never touches
        the hardware.
        """
        # NOTE(review): allocates one list entry per pixel (width*height);
        # 1-bpp controllers may instead expect width*height//8 bytes —
        # confirm against the concrete subclass driver.
        self._buffer = [ fill ] * ( self.width * self.height )

    def on(self):
        """!
        Power on display.
        Waiting for subclasses to implement.
        """
        raise NotImplementedError

    def off(self):
        """!
        Power off display.
        Waiting for subclasses to implement.
        """
        raise NotImplementedError

    def init(self):
        """!
        Initialize the display.
        Waiting for subclasses to implement.
        """
        raise NotImplementedError

    def reset(self):
        """!
        Reset display.
        Waiting for subclasses to implement.
        """
        raise NotImplementedError

    def setContrast(self, contrast):
        """!
        Change contrast of display.
        Waiting for subclasses to implement.
        """
        raise NotImplementedError

    def setBrightness(self, brightness):
        """!
        Change brightness of display.
        Waiting for subclasses to implement.
        """
        raise NotImplementedError

    def display(self, buffer = None):
        """!
        Send a buffer of data to the display.
        Waiting for subclasses to implement.
        """
        raise NotImplementedError

    def setImage(self, image):
        """!
        Set an image to display. The image can be a PIL Image object or another image object.
        Waiting for subclasses to implement.
        """
        raise NotImplementedError
| true
|
4576375a8cbd4a1898ef589c3565c242f443e8ea
|
Python
|
chulkx/ejercicios_python_new
|
/Clase11/alquiler.py
|
UTF-8
| 790
| 3.46875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 27 03:17:42 2021
@author: chulke
"""
import numpy as np
import matplotlib.pyplot as plt
def ajuste_lineal_simple(x,y):
    """Ordinary least-squares fit of y ~ a*x + b.

    Returns the (slope, intercept) pair computed from the centred
    covariance / variance ratio.
    """
    dx = x - x.mean()
    slope = (dx * (y - y.mean())).sum() / (dx ** 2).sum()
    intercept = y.mean() - slope * x.mean()
    return slope, intercept
# Observations: surface area (m^2) vs monthly rent.
superficie = np.array([150.0, 120.0, 170.0, 80.0])
alquiler = np.array([35.0, 29.6, 37.4, 21.0])

# Fit rent ~ a * surface + b by simple least squares.
a, b = ajuste_lineal_simple(superficie, alquiler)

# Dense grid over [0, 200] to draw the fitted line.
_x = np.linspace(start = 0, stop = 200, num = 1000)
_y = a * _x + b

# Scatter the data, overlay the regression line, label the axes.
g = plt.scatter(x = superficie, y = alquiler)
plt.title('precio_alquiler ~ superficie')
plt.plot(_x, _y, c = 'green')
plt.xlabel('Superficie')
plt.ylabel('Precio')
plt.show()

# Residuals and their mean squared error (ECM).
errores = alquiler - (a * superficie + b)
print(errores)
print("ECM:", (errores**2).mean())
| true
|
7c3cb57fb66833a4c54314f35b4a309f523e3973
|
Python
|
programmerQI/python
|
/CSCI2824/hw7/selSort.py
|
UTF-8
| 444
| 3.3125
| 3
|
[] |
no_license
|
def selSort(list):
    """Selection sort with an instrumentation counter.

    Sorts the given list in place and returns (sorted_list, op_count),
    where op_count tallies comparisons and assignments exactly as the
    exercise specifies: +1 per comparison, +2 when a new minimum is
    recorded, +3 for the swap at the end of each pass.
    """
    n = len(list)
    ops = 0
    for left in range(n):
        smallest = list[left]
        pos = left
        for right in range(left + 1, n):
            ops += 1          # the comparison below
            if list[right] < smallest:
                ops += 2      # the two bookkeeping assignments
                smallest = list[right]
                pos = right
        # Swap the found minimum into position `left`.
        tmp = list[left]
        list[left] = smallest
        list[pos] = tmp
        ops += 3              # the three swap assignments
    return (list, ops)
# Demo: print (sorted_list, operation_count) for two small inputs.
for demo in ([2, 1], [3, 2, 1]):
    print(selSort(demo))
| true
|
deac99b3ee7fdef6453a837dd110740cd30fbc73
|
Python
|
sontolesquad/Mrsontolexv2
|
/module/SpamMail.py
|
UTF-8
| 4,396
| 2.59375
| 3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
#!/usr/bin/python
# - Spammer-Email
# | Author: P4kL0nc4t
# | Date: 13/11/2017
# | Reupload: Mrsontolex
# | Date: 03/06/2018
# | Editing author will not make you the real coder :)
import argparse
import requests
import time
import datetime
import random
import string
import smtplib
print """\
/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\|
| Name: Mrsontolex No System Is Safe |
| Team: PERSIT2K18 |
| Hobi: Yang Kamu suka aku pun ikut suka |
|___________________________________________________________|
"""
parser = argparse.ArgumentParser(description="Spammer (Email) is a tool used to spam an email address by sending an email repeatedly using an SMTP server.", epilog="If you had stuck, you can mail me at p4kl0nc4t@obsidiancyberteam.id")
parser.add_argument("to", metavar="to", help="the email address to spam")
parser.add_argument("subject", help="body of the email to send")
parser.add_argument("body", help="body of the email to send")
parser.add_argument("frommail", metavar="from", help="mail from. Not all SMTP server accepts this, prefer using provided email address")
parser.add_argument("host", help="the SMTP server host")
parser.add_argument("port", type=int, help="the SMTP server port")
parser.add_argument("--ssl", help="the SMTP server requires SSL", action="store_true")
parser.add_argument("--username", help="username for SMTP server auth")
parser.add_argument("--password", help="password for SMTP server auth")
args = parser.parse_args()
def showstatus(message, type="new"):
    """Prefix *message* with an icon and the current HH:MM:SS timestamp.

    type "warn" selects the "!" icon; anything else keeps the default "*".
    Returns the decorated string (does not print it).
    """
    now = datetime.datetime.now().strftime("%H:%M:%S")
    icon = "*"
    if type == "warn":
        icon = "!"
    elif type == "new":
        # Fix: the original wrote `icon == "*"`, a no-op comparison whose
        # result was discarded; the assignment was clearly intended
        # (behavior is unchanged since icon already defaults to "*").
        icon = "*"
    message = "[" + icon + "][" + now + "]" + message
    return message
def wrapsbrace(string, endspace=False):
    """Wrap *string* in square brackets, with a trailing space if asked."""
    suffix = " " if endspace == True else ""
    return "[" + string + "]" + suffix
def sleep(x):
    """Sleep for x seconds; a Ctrl-C during the wait exits the program.

    Note: Python 2 print statement below, matching the rest of the script.
    """
    try:
        time.sleep(x)
    except KeyboardInterrupt:
        print "\r" + showstatus(wrapsbrace("except", True) + "KeyboardInterrupt thrown! Exiting . . .", "warn")
        exit()
def main():
    """Send the configured message in an unbounded loop.

    Each iteration opens a fresh (optionally SSL) SMTP connection,
    optionally authenticates, sends one message, and reports the count.
    Transient errors (disconnect, response errors) retry; hard errors
    (refused sender/recipient, auth/connect/HELO failures) exit.
    """
    print showstatus(wrapsbrace("spammer-init", True) + "Spammer target: {}".format(args.to))
    print showstatus(wrapsbrace("info", True) + "Message length: {}".format(str(len(args.body))))
    i = 1
    while True:
        try:
            server = smtplib.SMTP(host=args.host, port=args.port)
            if args.ssl:
                # Replaces the plain connection with an SSL one.
                server = smtplib.SMTP_SSL(host=args.host, port=args.port)
            if args.username and args.password:
                server.login(args.username, args.password)
            message = "Subject: {}\r\n\r\n{}".format(args.subject, args.body)
            server.sendmail(args.frommail, args.to, message)
        except smtplib.SMTPServerDisconnected:
            print showstatus(wrapsbrace("SMTPServerDisconnected", True) + "SMTP server unexpectedly disconnected! Trying again . . .", "warn")
            continue
        except smtplib.SMTPResponseException as e:
            print showstatus(wrapsbrace("SMTPResponseException", True) + "SMTP error code: {}, trying again . . .".format(e.smtp_code), "warn")
            continue
        except smtplib.SMTPSenderRefused:
            print showstatus(wrapsbrace("SMTPSenderRefused", True) + "Sender address refused! Exiting . . .", "warn")
            exit()
        except smtplib.SMTPRecipientsRefused:
            print showstatus(wrapsbrace("SMTPRecipientsRefused", True) + "Recipient address refused! Exiting . . .", "warn")
            exit()
        except smtplib.SMTPDataError:
            print showstatus(wrapsbrace("SMTPDataError", True) + "The SMTP server refused to accept the message data! Exiting . . .", "warn")
            exit()
        except smtplib.SMTPConnectError:
            print showstatus(wrapsbrace("SMTPConnectError", True) + "Error while establishing connection with server! Exiting . . .", "warn")
            exit()
        except smtplib.SMTPHeloError:
            print showstatus(wrapsbrace("SMTPHeloError", True) + "The server refused our HELO message! Exiting . . .", "warn")
            exit()
        except smtplib.SMTPAuthenticationError:
            print showstatus(wrapsbrace("SMTPAuthenticationError", True) + "SMTP credential authentication error! Exiting . . .", "warn")
            exit()
        else:
            print showstatus(wrapsbrace("sent", True) + "Mail sent! increment:{}".format(i))
            i += 1
        # NOTE(review): any exception while closing the connection exits
        # the whole program — confirm intended.
        try:
            server.quit()
        except:
            exit()
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print "\r" + showstatus(wrapsbrace("except", True) + "KeyboardInterrupt thrown! Exiting . . .")
        exit()
| true
|
d0e2a7d11407d8773b1a2866156a3d7bafd8700b
|
Python
|
VicDCruz/minimax-game-implementation
|
/Tictactoe.py
|
UTF-8
| 2,163
| 3.90625
| 4
|
[] |
no_license
|
"""
Programa que implementa reglas básicas para jugar el juego de Gato (Tic-Tac-Toe)
"""
from Gameboard import Gameboard
from copy import deepcopy
# Score magnitude returned by evaluate() for a decided game.
SCORE = 10

def canMove(board):
    """Return True if the 3x3 board still has at least one empty cell.

    Empty cells are marked with the tab character. Rewritten from the
    nested-loop/early-return form to the equivalent any() idiom; like the
    original, only the 3x3 region is inspected.
    """
    return any(board[i][j] == "\t" for i in range(3) for j in range(3))
def evaluate(board, player, opponent):
    """Score a 3x3 board from *player*'s perspective.

    Returns SCORE when *player* owns a completed line, -SCORE when
    *opponent* does, and 0 otherwise (ongoing game or draw). The four
    near-identical row/column/diagonal checks of the original are
    collapsed into one loop; lines are examined rows first, then columns,
    then both diagonals, preserving the original checking order.
    """
    lines = [(board[r][0], board[r][1], board[r][2]) for r in range(3)]
    lines += [(board[0][c], board[1][c], board[2][c]) for c in range(3)]
    lines.append((board[0][0], board[1][1], board[2][2]))
    lines.append((board[0][2], board[1][1], board[2][0]))
    for a, b, c in lines:
        if a == b == c:
            # Three equal empty markers also match here; they belong to
            # neither player, so the loop simply continues (as before).
            if a == player:
                return SCORE
            elif a == opponent:
                return -SCORE
    # No completed line for either side.
    return 0
def generateMoves(snapshot, isPlayer):
    """Return one Gameboard copy per legal move available to isPlayer.

    Each returned board is a deep copy of *snapshot* with a single chip
    added on one of the currently empty (tab-marked) cells.
    """
    boards = []
    for row in range(3):
        for col in range(3):
            if snapshot.board[row][col] != "\t":
                continue
            candidate = Gameboard(snapshot.width, snapshot.height,
                                  snapshot.player, snapshot.opponent)
            candidate.board = deepcopy(snapshot.board)
            candidate.addChip(isPlayer, row, col)
            boards.append(candidate)
    return boards
| true
|
9be434c4ebad805e5544f99c21b28934626bfb87
|
Python
|
ISISComputingGroup/ibex_utils
|
/installation_and_upgrade/ibex_install_utils/ca_utils.py
|
UTF-8
| 2,034
| 2.703125
| 3
|
[] |
no_license
|
import os
from genie_python.utilities import dehex_and_decompress
class CaWrapper:
    """
    Wrapper around genie python's channel access class providing some useful abstractions.
    """
    def __init__(self):
        """
        Genie is imported lazily (see _get_genie) because genie_python runs
        from a network drive and must be told which instrument it belongs to.
        """
        self.g = None

    def _get_genie(self):
        # Do import locally (late) as otherwise it writes logs to c:\instrument\var which interferes with VHD deploy.
        if self.g is None:
            from genie_python import genie as g
            self.g = g
            self.g.set_instrument(os.getenv("MYPVPREFIX"), import_instrument_init=False)
        return self.g

    def get_local_pv(self, name):
        """
        Get PV with the local PV prefix appended.

        Args:
            name (str): Name of the PV to get.
        Returns:
            None if the PV was not connected
        """
        return self._get_genie().get_pv(name, is_local=True)

    def get_object_from_compressed_hexed_json(self, name):
        """
        Gets an object from a compressed hexed json PV.

        Args:
            name (str): Name of the PV to get
        Returns:
            None if the PV was not available, otherwise the decoded json object
        """
        raw = self.get_local_pv(name)
        return None if raw is None else dehex_and_decompress(raw)

    def get_blocks(self):
        """
        Returns:
            A collection of blocks, or None if the PV was not connected
        """
        return self._get_genie().get_blocks()

    def cget(self, block):
        """
        Returns:
            The configuration of the given block, or None if not connected.
        """
        return self._get_genie().cget(block)

    def set_pv(self, *args, **kwargs):
        """
        Sets the value of a PV.
        """
        return self._get_genie().set_pv(*args, **kwargs)
| true
|
7fc570f662eb19c3bc930711f8517007c9fa4281
|
Python
|
Bigpig4396/PyTorch-Deep-Deterministic-Policy-Gradient-DDPG
|
/DDPG_GPU.py
|
UTF-8
| 7,533
| 2.90625
| 3
|
[] |
no_license
|
import random
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
class ReplayBuffer:
    """Fixed-capacity ring buffer of (s, a, r, s', done) transitions."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.buffer = []
        self.position = 0

    def push(self, state, action, reward, next_state, done):
        """Store one transition, overwriting the oldest once full."""
        entry = (state, action, reward, next_state, done)
        if len(self.buffer) < self.capacity:
            self.buffer.append(entry)
        else:
            self.buffer[self.position] = entry
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Uniformly sample a batch and return five stacked numpy arrays."""
        chosen = random.sample(self.buffer, batch_size)
        states, actions, rewards, next_states, dones = map(np.stack, zip(*chosen))
        return states, actions, rewards, next_states, dones

    def __len__(self):
        return len(self.buffer)
class QNetwork(nn.Module):
    """State-action value approximator: Q(s, a) -> one scalar per row."""

    def __init__(self, num_inputs, num_actions):
        super(QNetwork, self).__init__()
        # Four fully connected layers narrowing to a single Q estimate.
        self.fc1 = nn.Linear(num_inputs + num_actions, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 128)
        self.fc4 = nn.Linear(128, 1)

    def forward(self, state, action):
        # Concatenate state and action along the feature dimension.
        hidden = torch.cat([state, action], 1)
        for layer in (self.fc1, self.fc2, self.fc3):
            hidden = F.relu(layer(hidden))
        return self.fc4(hidden)
class PolicyNetwork(nn.Module):
    """Deterministic actor: maps a state to an action in [-1, 1]^action_dim."""

    def __init__(self, state_dim, action_dim):
        super(PolicyNetwork, self).__init__()
        self.fc1 = nn.Linear(state_dim, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, action_dim)

    def forward(self, state):
        hidden = F.relu(self.fc1(state))
        hidden = F.relu(self.fc2(hidden))
        # tanh squashes each action component into [-1, 1].
        return torch.tanh(self.fc3(hidden))
class DDPG(object):
def __init__(self, state_dim, action_dim):
self.state_dim = state_dim
self.action_dim = action_dim
self.q_1_net = QNetwork(state_dim, action_dim).to(device)
self.q_2_net = QNetwork(state_dim, action_dim).to(device)
self.target_q_1_net = QNetwork(state_dim, action_dim).to(device)
self.target_q_2_net = QNetwork(state_dim, action_dim).to(device)
for target_param, param in zip(self.target_q_1_net.parameters(), self.q_1_net.parameters()):
target_param.data.copy_(param.data)
for target_param, param in zip(self.target_q_2_net.parameters(), self.q_2_net.parameters()):
target_param.data.copy_(param.data)
self.soft_tau = 1e-2
self.p_net = PolicyNetwork(state_dim, action_dim).to(device)
self.q_criterion = nn.MSELoss()
self.q_1_optimizer = optim.Adam(self.q_1_net.parameters(), lr=1e-3)
self.q_2_optimizer = optim.Adam(self.q_2_net.parameters(), lr=1e-3)
self.p_optimizer = optim.Adam(self.p_net.parameters(), lr=3e-4)
self.gamma = 0.99
def get_action(self, state, epsilon):
a = self.p_net.forward(torch.from_numpy(state).float().to(device))
a = a + epsilon * torch.randn(self.action_dim)
a = torch.clamp(a, min=-1, max=1)
return a.detach().cpu().numpy()
def train(self, batch):
state = batch[0] # array [64 1 2]
action = batch[1] # array [64, ]
reward = batch[2] # array [64, ]
next_state = batch[3]
done = batch[4]
state = torch.from_numpy(state).float().to(device)
action = torch.from_numpy(action).float().view(-1, self.action_dim).to(device)
next_state = torch.from_numpy(next_state).float().to(device)
next_action = self.p_net.forward(next_state).to(device)
reward = torch.FloatTensor(reward).float().unsqueeze(1).to(device)
q1 = self.q_1_net.forward(state, action)
q2 = self.q_2_net.forward(state, action)
# q_min = torch.min(q1, q2)
next_q1 = self.target_q_1_net.forward(next_state, next_action)
next_q2 = self.target_q_2_net.forward(next_state, next_action)
next_q_min = torch.min(next_q1, next_q2)
est_q = reward + self.gamma * next_q_min
q_loss = self.q_criterion(q1, est_q.detach())
self.q_1_optimizer.zero_grad()
q_loss.backward()
self.q_1_optimizer.step()
q_loss = self.q_criterion(q2, est_q.detach())
self.q_2_optimizer.zero_grad()
q_loss.backward()
self.q_2_optimizer.step()
new_a = self.p_net.forward(state)
q1 = self.q_1_net.forward(state, new_a)
q2 = self.q_2_net.forward(state, new_a)
q_min = torch.min(q2, q1)
p_loss = -q_min.mean()
self.p_optimizer.zero_grad()
p_loss.backward()
self.p_optimizer.step()
for target_param, param in zip(self.target_q_1_net.parameters(), self.q_1_net.parameters()):
target_param.data.copy_(target_param.data * (1.0 - self.soft_tau) + param.data * self.soft_tau)
for target_param, param in zip(self.target_q_2_net.parameters(), self.q_2_net.parameters()):
target_param.data.copy_(target_param.data * (1.0 - self.soft_tau) + param.data * self.soft_tau)
def load_model(self):
print('load model')
self.q_1_net = torch.load('DDPG_q_net1.pkl').to(device)
self.q_2_net = torch.load('DDPG_q_net2.pkl').to(device)
self.target_q_1_net = torch.load('DDPG_target_q_net1.pkl').to(device)
self.target_q_2_net = torch.load('DDPG_target_q_net2.pkl').to(device)
self.p_net = torch.load('DDPG_policy_net.pkl').to(device)
def save_model(self):
    """Checkpoint all five networks as pickle files in the working directory."""
    checkpoints = (
        (self.q_1_net, 'DDPG_q_net1.pkl'),
        (self.q_2_net, 'DDPG_q_net2.pkl'),
        (self.target_q_1_net, 'DDPG_target_q_net1.pkl'),
        (self.target_q_2_net, 'DDPG_target_q_net2.pkl'),
        (self.p_net, 'DDPG_policy_net.pkl'),
    )
    for net, path in checkpoints:
        torch.save(net, path)
if __name__ == '__main__':
    # Select the GPU when available, otherwise fall back to the CPU.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    env = gym.make("MountainCarContinuous-v0")
    # env = gym.make("Pendulum-v0")
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    print('state size:', env.observation_space.shape)
    print('action size:', env.action_space.shape)
    agent = DDPG(state_dim, action_dim)
    # NOTE(review): assumes the DDPG_*.pkl checkpoint files already exist in
    # the working directory -- load_model() will fail otherwise; confirm.
    agent.load_model()
    max_epi_iter = 100   # number of training episodes
    max_MC_iter = 200    # max environment steps per episode
    batch_size = 64
    replay_buffer = ReplayBuffer(50000)
    train_curve = []     # accumulated reward per episode, for the final plot
    for epi in range(max_epi_iter):
        state = env.reset()
        acc_reward = 0
        for MC_iter in range(max_MC_iter):
            # print("MC= ", MC_iter)
            env.render()
            # action1 = agent.get_action(state, 1.0-(epi/max_epi_iter))
            # Exploration noise fixed at 0.0 here (pure exploitation).
            action1 = agent.get_action(state, 0.0)
            next_state, reward, done, info = env.step(action1)
            acc_reward = acc_reward + reward
            replay_buffer.push(state, action1, reward, next_state, done)
            state = next_state
            # Start training once the buffer holds more than one batch.
            if len(replay_buffer) > batch_size:
                # print('train')
                agent.train(replay_buffer.sample(batch_size))
            if done:
                break
        print('Episode', epi, 'reward', acc_reward)
        train_curve.append(acc_reward)
        # Periodic checkpoint every 50 episodes (including episode 0).
        if epi % 50 == 0:
            agent.save_model()
    plt.plot(train_curve, linewidth=1, label='DDPG')
    plt.show()
| true
|
7ee9445de3c9556e76f18975565400d96ee3945e
|
Python
|
BGCX067/faint-graphics-editor-svn-to-git
|
/tags/release-0.6/build/genhelp.py
|
UTF-8
| 19,507
| 2.625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Copyright 2012 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import re
import os
# Section-title regexes
title1 = re.compile("^= (.*) =$")
title2 = re.compile("^== (.*) ==$")
title3 = re.compile("^=== (.*) ===$")
# Summary of a page
re_summary = re.compile('^summary\:"(.+)"$')
# Instruction to summarize child pages
re_summarize_children = re.compile('^child_summary$')
# Label for cross-referencing
label = re.compile("^label\:(.*?)$")
# Bullet points for bullet lists
bullet = re.compile("^\* (.*)$")
# Image filename from images/
image_name = re.compile("image\:(.*?)\s")
# Image filename from graphics/
re_graphic = re.compile("graphic\:(.*?)\s")
# Font styles
bold = re.compile("\*(.*?)\*")
italic = re.compile("\'(.*?)\'")
# Centered text
center = re.compile("/(.*?)/")
# Horizontal line
hr = re.compile("^---$")
# Reference to a label
ref = re.compile("(?<!\\\\)-(.*?)(?<!\\\\)-")
# A table row (with one or more cells)
table_row = re.compile("^\|\|.*\|\|$")
# Table style definition
table_style = re.compile("^tablestyle:([1-9])$")
# Table widths definition
table_widths = re.compile("^tablewidths:(.*)$")
# Verbatim text (ignore markup?)
verb_start = "{{{"
verb_end = "}}}"
g_table_style = 0
g_table_widths = []
# Even or odd table row, for styling rows
g_table_odd = False
# Page background
color_background = "#FAFDD5"
# Backgrounds for even and odd table rows
color_table_even = color_background
color_table_odd = "#F9FCC4"
color_link="#4D6DF3"
# Labels found in the parsed files, for cross-referencing
labels = {}
def match_title( line ):
    """Match *line* against the three heading regexes.

    Returns a (rendered_html, level) tuple for the first matching level
    (1-3), or None when the line is not a heading. Note: despite the
    original comment, the return value is a tuple, not the match object.
    """
    for num, title in enumerate( [title1, title2, title3]):
        match = title.match(line)
        if match:
            # Heading text may itself contain inline markup; render it.
            return "".join( [ item.to_html() for item in parse_rest(match.group(1)) ]), num + 1
    return None
class Bullet:
    """A bullet-list item; renders its parsed sub-items inside <li>."""
    def __init__( self, items ):
        self.items = items
    def to_html( self, **kwArgs ):
        return '<li>%s</li>' % "".join( [item.to_html() for item in self.items])
    def __repr__( self ):
        return "Bullet()"


class Image:
    """An inline image from the help's images/ folder."""
    def __init__( self, path ):
        self.path = path
    def to_html( self, **kwArgs ):
        return '<img src="images/%s"></img> ' % self.path
    def __repr__( self ):
        return "Image(%s)" % self.path


class Graphic:
    """An inline image from the shared ../graphics/ folder."""
    def __init__( self, path ):
        self.path = path
    def to_html( self, **kwArgs ):
        return '<img src="../graphics/%s"></img> ' % self.path
    def __repr__( self ):
        return "Graphic(%s)" % self.path


class Label:
    """A named anchor used as a cross-reference target."""
    def __init__( self, label, title, url ):
        # url is "<page>#<name>"; Reference uses it when resolving links.
        self.label = label
        self.title = title
        self.url = url
    def to_html( self, **kwArgs ):
        return '<a name="%s"></a>' % self.label
    def __repr__( self ):
        return "Label(%s)" % self.label


class Text:
    """Plain text; applies a few literal substitutions when rendered."""
    html_replacements = [("\\-", "-"), # Escaped dash/hyphen (i.e. not reference)
                         ("\\", "<br>"), # \ for line-break
                         ("->", "→")] # Ascii arrow as html-right arrow
    def __init__( self, text ):
        self.text = text
    def to_html( self, **kwArgs ):
        html_str = self.text
        for src, replacement in self.html_replacements:
            html_str = html_str.replace(src, replacement)
        return html_str
    def __repr__( self ):
        return "Text(...)"
class Page:
    """A parsed help page: metadata plus the ordered list of render items."""
    def __init__( self, filename, sourcename, title, summary, items ):
        self.items = items
        self.title = title
        self.summary = summary
        self.filename = filename      # output .html file name
        self.sourcename = sourcename  # source .txt file name
    def first_title( self ):
        # Return the text of the first Title item, or "" when none exists.
        for item in self.items:
            if item.__class__ == Title:
                return item.text
        return ""


class ChildSummary:
    """Placeholder item: replaced at write time by a child-page table."""
    def __init__(self):
        pass
    def to_html(self, **kwArgs ):
        return ""
    def __repr__( self ):
        return "ChildSummary"


class Title:
    """A section heading at level 1-3, rendered as <h1>..<h3>."""
    def __init__( self, text, level ):
        self.text = text
        self.level = level
    def to_html( self, **kwArgs ):
        return "<h%d>%s</h%d>" % (self.level, self.text, self.level)
    def __repr__( self ):
        return "Title"
class Paragraph:
    """A blank source line, rendered as a paragraph break."""
    def to_html( self, **kwArgs ):
        return "<p>"


class Tagged:
    """Content wrapped in an HTML tag (b/i/center); content is re-parsed."""
    def __init__( self, tag, content ):
        self.tag = tag
        self.content = content
    def to_html( self, **kwArgs ):
        html = "<%s>%s</%s>" % (self.tag, "".join( item.to_html() for item in parse_rest(self.content)), self.tag)
        return html


class Reference:
    """A -label- cross reference, resolved lazily against the global labels."""
    def __init__( self, name, label=None ):
        self.name = name
        self.target_label = None    # resolved Label, cached on first render
        self.custom_label = label   # optional display-text override
    def to_html( self, **kwArgs ):
        if self.target_label is None:
            self.target_label = labels.get( self.name, None )
        if self.target_label is None:
            # Python 2 print statement: warn and render a visible marker.
            print "Warning: undefined label: %s" % self.name
            return self.name + "??"
        if self.custom_label is not None:
            title = self.custom_label
        else:
            title = self.target_label.title
        return '<a href="%s">%s</a>' % (self.target_label.url, title )
def tbl_str_width(num, widths):
    """Return the width attribute for html table cell *num*.

    Yields 'width="..."' using widths[num], or the empty string when no
    width is specified for that cell index.
    """
    try:
        return 'width="%s"' % widths[num]
    except IndexError:
        return ""
class Table:
    """One table row; cells are ||-separated and re-parsed when rendered."""
    def __init__( self, line ):
        self.cells = line.split("||")[1:-1]
    def to_html( self, **kwArgs ):
        fmt_cells = ["".join( [ item2.to_html() for item2 in item] ) for item in [ parse_rest(item) for item in self.cells ] ]
        row = kwArgs["row"]
        style = kwArgs["tablestyle"]
        tablewidths = kwArgs["tablewidths"]
        # Take at most one configured width per rendered cell.
        widths = []
        for num, cell in enumerate(fmt_cells):
            if num < len(tablewidths):
                widths.append(tablewidths[num])
        html = "<tr>"
        for num, cell in enumerate(fmt_cells):
            if style == 3 and row == 0:
                # Style 3: highlighted header row.
                html = html + '<td bgcolor="#F5E49C" %s>%s</td>' % (tbl_str_width(num, widths), cell )
            elif row % 2 == 0 and ( ( style == 3 and row != 0 ) or style == 2 ):
                # Zebra striping of even rows for styles 2 and 3.
                html = html + '<td bgcolor="%s" %s>%s</td>' % ( color_table_odd, tbl_str_width(num, widths), cell )
            else:
                html = html + '<td %s>%s</td>' % (tbl_str_width(num,widths), cell)
        html = html + "</tr>"
        return html


class TableStyle:
    """A tablestyle:N directive; consumed by write(), renders nothing."""
    def __init__( self, style ):
        self.style = int(style)
    def to_html( self, **kwArgs ):
        return ""


class Instruction:
    """A configuration item for the output"""
    def __init__( self, name, values ):
        self.name = name
        self.values = values
    def to_html( self, **kwArgs ):
        return ""


class Header:
    """'Previous' navigation link at the top of a page."""
    def __init__( self, targetname, link ):
        self.targetname = targetname
        self.link = link
        self.title = ""   # filled in later via set_title()
    def set_title( self, title ):
        self.title = title
    def to_html( self, **kwArgs ):
        return '<a href="%s">Previous</a> - %s<hr>' % (self.link, self.title )


class Footer:
    """'Next' navigation link at the bottom of a page."""
    def __init__( self, targetname, link ):
        self.targetname = targetname
        self.link = link
        self.title = ""   # filled in later via set_title()
    def set_title( self, title ):
        self.title = title
    def to_html( self, **kwArgs ):
        return '<hr><a href="%s">Next</a> - %s' % (self.link, self.title )


class HR:
    """A horizontal rule."""
    def to_html(self, **kwArgs):
        return '<hr>'
def list_split( regex, data, marker ):
    """Split every plain string in *data* on *regex*.

    Each captured group is prefixed with *marker*; empty fragments are
    dropped; strings already tagged (starting with '[') pass through
    unchanged.
    """
    out = []
    for chunk in data:
        if chunk.startswith("["):
            out.append(chunk)
            continue
        for idx, piece in enumerate(regex.split(chunk)):
            if idx % 2 == 1:
                # Odd indices are the regex's captured groups.
                out.append(marker + piece)
            elif piece:
                out.append(piece)
    return out
def to_object( txt ):
    """Convert one marker-prefixed fragment (from list_split) into a render
    item: Reference, Image, Graphic, Tagged, or plain Text."""
    if txt.startswith("[R]"):
        # Reference: optional ",display text" after the label name.
        content = txt[3:]
        content = content.split(',')
        if len(content) == 1:
            return Reference( content[0])
        else:
            return Reference( content[0], content[1])
    if txt.startswith("[IMG]"):
        return Image( txt[5:] )
    if txt.startswith("[GRAPHIC]"):
        return Graphic( txt[9:] )
    if txt.startswith("[I]"):
        return Tagged( 'i', txt[3:] )
    if txt.startswith("[B]"):
        return Tagged( 'b', txt[3:] )
    if txt.startswith("[C]"):
        return Tagged( 'center', txt[3:] )
    return Text(txt)
def parse_rest( rest ):
    """Split inline markup in *rest* (a string or list of strings) and
    return the corresponding list of render items."""
    if rest.__class__ != [].__class__:
        rest = [rest]
    # Order matters: each pass only splits still-untagged fragments.
    rest = list_split( ref, rest, "[R]")
    rest = list_split( image_name, rest, "[IMG]")
    rest = list_split( re_graphic, rest, "[GRAPHIC]")
    rest = list_split( italic, rest, "[I]")
    rest = list_split( bold, rest, "[B]")
    rest = list_split( center, rest, "[C]")
    return [to_object(item) for item in rest ]
def parse_table_widths( s ):
    """Return the list of comma-separated width tokens in *s*, stripped.

    *s* is group(1) of a matched table_widths regex.
    """
    return list(map(str.strip, s.split(",")))
def parse_file( filename, prev, next, labels ):
    """Parse one help-source text file into a Page.

    filename -- source .txt path; prev/next -- neighbouring source names
    (or None) used for Header/Footer navigation; labels -- global dict that
    collects Label items for cross-referencing.
    """
    src = open(filename)
    lines = src.readlines()
    src.close()
    # First line holds 'page:<target.html> "<title>"'.
    page = lines[0]
    target_filename = page.split(":")[1].strip()
    title = target_filename.split('"')[1]
    target_filename = target_filename.split(' ')[0]
    summary = ""
    doc = [] # The list of items in the page
    if prev is not None:
        doc.append(Header( prev, prev.replace(".txt", ".html") ) )
    in_list = False
    verbatim = False
    last_title = None   # most recent Title, used to name labels
    for line in lines[1:]:
        # Verbatim {{{ ... }}} blocks pass text through unparsed.
        if verbatim:
            if line.strip() == "}}}":
                verbatim = False
                continue
            else:
                doc.append(Text(line))
                continue
        if line.strip() == "{{{":
            verbatim = True
            continue
        m = re_summary.match( line )
        if m is not None:
            summary = m.group(1)
            continue
        m = re_summarize_children.match(line)
        if m is not None:
            doc.append( ChildSummary() )
            continue
        m = hr.match( line )
        if m is not None:
            doc.append( HR() )
            continue
        m = match_title( line )
        if m is not None:
            last_title = Title( m[0], m[1] )
            doc.append( last_title )
            continue
        m = bullet.match( line )
        if m is not None:
            doc.append( Bullet( parse_rest( m.group(1) ) ) )
            continue
        m = label.match( line )
        if m is not None:
            # A label borrows the latest section title (or the page title).
            name = m.group(1).strip()
            if last_title is not None:
                labelTitle = last_title.text
            else:
                labelTitle = title
            lbl = Label( name, labelTitle, target_filename + "#" + name )
            doc.append( lbl )
            labels[name] = lbl
            continue
        m = table_style.match( line )
        if m is not None:
            doc.append( TableStyle( m.group(1)) )
            continue
        m = table_widths.match( line )
        if m is not None:
            doc.append( Instruction( "tablewidths", parse_table_widths( m.group(1)) ) )
            continue
        if line.strip() == "":
            doc.append( Paragraph() )
            continue
        m = table_row.match(line)
        if m is not None:
            doc.append( Table( line ) )
            continue
        # Plain content line: split inline markup, then convert fragments.
        content = [line]
        content = list_split( center, content, "[C]")
        content = list_split( image_name, content,"[IMG]")
        content = list_split( re_graphic, content,"[GRAPHIC]")
        content = list_split( bold, content, "[B]")
        content = list_split( italic, content, "[I]")
        content = list_split( ref, content, "[R]")
        for item in content:
            if item.startswith("[IMG]"):
                doc.append( Image( item[5:] ) )
            elif item.startswith("[GRAPHIC]"):
                doc.append( Graphic( item[9:] ) )
            elif item.startswith("[B]"):
                doc.append( Tagged( "b", item[3:] ) )
            elif item.startswith("[U]"):
                doc.append( Tagged( "u", item[3:] ) )
            elif item.startswith("[I]"):
                doc.append( Tagged( "i", item[3:] ) )
            elif item.startswith("[C]"):
                doc.append( Tagged( "center", item[3:] ) )
            else:
                doc.append( to_object( item ) )
    # Last page gets a closing rule instead of a 'Next' footer.
    if next is not None:
        doc.append(Footer( next, next.replace(".txt", ".html") ) )
    else:
        doc.append(HR())
    return Page(target_filename, os.path.split(filename)[-1], title, summary, doc)
def write_child_summary( out, pages, childPages ):
    """Write an HTML table linking each child page with its summary."""
    row_fmt = '<tr><td><a href="%s">%s</a></td><td width="10"></td><td>%s</td></tr>'
    out.write('<table border="0" cellpadding="5">')
    for name in childPages:
        child = pages[name]
        out.write(row_fmt % (child.filename, child.title, child.summary))
    out.write('</table>')
def write( outDir, pages, pageHierarchy, labels ):
    """Render every parsed Page to an .html file in *outDir*.

    Table style/widths/row parity are tracked across items because a run of
    consecutive Table items belongs to a single <table> element.
    """
    global g_table_style
    global g_table_widths
    row_num = 0
    for sourcename in pages:
        print sourcename
        # Reset per-page table settings.
        g_table_style = 0
        g_table_widths = []
        page = pages[sourcename]
        outfile = open(os.path.join(outDir, page.filename), 'w')
        outfile.write('<html><head><title>%s</title></head><body bgcolor="%s" leftmargin="50" link="%s">' % (page.title, color_background, color_link))
        in_table = False
        in_list = False
        for item in page.items:
            if item.__class__ == TableStyle:
                g_table_style = item.style
            if item.__class__ == Instruction:
                if item.name == "tablewidths":
                    g_table_widths = item.values
            if item.__class__ == Table:
                if not in_table:
                    # First row of a run: open the <table> element.
                    row_num = 0
                    if g_table_style == 3 or g_table_style == 2:
                        outfile.write('<table border="0" cellpadding="5" width="80%">' )
                    else:
                        outfile.write('<table border="0" cellpadding="5">' )
                    in_table = True
                    global g_table_odd
                    g_table_odd = not g_table_odd
                else:
                    row_num += 1
            elif in_table:
                # Any non-table item ends the current table.
                outfile.write('</table>')
                row_num = 0
                in_table = False
            if item.__class__ == Bullet:
                if not in_list:
                    in_list = True
                    outfile.write("<ul>\n")
            elif in_list:
                # Any non-bullet item ends the current list.
                outfile.write("</ul>\n")
                in_list = False
            if item.__class__ == ChildSummary:
                write_child_summary(outfile, pages, pageHierarchy[sourcename])
            outfile.write( item.to_html( tablestyle=g_table_style, tablewidths=g_table_widths,row=row_num ) )
        outfile.write("</body></html>")
def _need_generate(srcRoot, dstRoot, sources, other):
for f in [ os.path.join(dstRoot, f) for f in other]:
if not os.path.isfile(f):
# Atleast one non-html page needs to be regenerated
return True
for sourceFile in [ os.path.join(srcRoot, page) for page in sources]:
targetFile = os.path.join(dstRoot, page.replace(".txt", ".html"))
if not os.path.isfile(targetFile) or os.path.getmtime(targetFile) < os.path.getmtime(sourceFile):
# An html page is older than its source text or does not exist
return True
return False
def get_files(contentLines):
    """Return the non-empty, stripped lines with '>' child markers removed."""
    stripped = (line.strip() for line in contentLines)
    return [line.replace(">", "") for line in stripped if line]
def read_contents_source(contents_path):
    """Read the contents file and return its non-empty lines, stripped."""
    # `with` guarantees the handle is closed (the original leaked it).
    with open(contents_path) as f:
        lines = [line.strip() for line in f.readlines()]
    return [line for line in lines if not len(line) == 0]
def write_contents(contentLines, dst_path, pages):
    """Write the contents.dat tree-view index.

    One 'title;filename' row per page, prefixed with '>' for child pages.
    """
    # `with` guarantees the data is flushed and the handle closed
    # (the original never closed the file).
    with open(dst_path, 'w') as contents_file:
        for line in contentLines:
            if line.startswith(">"):
                contents_file.write(">")
            pageInfo = pages[line.replace(">", "")]
            contents_file.write("%s;%s\n" % (pageInfo.title, pageInfo.filename))
def parse_contents(contentLines):
    """Build a {page: [children]} hierarchy from the contents lines.

    Lines starting with '>' are children of the most recent unprefixed
    (main) page; every page gets an entry of its own.
    """
    hierarchy = {}
    current_main = ""
    for raw in contentLines:
        entry = raw.strip()
        if entry.startswith(">"):
            child = entry[1:]
            hierarchy[child] = []
            hierarchy[current_main].append(child)
        else:
            current_main = entry
            hierarchy[entry] = []
    return hierarchy
def run():
    """Generate the help HTML from the source text files, if out of date."""
    helpRoot = "../help"
    helpSource = "../help/source"
    # Retrieve what files to parse as pages from the contents file
    content = read_contents_source(os.path.join(helpSource, "contents.txt"))
    source_pages = get_files(content)
    pageHierarchy = parse_contents(content)
    if not _need_generate(helpSource, helpRoot, source_pages, ["contents.dat"]):
        print " Up to date."
        return
    pages = {}
    # Parse each page, linking prev/next navigation to its neighbours.
    for num, page in enumerate(source_pages):
        if num == 0:
            # No previous page for the first page
            prev = None
        else:
            prev = source_pages[num - 1]
        if num == len(source_pages) -1:
            # No next page for the last page
            next = None
        else:
            next = source_pages[num + 1]
        pages[page] = parse_file(os.path.join(helpSource, page), prev, next, labels)
    # Each page is itself a label target (for page-level references).
    for source_name in source_pages:
        page = pages[source_name]
        labels[ source_name ] = Label( source_name, page.title, page.filename )
    # Resolve navigation titles now that every page has been parsed.
    for sourcepage in pages:
        last = pages[sourcepage].items[-1]
        if last.__class__ == Footer:
            last.set_title( pages[last.targetname].first_title() )
        first = pages[sourcepage].items[0]
        if first.__class__ == Header:
            first.set_title( pages[first.targetname].first_title() )
    # Write all pages
    write( helpRoot, pages, pageHierarchy, labels )
    # Write the contents (for the tree view)
    write_contents( content, os.path.join(helpRoot, "contents.dat"), pages )


if __name__ == '__main__':
    run()
| true
|
298ffe533f97b7753e2a4bb5539f1b098c9dddd8
|
Python
|
HWal/RPi_HAN_Receive_Web_Relay_Output
|
/Python_AMS/copyprices_2.py
|
UTF-8
| 752
| 2.65625
| 3
|
[] |
no_license
|
# Generate log files and save on USB stick
# To be executed as cron job every day at 23:55
import datetime
import time   # NOTE(review): unused import
import sys
import shutil

# Extract today's date as a compact YYYYMMDD string.
toDay = datetime.date.today()
shortDateToday = toDay.strftime("%Y") + toDay.strftime("%m") + toDay.strftime("%d")

# Save file for long term logging of EUR price data to log folder
source = '/var/www/html/data/prices_EUR_today.data'
# NOTE(review): the EUR copy goes to the stick root while the NOK copy below
# goes to the prices/ subfolder -- confirm this asymmetry is intentional.
destination = '/media/pi/ABCDEFGHI/' + shortDateToday + '_EUR.data'
shutil.copy(source, destination)

# Save file for long term logging of NOK price data to log folder
source = '/var/www/html/data/prices_NOK_today.data'
destination = '/media/pi/ABCDEFGHI/prices/' + shortDateToday + '_NOK.data'
shutil.copy(source, destination)
sys.exit()
| true
|
28a800a41007909557f5dc0bb42ebbcea46fc117
|
Python
|
janbohinec/gen-i
|
/AdventOfCode/2017/Day9/day9.py
|
UTF-8
| 997
| 3.328125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 23 17:51:22 2017
@author: Jan
"""
import numpy as np
import pandas as pd
from itertools import count
import time
## Advent of Code 2017, Day 9
data = open('day9.txt', 'r')
def goThrough(data):
    """Score an Advent of Code 2017 day-9 stream.

    Reads the whole stream from the file-like *data* and returns
    (total group score, number of non-cancelled garbage characters).
    """
    stream = data.read()
    skip_next = False      # previous char was '!'
    in_garbage = False     # inside a <...> garbage section
    score = 0
    garbage_count = 0
    depth = 0              # current group nesting level
    for ch in stream:
        if skip_next:
            skip_next = False
        elif ch == '!':
            skip_next = True
        elif ch == '>':
            in_garbage = False
        elif in_garbage:
            garbage_count += 1
        elif ch == '<':
            in_garbage = True
        elif ch == '{':
            depth += 1
        elif ch == '}':
            score += depth
            depth -= 1
    return score, garbage_count
t1 = time.time()
print('Test')
# `data` is the file handle opened above; goThrough consumes it fully.
print(goThrough(data))
print('2nd part test')
print()
print('2nd part anwswer')
t2 = time.time()
# Report total wall-clock runtime.
print('Program run for {0} sec.'.format(round(t2-t1,2)))
| true
|
5df3b43f47496c93088cb0d8a7e915140b605f92
|
Python
|
AdamF42/MLSamples
|
/activationFunctions/softmax.py
|
UTF-8
| 324
| 3.03125
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
def softmax(z):
    """Return the softmax of vector *z*, each entry rounded to 3 decimals.

    Numerical-stability fix: shift by max(z) before exponentiating so that
    np.exp cannot overflow for large inputs; the shift cancels out in the
    ratio and leaves the mathematical result unchanged.
    """
    z = np.asarray(z, dtype=float)
    z_exp = np.exp(z - np.max(z))
    sum_z_exp = np.sum(z_exp)
    return np.array([round(i / sum_z_exp, 3) for i in z_exp])
def graph(formula, x_range):
    """Plot *formula* evaluated over *x_range* (display only; blocks on show)."""
    x = np.array(x_range)
    y = formula(x)
    plt.plot(x, y)
    plt.show()


# Demo: softmax over the integers -6..5.
graph(lambda x: softmax(x), range(-6, 6))
| true
|
f09ed864014b758389aca83e03dc5a41ff8c4dec
|
Python
|
SafonovMikhail/python_000577
|
/001146StepikPyBegin/Stepik001146PyBeginсh11p02st09TASK08_20210203.py
|
UTF-8
| 524
| 3.96875
| 4
|
[
"Apache-2.0"
] |
permissive
|
'''
Дополните приведенный код, используя операторы конкатенации (+) и
умножения списка на число (*), так чтобы он вывел список:
[1, 2, 3, 1, 2, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 8, 9, 10, 11, 12, 13].
numbers1 = [1, 2, 3]
numbers2 = [6]
numbers3 = [7, 8, 9, 10, 11, 12, 13]
print()
'''
numbers1 = [1, 2, 3]
numbers2 = [6]
numbers3 = [7, 8, 9, 10, 11, 12, 13]
# Repeat the first list twice and the single-element list nine times, then
# append the tail, reproducing the required output exactly.
numbers = numbers1 * 2 + numbers2 * 9 + numbers3
print(numbers)
| true
|
47163e09cf543094f1999a93a5d0ff7910fc3d58
|
Python
|
asevans48/DeduplicationUtils
|
/blocking/blocking.py
|
UTF-8
| 7,548
| 3.21875
| 3
|
[] |
no_license
|
"""
A blocking record iterator using minLSH hash. Redis can be used to store
records for matching.
@author Andrew Evans
"""
import re
from datasketch import MinHash, MinHashLSH
from nltk.tokenize import word_tokenize
from sql.record.pgrecord_iterator import PGRecordIterator
class BlockingRecordIterator:
    """
    Blocking record iterator returning the record and matching row ids
    """

    def __init__(self,
                 id_name,
                 cursor_name,
                 conn,
                 query,
                 threshold,
                 storage_config,
                 is_letter=True,
                 is_text=False,
                 session_size=2000,
                 num_perm=128):
        """
        A blocking record iterator
        :param id_name: The id column name
        :param cursor_name: Name of the cursor for streaming
        :param conn: The psycopg2 connection
        :param query: Query to obtain the records
        :param threshold: Jaccard similarity threshold for matching records
        :param storage_config: Storage config for datasketch
        :param is_letter: Whether to use letter shingles instead of words
        :param is_text: Whether this is a text
        :param session_size: Size of the session
        :param num_perm: Number of permutations
        """
        self.__id_name = id_name
        self.__cursor_name = cursor_name
        self.__conn = conn
        self.__query = query
        self.session_size = session_size
        self.__threshold = threshold
        self.__storage_config = storage_config
        self.__record_it = None
        self.__lsh = None
        self.__is_text = is_text
        self.__is_letter = is_letter
        self.__num_perm = num_perm
        self.__curr_it = None
        self.__record_it = None
        self.__hashes = []

    def close(self):
        """
        Close as necessary
        """
        if self.__record_it is not None:
            self.__record_it.close_cursor()
        if self.__conn:
            self.__conn.close()

    def __enter__(self):
        """
        Make closeable
        :return: Closeable version of self
        """
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Close the connection
        """
        self.close()

    def _record_to_string(self, r, delimiter=""):
        """
        Converts a record to a string
        :param r: The record to convert
        :param delimiter: join delimiter defaulting to no length string
        :return: A string representation of all items in the record
        """
        vals = []
        keys = list(r.keys())
        if self.__id_name in keys:
            idx = keys.index(self.__id_name)
            keys.pop(idx)
        for k in keys:
            val = r[k]
            if val:
                vals.append(val)
        return delimiter.join([str(x) for x in vals])

    def _get_record_text(self, r):
        """
        Obtain the record text
        :param r: The record
        :return: The text from the non-id fields
        """
        txt = None
        rc = r.copy()
        keys = rc.keys()
        if self.__id_name in rc.keys():
            rc.pop(self.__id_name)
            keys = rc.keys()
        for key in keys:
            # Bug fix: the original condition was inverted, so the first
            # field produced "None <value>" and later fields overwrote the
            # accumulated text instead of appending to it.
            if txt is None:
                txt = rc[key]
            else:
                txt = "{} {}".format(txt, rc[key])
        return txt

    def _split_str_to_chars(self, val):
        """
        Converts a string to a char set
        :param val: String to convert
        :return: A list of chars
        """
        return list(val) if val else []

    def _split_record_words(self, val):
        """
        Split a record to comparative words
        :param val: Split a record to words instead of letters
        :return: The words
        """
        rstr = self._record_to_string(val, " ")
        return rstr.split(" ")

    def _create_word_shingles(self, val):
        """
        Create shingles with word tokenizer.
        :param val: The value to split
        :return: Obtain word shingles
        """
        return word_tokenize(val)

    def _get_shingle(self, r):
        """
        Obtain the shingle for lsh
        :param r: The input row
        :return: resulting set
        """
        if self.__is_letter:
            rstr = self._record_to_string(r)
            rstr = re.sub("\s+", "", rstr)
            cset = self._split_str_to_chars(rstr)
        elif self.__is_text:
            txt = self._get_record_text(r)
            # Bug fix: str has no .trim(); use .strip() to drop blanks.
            cset = [
                x for x in txt.split(" ") if x is not None and len(x.strip()) > 0]
        else:
            cset = self._split_record_words(r)
        return cset

    def _get_min_hash(self, r):
        """
        :param r: The incoming row
        :return: resulting min hash
        """
        cset = self._get_shingle(r)
        m = MinHash(self.__num_perm)
        for c in cset:
            m.update(c.encode('utf-8'))
        return m

    def _create_hashes(self, it):
        """
        Create the hashes and insert into session.
        :param it: The record iterator
        :return: Whether the end was reached or not
        """
        i = 0
        run = True
        with self.__lsh.insertion_session() as session:
            while i < self.session_size and run:
                try:
                    r = next(it)
                    keys = r.keys()
                    if self.__id_name in keys:
                        key_val = r[self.__id_name]
                        m = self._get_min_hash(r)
                        session.insert(key_val, m)
                        i += 1
                except StopIteration:
                    run = False
        return run

    def setup_lsh(self):
        """
        Create Minhash lsh
        """
        if self.__storage_config:
            self.__lsh = MinHashLSH(threshold=self.__threshold, num_perm=self.__num_perm, storage_config=self.__storage_config)
        else:
            raise ValueError("Storage Backend Required Due to Use of Session")

    def get_iter(self):
        """
        Obtain a record iterator and the iterator itself
        :return: The iterator class and the iterator itself
        """
        it = PGRecordIterator(
            self.__conn, self.__query, itersize=self.session_size, name=self.__cursor_name)
        return (it, iter(it))

    def __iter__(self):
        """
        Create the iterator
        :return: self, after hashing all records and reopening the cursor
        """
        if self.__curr_it is None:
            self.setup_lsh()
            itcls, it = self.get_iter()
            self._create_hashes(it)
            # Re-open under a new cursor name for the matching pass.
            it.close_cursor()
            cname = "{}{}".format(self.__cursor_name, "_matches")
            it.set_cursor_name(cname)
            it = iter(itcls)
            self.__curr_it = it
            self.__record_it = itcls
        return self

    def __next__(self):
        """
        Get the next record and related hash.
        :return: A tuple of the record and the related hash ids
        """
        nrow = next(self.__curr_it, None)
        if not nrow:
            raise StopIteration
        m = self._get_min_hash(nrow)
        vals = self.__lsh.query(m)
        return (nrow, vals)
| true
|
96b84dccbd3467dc35cefc29ea18acf671a80139
|
Python
|
thesniya/pythonProgram
|
/language_fundamentals/flow_control/if-else_samples/if_else.py
|
UTF-8
| 386
| 4.1875
| 4
|
[] |
no_license
|
'''num=int(input('enter value'))
if(num>0):
    print('num is positive')
elif(num<0):
    print('num is negative')
else:
    print('num is 0')'''
# Read two integers and report which is greater (or that they are equal).
num1=int(input('enter 1st value'))
num2=int(input('enter 2nd value'))
if(num1>num2):
    print(num1,'is greater')
elif(num1<num2):
    print(num2,'is greater')
elif(num1==num2):
    print('both are equal')
else:
    # NOTE(review): unreachable -- >, < and == cover all int comparisons.
    print('invalid number')
| true
|
149b107ded76f17220dc8057927a0d024fe88523
|
Python
|
emersonff/CMEECourseWork
|
/Week2/Code/lc1.py
|
UTF-8
| 1,695
| 4.03125
| 4
|
[] |
no_license
|
#!/usr/bin/env python3
"""Write three different
lists containing the latin names, common names and mean body masses for
each species in birds, respectively"""
#(1) Write three separate list comprehensions that create three different
# lists containing the latin names, common names and mean body masses for
# each species in birds, respectively.
# (2) Now do the same using conventional loops (you can choose to do this
# before 1 !).
__appname__ = ""
__author__ = "Xiang Li"
__version__ = "0.0.1"
__license__ = "none"
###imports
import sys
###global variables
birds = ( ('Passerculus sandwichensis','Savannah sparrow',18.7),
('Delichon urbica','House martin',19),
('Junco phaeonotus','Yellow-eyed junco',19.5),
('Junco hyemalis','Dark-eyed junco',19.6),
('Tachycineata bicolor','Tree swallow',20.2),
)
###functions
def lc():
    """List comprehensions: print latin names, common names and masses."""
    latin_names = [latin for (latin, _common, _mass) in birds]
    common_names = [common for (_latin, common, _mass) in birds]
    masses = [mass for (_latin, _common, mass) in birds]
    print(latin_names)
    print(common_names)
    print(masses)
    return 0
def loops():
    """Conventional loops: print latin names, common names and masses."""
    latin_names, common_names, masses = [], [], []
    for latin, common, mass in birds:
        latin_names.append(latin)
        common_names.append(common)
        masses.append(mass)
    print(latin_names)
    print(common_names)
    print(masses)
    return 0
def main(argv):
    """Run both demonstrations: comprehensions first, then loops."""
    lc()
    print("\n")
    loops()
    return 0

if __name__ == "__main__":
    # Exit with main's status code, per the script convention.
    status = main(sys.argv)
    sys.exit(status)
| true
|
53a02ab90d437f39c919127f152aa4b3b3dae5ac
|
Python
|
Arturo-Valdez/PYTHON
|
/Funciones/ejerciciosb2/ejercicio3.py
|
UTF-8
| 352
| 4.59375
| 5
|
[] |
no_license
|
"""
Programa que compruebe si una variable esta vacia y si esta vacia
, rellenarla con texto en munusculas y mostrarlo en mayusculas
"""
texto = ""
if len(texto.strip()) <= 0:#strip sirve para eliminar espacios
texto = "hola soy un texto en minusculas"
print(texto.upper())
else:
print(f"La variable tiene contenido {texto}")
| true
|
52225714e0632db507634a06381631b6e2612f50
|
Python
|
albusdemens/Twitter-mining-project
|
/Exam_ Python code file/s131135\Twitter_Topic_Mining.py
|
UTF-8
| 2,983
| 3.46875
| 3
|
[
"MIT",
"Python-2.0"
] |
permissive
|
# This program try to answer what people are talking about right now by the following two steps
# First, grab the most popular topics in Twitter
# Second, mine the tweets of a specific topic to have a deeper looking inside the trending issues
# It will not work unless you fill in belowing empty string values that are defined as placeholders.
# Go to http://dev.twitter.com/apps/new to create an app and get values for these credentials that you'll need to provide
# It takes you less than ten minutes to create the necessary app and get accesss to mining an exiting world
# NOTE: Python 2 source (print statements below).
import twitter,json,nltk
from collections import Counter
import matplotlib.pyplot as plt

# Obtain the access to data in Twitter
OAUTH_TOKEN = ''
OAUTH_TOKEN_SECRET = ''
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
my_api = twitter.Twitter(auth=auth)

# Gain the 10 most popular topics around the world and in your chosen place, in this case UK. Try to find common ones
# According to The Yahoo! Where On Earth, the entire world's ID is 1, while different places's IDs vary
# See more information at http://developer.yahoo.com/geo/geoplanet/guide/concepts.html
WORLD_ID = 1
PLACE_ID = 23424975 #UK e.g. 23424977 #US
# Denmark 23424796, doesn't work in this case. return to details: {"errors":[{"message":"Sorry, that page does not exist","code":34}]}
world_trends = my_api.trends.place(_id=WORLD_ID)
place_trends = my_api.trends.place(_id=PLACE_ID)
print json.dumps(world_trends, indent=1)
print
print json.dumps(place_trends, indent=1)

# See if there exist common trends between two data sets
world_trends_set = set([trend['name'] for trend in world_trends[0]['trends']])
place_trends_set = set([trend['name'] for trend in place_trends[0]['trends']])
common_trends = place_trends_set.intersection(world_trends_set)
print common_trends

# Set this variable q to a trending topic. It is strongly recommanded picking one from the result of codes above
# The query below, 'Joe Hart' was a trending topic when I was running the program
# Grab tweets on topic q, answer "what are people talking about the topic right now?" by analysing content of tweets
topic = 'Joe Hart'
num = 100
mining_results = my_api.search.tweets(q=topic, count=num)
statuses = mining_results['statuses']
status_texts = [ status['text']
                 for status in statuses ]
words = [ w
          for t in status_texts
          for w in t.split() ]
# Lexical-diversity statistics over the fetched tweets.
w_length=len(words)
unique_w_length=len(set(words))
print "words length is", w_length
print "unique words length is" ,unique_w_length
print "lexical diversity is" ,1.0*unique_w_length/w_length
print
# NOTE(review): slicing freq_dist.keys() assumes the old nltk API where keys
# were ordered by frequency -- confirm against the nltk version in use.
freq_dist=nltk.FreqDist(words)
print freq_dist.keys()[:20]
print
print freq_dist.keys()[-20:]
# Zipf-style log-log plot of word frequencies by rank.
word_counts = sorted(Counter(words).values(), reverse=True)
plt.loglog(word_counts)
plt.ylabel("Frequency")
plt.xlabel("Word Rank")
| true
|
5f0ca097f5c907dc1bea3a364880b50a47b6c142
|
Python
|
Amnay/ntt
|
/software_python/nttFuncs.py
|
UTF-8
| 2,335
| 2.734375
| 3
|
[] |
no_license
|
from nttUtils import *
def addPadding(n, vec):
    """Return a copy of *vec* right-padded with zeros to length *n*."""
    padded = list(vec)
    padded += [0] * (n - len(padded))
    return padded
def delPadding(vec, m, n = 1):
    """Trim *vec* to the m + n - 1 coefficients of an (m x n) product."""
    return vec[:m + n - 1]
def preprocess(a, b):
    """Zero-pad *a* and *b* to the next power of two >= len(a)+len(b)-1."""
    target = int(math.pow(2, math.ceil(math.log(len(a) + len(b) - 1, 2))))
    return addPadding(target, a), addPadding(target, b)
def params(*vectors):
    """Choose a modulus (max over find_mod) and a primitive root for
    transforms of length len(vectors[0])."""
    mod = max(find_mod(vec) for vec in vectors)
    return find_root(len(vectors[0]), mod), mod
def run(a, b, transform, product, Itransform):
    """Multiply polynomials `a` and `b` via the supplied transform pair.

    Pipeline: pad to a power of two, forward-transform both, multiply
    pointwise, inverse-transform, then trim the padding.
    Returns (coefficients, root, mod).
    """
    padded_a, padded_b = preprocess(a, b)
    root, mod = params(padded_a, padded_b)
    freq_a = transform(padded_a, root, mod)
    freq_b = transform(padded_b, root, mod)
    freq_product = product(freq_a, freq_b, mod)
    coefficients = Itransform(freq_product, root, mod)
    trimmed = delPadding(coefficients, len(a), len(b))
    return (trimmed, root, mod)
def pointValue_product(C, D, mod):
    """Pointwise (Hadamard) product of two transformed vectors, reduced mod `mod`."""
    result = []
    for left, right in zip(C, D):
        result.append((left * right) % mod)
    return result
def straight_tf(vector, root, mod):
    """Naive O(n^2) number-theoretic transform over Z_mod.

    Entry k of the result is sum_j vector[j] * root^(k*j), reduced mod `mod`.
    Returns a new list; the input is not modified.
    """
    size = len(vector)
    return [
        sum(coeff * pow(root, k * j, mod) for j, coeff in enumerate(vector)) % mod
        for k in range(size)
    ]
def straight_Itf(vector, root, mod):
    """Inverse of straight_tf: transform with root^{-1}, then scale every
    entry by n^{-1} mod `mod`."""
    inv_root = reciprocal(root, mod)
    inv_n = reciprocal(len(vector), mod)
    return [(coeff * inv_n) % mod for coeff in straight_tf(vector, inv_root, mod)]
def radix2_tf(vector, root, mod):
    """Iterative radix-2 (Cooley-Tukey style) NTT of `vector` over Z_mod.

    The input is bit-reverse permuted first, then butterflies are applied
    level by level.  Assumes len(vector) is a power of two and `root` is an
    appropriate root of unity for that length — TODO confirm against callers.
    Returns a new list; the input is not modified.
    """
    b = bitRevCopy(vector)
    n = len(b)
    for s in range(1, int(math.log(n,2)) + 1): # level from bottom to top
        m = pow(2,s) # number of coeffs per node
        # NOTE(review): this power is not reduced mod `mod`, so g_m may be a
        # huge integer; correctness is preserved by the later `% mod` steps.
        g_m = pow(root, int(n/m)) # root^[nodes on that level]
        for k in range(0, n, m): # node index 0:m:n (skip m)
            g = 1 # running twiddle factor for this node
            for j in range(int(m/2)): # each coeff within half-node
                left = b[k + j]
                right = (g * b[k + j + int(m/2)]) % mod
                b[k + j] = (left + right) % mod # first half
                b[k + j + int(m/2)] = (left - right) % mod # second half
                g = (g * g_m) % mod
    return b
def radix2_Itf(vector, root, mod):
    """Inverse radix-2 NTT: run the forward transform with root^{-1}, then
    scale every entry by n^{-1} mod `mod`."""
    inv_root = reciprocal(root, mod)
    inv_n = reciprocal(len(vector), mod)
    transformed = radix2_tf(vector, inv_root, mod)
    return [(coeff * inv_n) % mod for coeff in transformed]
| true
|
c4aa69f69a687b1cb250501f577fba85789fda25
|
Python
|
Alex-GCX/multitask
|
/processing/processing-pool.py
|
UTF-8
| 1,054
| 3.25
| 3
|
[] |
no_license
|
from multiprocessing import Pool
import time
import os
import random
def worker(msg):
    """Pool task: sleep a random moment, then demonstrate exception handling.

    The first bad `print(1 + 'end')` is caught; the second is not, so the
    worker always terminates with an uncaught TypeError — this is the demo's
    point (apply_async silently swallows it).
    """
    t_begin = time.time()
    print('----------%s开始执行,进程号%d' % (msg, os.getpid()))
    time.sleep(random.random())
    t_finish = time.time()
    print('----------%s执行结束, 耗时%0.2f' % (msg, (t_finish - t_begin)))
    # Exception demo below.
    print('捕获下面的print异常前')
    try:
        print(1 + 'end')
    except Exception as e:
        print('捕获到异常')
    print('不捕获下面的print异常')
    print(1 + 'end')
    print('不捕获异常后')  # unreachable: the line above always raises
def main():
    """Run seven jobs through a three-process pool and wait for them all."""
    # At most three workers run concurrently; an idle process picks up the
    # next queued job.
    job_pool = Pool(3)
    for task_id in range(1, 8):
        job_pool.apply_async(worker, (task_id, ))
    print('------start------')
    # Stop accepting new jobs, then block until every submitted job is done.
    job_pool.close()
    job_pool.join()  # join() must come after close()
    print('------end------')
# Entry point: run the pool demo only when executed directly.
if __name__ == '__main__':
    main()
| true
|
ddfe62fb8fabf25b39d05f8bd552937f495c8dd0
|
Python
|
SWU-1008/swu-car
|
/src/vcu_pkg/scripts/vcu_control_node.py
|
UTF-8
| 733
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 发布 /turtle1/cmd_vel 话题,消息类型 geometry_msg::Twist
import rospy
from geometry_msgs.msg import Twist
from Can_Utils import CanUtil
can_util = CanUtil()
def callback(msg):
    """Forward an incoming Twist command to the CAN bus, then log it."""
    can_util.drive(msg)
    rospy.loginfo(msg)
def vuc_controller():
    """Initialise the VCU control node and subscribe to velocity commands."""
    # Start the ROS node; anonymous=True appends a unique suffix to the name.
    rospy.init_node('vuc_control_node', anonymous=True)
    # Listen for geometry_msgs/Twist messages; queue_size=1 keeps only the
    # most recent command.
    topic = '/turtle1/cmd_vel'
    rospy.Subscriber(topic, Twist, callback, queue_size=1)
    # Block until the node is shut down.
    rospy.spin()
# Entry point: start the node; exit quietly when ROS interrupts us.
if __name__ == '__main__':
    try:
        vuc_controller()
    except rospy.ROSInterruptException:
        pass
| true
|
26a8dfd57a4226b5ea9a9f70652358118a723f54
|
Python
|
MSchauperl/propertyestimator
|
/propertyestimator/substances.py
|
UTF-8
| 19,960
| 3.171875
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
"""
An API for defining and creating substances.
"""
import abc
import math
from enum import Enum
import numpy as np
from propertyestimator import unit
from propertyestimator.utils.serialization import TypedBaseModel
class Substance(TypedBaseModel):
    """Defines the components, their amounts, and their roles in a system.

    Examples
    --------
    A neat liquid containing only a single component:

    >>> liquid = Substance()
    >>> liquid.add_component(Substance.Component(smiles='O'), Substance.MoleFraction(1.0))

    A binary mixture containing two components, where the mole fractions are explicitly stated:

    >>> binary_mixture = Substance()
    >>> binary_mixture.add_component(Substance.Component(smiles='O'), Substance.MoleFraction(0.2))
    >>> binary_mixture.add_component(Substance.Component(smiles='CO'), Substance.MoleFraction(0.8))

    The infinite dilution of one molecule within a bulk solvent or mixture may also be specified
    by defining the exact number of copies of that molecule, rather than a mole fraction:

    >>> benzene = Substance.Component(smiles='C1=CC=CC=C1', role=Substance.ComponentRole.Solute)
    >>> water = Substance.Component(smiles='O', role=Substance.ComponentRole.Solvent)
    >>>
    >>> infinite_dilution = Substance()
    >>> infinite_dilution.add_component(component=benzene, amount=Substance.ExactAmount(1)) # Infinite dilution.
    >>> infinite_dilution.add_component(component=water, amount=Substance.MoleFraction(1.0))

    In this example we explicitly flag benzene as being the solute and the water component the solvent.
    This enables workflow's to easily identify key molecules of interest, such as the molecule which should
    be 'grown' into solution during solvation free energy calculations.
    """

    class ComponentRole(Enum):
        """An enum which describes the role of a component in the system,
        such as whether the component is a solvent, a solute, a receptor etc.

        These roles are mainly only used by specific protocols to identify
        the correct species in a system, such as when doing docking or performing
        solvation free energy calculations.
        """
        Solvent = 'Solvent'
        Solute = 'Solute'
        Ligand = 'Ligand'
        Receptor = 'Receptor'
        Undefined = 'Undefined'

    class Component(TypedBaseModel):
        """Defines a single component in a system, as well as properties
        such as it's relative proportion in the system.
        """

        @property
        def identifier(self):
            """str: A unique identifier for this component, which is either a
            smiles descriptor or the supplied label."""
            return self._smiles or self._label

        @property
        def label(self):
            """str: A string label which describes this compound, for example, CB8."""
            return self._label

        @property
        def smiles(self):
            """str: The smiles pattern which describes this component, which may be None
            for complex (e.g protein) molecules."""
            return self._smiles

        @property
        def role(self):
            """ComponentRole: The role of this component in the system, such as a
            ligand or a receptor."""
            return self._role

        def __init__(self, smiles=None, label=None, role=None):
            """Constructs a new Component object with either a label or
            a smiles string, but not both.

            Notes
            -----
            The `label` and `smiles` arguments are mutually exclusive, and only
            one can be passed while the other should be `None`.

            Parameters
            ----------
            smiles: str
                A SMILES descriptor of the component
            label: str
                A string label which describes this compound, for example, CB8.
            role: ComponentRole, optional
                The role of this component in the system. If no role is specified,
                a default role of solvent is applied.
            """
            # A label identical to the smiles adds no information — drop it so
            # the mutual-exclusion assertion below holds.
            if label == smiles:
                label = None

            assert ((label is None and smiles is not None) or
                    (label is not None and smiles is None) or
                    (label is None and smiles is None))

            label = label if label is not None else smiles

            self._label = label
            self._smiles = smiles
            self._role = role or Substance.ComponentRole.Solvent

        def __getstate__(self):
            return {
                'label': self.label,
                'smiles': self.smiles,
                'role': self.role
            }

        def __setstate__(self, state):
            self._label = state['label']
            self._smiles = state['smiles']
            self._role = state['role']

        def __str__(self):
            return self.identifier

        def __hash__(self):
            return hash((self.identifier, self._role))

        def __eq__(self, other):
            return hash(self) == hash(other)

        def __ne__(self, other):
            return not (self == other)

    class Amount(abc.ABC):
        """An abstract representation of the amount of a given component
        in a substance.
        """

        @property
        def value(self):
            """The value of this amount."""
            return self._value

        @property
        def identifier(self):
            """A string identifier for this amount."""
            raise NotImplementedError()

        def __init__(self, value=None):
            """Constructs a new Amount object."""
            self._value = value

        @abc.abstractmethod
        def to_number_of_molecules(self, total_substance_molecules, tolerance=None):
            """Converts this amount to an exact number of molecules

            Parameters
            ----------
            total_substance_molecules: int
                The total number of molecules in the whole substance. This amount
                will contribute to a portion of this total number.
            tolerance: float, optional
                The tolerance with which this amount should be in. As an example,
                when converting a mole fraction into a number of molecules, the
                total number of molecules may not be sufficiently large enough to
                reproduce this amount.

            Returns
            -------
            int
                The number of molecules which this amount represents,
                given the `total_substance_molecules`.
            """
            raise NotImplementedError()

        def __getstate__(self):
            return {'value': self._value}

        def __setstate__(self, state):
            self._value = state['value']

        def __str__(self):
            return self.identifier

        def __eq__(self, other):
            return np.isclose(self._value, other.value)

        def __ne__(self, other):
            return not (self == other)

        def __hash__(self):
            return hash(self.identifier)

    class MoleFraction(Amount):
        """Represents the amount of a component in a substance as a
        mole fraction."""

        @property
        def value(self):
            """float: The value of this amount."""
            return super(Substance.MoleFraction, self).value

        @property
        def identifier(self):
            return f'{{{self._value:.6f}}}'

        def __init__(self, value=1.0):
            """Constructs a new MoleFraction object.

            Parameters
            ----------
            value: float
                A mole fraction in the range (0.0, 1.0]
            """
            if value <= 0.0 or value > 1.0:
                raise ValueError('A mole fraction must be greater than zero, and less than or '
                                 'equal to one.')

            # The identifier only carries six decimals, so smaller fractions
            # would collapse to an all-zero identifier.
            if math.floor(value * 1e6) < 1:
                raise ValueError('Mole fractions are only precise to the sixth '
                                 'decimal place within this class representation.')

            super().__init__(value)

        def to_number_of_molecules(self, total_substance_molecules, tolerance=None):

            # Determine how many molecules of each type will be present in the system.
            number_of_molecules = self._value * total_substance_molecules
            fractional_number_of_molecules = number_of_molecules % 1

            # An exact .5 fraction is deliberately truncated rather than
            # banker's-rounded, so ties always round down.
            if np.isclose(fractional_number_of_molecules, 0.5):
                number_of_molecules = int(number_of_molecules)
            else:
                number_of_molecules = int(round(number_of_molecules))

            if number_of_molecules == 0:
                raise ValueError('The total number of substance molecules was not large enough, '
                                 'such that this non-zero amount translates into zero molecules '
                                 'of this component in the substance.')

            if tolerance is not None:
                mole_fraction = number_of_molecules / total_substance_molecules

                if abs(mole_fraction - self._value) > tolerance:
                    raise ValueError(f'The mole fraction ({mole_fraction}) given a total number of molecules '
                                     f'({total_substance_molecules}) is outside of the tolerance {tolerance} '
                                     f'of the target mole fraction {self._value}')

            return number_of_molecules

    class ExactAmount(Amount):
        """Represents the amount of a component in a substance as an
        exact number of molecules.

        The expectation is that this amount should be used for components which
        are infinitely dilute (such as ligands in binding calculations), and hence
        do not contribute to the total mole fraction of a substance"""

        @property
        def value(self):
            """int: The value of this amount."""
            return super(Substance.ExactAmount, self).value

        @property
        def identifier(self):
            return f'({int(round(self._value)):d})'

        def __init__(self, value=1):
            """Constructs a new ExactAmount object.

            Parameters
            ----------
            value: int
                An exact number of molecules.
            """
            if not np.isclose(int(round(value)), value):
                raise ValueError('The value must be an integer.')

            super().__init__(value)

        def to_number_of_molecules(self, total_substance_molecules, tolerance=None):
            return self._value

    @property
    def identifier(self):
        """str: A unique str representation of this substance, which encodes all components
        and their amounts in the substance."""
        # Components are sorted by identifier so the string is order-independent.
        sorted_component_identifiers = [component.identifier for component in self._components]
        sorted_component_identifiers.sort()

        identifier_split = []

        for component_identifier in sorted_component_identifiers:

            component_amounts = sorted(self._amounts[component_identifier], key=lambda x: type(x).__name__)
            amount_identifier = ''.join([component_amount.identifier for component_amount in component_amounts])

            identifier = f'{component_identifier}{amount_identifier}'
            identifier_split.append(identifier)

        return '|'.join(identifier_split)

    @property
    def components(self):
        """list of Substance.Component: A list of all of the components in this substance."""
        return self._components

    @property
    def number_of_components(self):
        """int: The number of different components in this substance."""
        return len(self._components)

    def __init__(self):
        """Constructs a new Substance object."""
        # Maps a component identifier to the frozenset of its amounts.
        self._amounts = {}
        self._components = []

    @classmethod
    def from_components(cls, *components):
        """Creates a new `Substance` object from a list of components.
        This method assumes that all components should be present with
        equal mole fractions.

        Parameters
        ----------
        components: Substance.Component or str
            The components to add to the substance. These may either be full
            `Substance.Component` objects or just the smiles representation
            of the component.

        Returns
        -------
        Substance
            The substance containing the requested components in equal amounts.
        """
        if len(components) == 0:
            raise ValueError('At least one component must be specified')

        mole_fraction = 1.0 / len(components)

        return_substance = cls()

        for component in components:

            if isinstance(component, str):
                component = Substance.Component(smiles=component)

            return_substance.add_component(component, Substance.MoleFraction(mole_fraction))

        return return_substance

    def add_component(self, component, amount):
        """Add a component to the Substance. If the component is already present in
        the substance, then the mole fraction will be added to the current mole
        fraction of that component.

        Parameters
        ----------
        component : Substance.Component
            The component to add to the system.
        amount : Substance.Amount
            The amount of this component in the substance.
        """
        assert isinstance(component, Substance.Component)
        assert isinstance(amount, Substance.Amount)

        if isinstance(amount, Substance.MoleFraction):

            # Guard against the total mole fraction exceeding 1.0 once this
            # amount is included.
            total_mole_fraction = amount.value

            for component_identifier in self._amounts:

                total_mole_fraction += sum([amount.value for amount in self._amounts[component_identifier] if
                                            isinstance(amount, Substance.MoleFraction)])

            if np.isclose(total_mole_fraction, 1.0):
                total_mole_fraction = 1.0

            if total_mole_fraction > 1.0:
                raise ValueError(f'The total mole fraction of this substance {total_mole_fraction} exceeds 1.0')

        if component.identifier not in self._amounts:
            self._components.append(component)

        existing_amount_of_type = None

        all_amounts = [] if component.identifier not in self._amounts else self._amounts[component.identifier]
        remaining_amounts = []

        # Check to see if an amount of the same type already exists in
        # the substance, such that this amount should be appended to it.
        for existing_amount in all_amounts:

            if not type(existing_amount) is type(amount):
                remaining_amounts.append(existing_amount)
                continue

            existing_amount_of_type = existing_amount
            break

        if existing_amount_of_type is not None:
            # Append any existing amounts to the new amount.
            amount = type(amount)(existing_amount_of_type.value + amount.value)

        remaining_amounts.append(amount)

        self._amounts[component.identifier] = frozenset(remaining_amounts)

    def get_amounts(self, component):
        """Returns the amounts of the component in this substance.

        Parameters
        ----------
        component: str or Substance.Component
            The component (or it's identifier) to retrieve the amount of.

        Returns
        -------
        list of Substance.Amount
            The amounts of the component in this substance.
        """
        assert isinstance(component, str) or isinstance(component, Substance.Component)
        identifier = component if isinstance(component, str) else component.identifier

        return self._amounts[identifier]

    def get_molecules_per_component(self, maximum_molecules, tolerance=None):
        """Returns the number of molecules for each component in this substance,
        given a maximum total number of molecules.

        Parameters
        ----------
        maximum_molecules: int
            The maximum number of molecules.
        tolerance: float, optional
            The tolerance within which this amount should be represented. As
            an example, when converting a mole fraction into a number of molecules,
            the total number of molecules may not be sufficiently large enough to
            reproduce this amount.

        Returns
        -------
        dict of str and int
            A dictionary of molecule counts per component, where each key is
            a component identifier.
        """
        number_of_molecules = {}
        remaining_molecule_slots = maximum_molecules

        # Exact amounts claim their molecules up front; mole fractions then
        # share whatever slots remain.
        for component in self._components:

            amounts = self._amounts[component.identifier]

            for amount in amounts:

                if not isinstance(amount, Substance.ExactAmount):
                    continue

                remaining_molecule_slots -= amount.value

        if remaining_molecule_slots < 0:
            raise ValueError(f'The required number of molecules {maximum_molecules - remaining_molecule_slots} '
                             f'exceeds the provided maximum number ({maximum_molecules}).')

        for component in self._components:

            number_of_molecules[component.identifier] = 0

            for amount in self._amounts[component.identifier]:

                number_of_molecules[component.identifier] += amount.to_number_of_molecules(remaining_molecule_slots,
                                                                                           tolerance)

        return number_of_molecules

    @staticmethod
    def calculate_aqueous_ionic_mole_fraction(ionic_strength):
        """Determines what mole fraction of ions is needed to yield
        an aqueous system of a given ionic strength.

        Parameters
        ----------
        ionic_strength: unit.Quantity
            The ionic string in units of molar.

        Returns
        -------
        float
            The mole fraction of ions.
        """

        # Taken from YANK:
        # https://github.com/choderalab/yank/blob/4dfcc8e127c51c20180fe6caeb49fcb1f21730c6/Yank/pipeline.py#L1869
        water_molarity = (998.23 * unit.gram / unit.litre) / (18.01528 * unit.gram / unit.mole)
        ionic_mole_fraction = ionic_strength / (ionic_strength + water_molarity)

        return ionic_mole_fraction

    def __getstate__(self):
        return {
            'components': self._components,
            'amounts': self._amounts
        }

    def __setstate__(self, state):
        self._components = state['components']
        self._amounts = state['amounts']

    def __str__(self):
        return self.identifier

    def __hash__(self):
        # Unlike `identifier`, the hash also folds in each component's role.
        sorted_component_identifiers = [component.identifier for component in self._components]
        sorted_component_identifiers.sort()

        component_by_id = {component.identifier: component for component in self._components}

        string_hash_split = []

        for identifier in sorted_component_identifiers:

            component_role = component_by_id[identifier].role

            component_amounts = sorted(self._amounts[identifier], key=lambda x: type(x).__name__)
            amount_identifier = ''.join([component_amount.identifier for component_amount in component_amounts])

            string_hash_split.append(f'{identifier}_{component_role}_{amount_identifier}')

        string_hash = '|'.join(string_hash_split)

        return hash(string_hash)

    def __eq__(self, other):
        return isinstance(other, Substance) and hash(self) == hash(other)

    def __ne__(self, other):
        return not (self == other)
| true
|
feba3827a20502ff9a19c8a2fc5eb0e65275b383
|
Python
|
foxscotch/advent-of-code
|
/2017/09/p1.py
|
UTF-8
| 1,181
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
# Python 3.6.1
# Requires: anytree 2.4.2
from anytree import AnyNode as Node, PreOrderIter
def get_input():
    """Read input.txt and return its group structure with garbage sections,
    escaped characters, and commas stripped out."""
    groups = ""
    with open("input.txt", "r") as f:
        stream = f.read()

    in_garbage = False
    chars = iter(stream)
    for char in chars:
        if in_garbage:
            if char == ">":
                in_garbage = False
            elif char == "!":
                next(chars, None)  # '!' escapes (skips) the next character
        elif char == "<":
            in_garbage = True
        elif char != ",":
            groups += char
    return groups
def build_tree(groups):
    """Build an anytree hierarchy from a brace string.

    The outermost braces are stripped; each '{' opens a child of the current
    node and each '}' climbs back to its parent.
    """
    root = Node()
    current = root
    for char in groups[1:-1]:
        if char == "{":
            child = Node()
            child.parent = current
            current = child
        elif char == "}":
            current = current.parent
    return root
def score_tree(tree):
    """Return the total score: every group is worth its depth plus one."""
    return sum(node.depth + 1 for node in PreOrderIter(tree))
def main():
    """Solve part 1: score the group tree built from the puzzle input."""
    print(score_tree(build_tree(get_input())))
if __name__ == "__main__":
main()
| true
|
f704f8f6fdfd8c6ceee91af043d5413d44ab2714
|
Python
|
sclwh/FSRMASS
|
/main.py
|
UTF-8
| 647
| 2.546875
| 3
|
[] |
no_license
|
def on_button_pressed_a():
    # Button A selects mode 1 and labels it "FSR" on the LED display.
    global MODE
    MODE = 1
    basic.show_string("FSR")
# Register the handler for micro:bit button A presses.
input.on_button_pressed(Button.A, on_button_pressed_a)
def on_button_pressed_b():
    # Button B selects mode 2 and labels it "VEL" on the LED display.
    global MODE
    MODE = 2
    basic.show_string("VEL")
# Register the handler for micro:bit button B presses.
input.on_button_pressed(Button.B, on_button_pressed_b)
# Shared state used by the button handlers and the forever loop.
# (The original code assigned MODE = 0 twice in a row; the duplicate is removed.)
MASS = 0  # value computed and shown by on_forever
B = 0     # calibration constant for the active mode (set in on_forever)
M = 0     # calibration constant for the active mode (set in on_forever)
FSR = 0   # raw sensor reading (on_forever currently hard-codes it)
MODE = 0  # 0 = idle, 1 = "FSR" mode (button A), 2 = "VEL" mode (button B)
def on_forever():
    # Continuously convert the (currently hard-coded) FSR reading into a
    # display value using the constants for the selected mode.
    global FSR, M, B, MASS
    FSR = 100000  # placeholder raw reading — presumably a real sensor read later; confirm
    calibration = {1: (-2.16, 1151), 2: (-0.476, 1411)}  # MODE -> (M, B)
    if MODE in calibration:
        M, B = calibration[MODE]
        MASS = (FSR - M) / B
        basic.show_number(MASS)
# Run the loop body forever in the background.
basic.forever(on_forever)
| true
|