blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1bc9840697ceb026d50ea0fff544febc007b4d99 | 0c1cd8ae58f8b0b8eda2b682ac071cd31c25bfa0 | /scripts/layout_constraint.py | 7fe105a047e34c5e1320fa9bef47bd9847a12fb1 | [
"MIT"
] | permissive | gemsi/cocoa | b3a64fae5bb9112b9eec0653e5dc230592827c65 | 834a1ee556bbafed27bc0b2b5a2ca3be271e2b01 | refs/heads/master | 2023-01-28T17:13:22.873526 | 2020-12-09T05:35:44 | 2020-12-09T05:35:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,267 | py | #!env python3
from generate import Component, Property, Method, Return, Param, init_method
if __name__ == "__main__":
    # Declarative code-generation model: describes the Objective-C
    # NSLayoutConstraint class so that generate.py can emit the binding
    # for it (Go aliases suggest a Go target -- TODO confirm in generate.py).
    w = Component(
        Type="appkit.LayoutConstraint",
        super_type='foundation.Object',
        description="the relationship between two user interface objects that must be satisfied by the constraint-based layout system",
        # Mapped properties; readonly entries get no setter generated.
        properties=[
            Property(name='active', Type='bool',
                     description='the active state of the constraint'),
            Property(name='firstItem', Type='foundation.Object', readonly=True,
                     description='the first object participating in the constraint'),
            Property(name='firstAttribute', Type='int', readonly=True, go_alias_type='LayoutAttribute',
                     description='the attribute of the first object participating in the constraint'),
            Property(name='relation', Type='int', readonly=True, go_alias_type='LayoutRelation',
                     description='the relation between the two attributes in the constraint'),
            Property(name='secondItem', Type='foundation.Object', readonly=True,
                     description='the second object participating in the constraint'),
            Property(name='secondAttribute', Type='int', readonly=True, go_alias_type='LayoutAttribute',
                     description='the attribute of the second object participating in the constraint'),
            Property(name='multiplier', Type='float64', readonly=True,
                     description='the multiplier applied to the second attribute participating in the constraint'),
            Property(name='constant', Type='float64', readonly=True,
                     description='the constant added to the multiplied second attribute participating in the constraint'),
            Property(name='firstAnchor', Type='appkit.LayoutAnchor', readonly=True,
                     description='the first anchor that defines the constraint'),
            Property(name='secondAnchor', Type='appkit.LayoutAnchor', readonly=True,
                     description='the second anchor that defines the constraint'),
            Property(name='priority', Type='float32', go_alias_type='LayoutPriority',
                     description='the priority of the constraint'),
            Property(name='identifier', Type='string', description='the name that identifies the constraint'),
            Property(name='shouldBeArchived', Type='bool', getter_prefix_is=False,
                     description='whether the constraint should be archived by its owning view'),
        ],
        # Mapped class methods; objc_param_name preserves the original
        # Objective-C selector parts.
        methods=[
            Method(
                name='constraintWithItem',
                static=True,
                params=[
                    Param(name='view1', Type='foundation.Object'),
                    Param(name='attr1', Type='int', go_alias='LayoutAttribute', objc_param_name='attribute'),
                    Param(name='relation', Type='int', go_alias='LayoutRelation', objc_param_name='relatedBy'),
                    Param(name='view2', Type='foundation.Object', objc_param_name='toItem'),
                    Param(name='attr2', Type='int', go_alias='LayoutAttribute', objc_param_name='attribute'),
                    Param(name='multiplier', Type='float64', objc_param_name='multiplier'),
                    Param(name='c', Type='float64', objc_param_name='constant'),
                ],
                description='creates a constraint that defines the relationship between the specified attributes of the given views',
            ),
            Method(
                name='activateConstraints',
                static=True,
                params=[
                    Param(name='constraints', Type="appkit.LayoutConstraint", array=True),
                ],
                description='activates each constraint in the specified array',
            ),
            Method(
                name='deactivateConstraints',
                static=True,
                params=[
                    Param(name='constraints', Type="appkit.LayoutConstraint", array=True),
                ],
                description='deactivates each constraint in the specified array',
            ),
        ]
    )
    # Emit the generated source for the component described above.
    w.generate_code()
| [
"dongliu@kuaishou.com"
] | dongliu@kuaishou.com |
cb7ba224121f3de25c0055cc0bc1096c98c01fbb | 34c4951edc118b3763eeca9734d47ea8c10d0654 | /preprocessing/plotting.py | 11c344406208071f2a7fe831e055ffa0c9dcaa77 | [] | no_license | yiren-liu/retailer-search | 5739af9682f6c53ed05edaed4b95174d4493b56c | 5e2b8f9ee3fde56363db417e1ac06543225990ec | refs/heads/master | 2022-04-04T20:14:53.991264 | 2019-11-18T04:14:29 | 2019-11-18T04:14:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,980 | py | # %%
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
# %%
def draw_word_num_pic():
    """Plot word-count KDE distributions for website descriptions and
    search results -- overall and split by site type -- then print
    summary statistics for each pairing.

    Reads pre-computed word counts from
    'descriptions_results_num_params.sav' (a pickled sequence of 8 lists).
    """
    with open('descriptions_results_num_params.sav', 'rb') as f:
        tmp = pickle.load(f)
    # pickle layout: overall description/result counts, then the same
    # counts split by retailer / manufacturer / other sites
    (des_num, resu_num,
     des_num_reta, des_num_manu, des_num_other,
     resu_num_reta, resu_num_manu, resu_num_other) = tmp[:8]

    def _plot_kde(data, title):
        # One shaded KDE figure with the shared axis labels; clf() afterwards
        # so consecutive figures never overlap (the original omitted clf()
        # after exactly one of its seven copies -- now applied uniformly).
        sns.kdeplot(data, shade=True)
        plt.title(title)
        plt.xlabel('Number of words ')
        plt.ylabel('Density')
        plt.show()
        plt.clf()

    _plot_kde(des_num, 'Words number distribution of website desriptions')
    _plot_kde(resu_num, 'Words number distribution of search results')
    _plot_kde(des_num_reta, 'Words number distribution of retailer website desriptions')
    _plot_kde(des_num_manu, 'Words number distribution of manufacturer website desriptions')
    _plot_kde(des_num_other, 'Words number distribution of other website desriptions')
    _plot_kde(resu_num_reta, 'Words number distribution of retailer search results')
    _plot_kde(resu_num_manu, 'Words number distribution of manufacturer search results')
    _plot_kde(resu_num_other, 'Words number distribution of other search results')

    # Paired description/result summary statistics, overall and per type.
    df_all = pd.DataFrame({'descriptions': des_num, 'results': resu_num})
    df_reta = pd.DataFrame({'retailer descriptions': des_num_reta, 'retailer results': resu_num_reta})
    df_manu = pd.DataFrame({'manufacturer descriptions': des_num_manu, 'manufacturer results': resu_num_manu})
    df_other = pd.DataFrame({'other descriptions': des_num_other, 'other results': resu_num_other})
    for df in (df_all, df_reta, df_manu, df_other):
        print(df.describe())
# %%
# Entry point for notebook-cell execution: draw every word-count plot
# and print the accompanying summary tables.
draw_word_num_pic()
# %%
# %%
| [
"724139392@qq.com"
] | 724139392@qq.com |
b02bca5c918def6a70efac656fdcaa65b903a14d | 35a2a3f5fa6573c32e411d399a60e6f67ae51556 | /tests/python/mkl/test_mkldnn.py | 2caf7af7eb4c4c9e8eb2cdcff9d02cfcbd19da5f | [
"Apache-2.0",
"BSD-2-Clause-Views",
"Zlib",
"BSD-2-Clause",
"BSD-3-Clause",
"Intel"
] | permissive | TuSimple/mxnet | 21c1b8fedd1a626cb57189f33ee5c4b2b382fd79 | 4cb69b85b4db8e1492e378c6d1a0a0a07bd737fb | refs/heads/master | 2021-01-09T07:59:24.301512 | 2019-07-27T00:56:52 | 2019-07-27T00:56:52 | 53,660,918 | 33 | 47 | Apache-2.0 | 2019-07-27T01:09:17 | 2016-03-11T10:56:36 | Python | UTF-8 | Python | false | false | 7,303 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
MKL-DNN related test cases
"""
import sys
import os
import numpy as np
import mxnet as mx
from mxnet.test_utils import assert_almost_equal
from mxnet import gluon
from mxnet.gluon import nn
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append(os.path.join(curr_path, '../unittest/'))
from common import with_seed
def test_mkldnn_model():
    """Load a symbol from its JSON file and run two forward/backward
    passes on CPU; any failure during bind or execution fails the test
    with an explicit message."""
    model = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data",
                         "test_mkldnn_test_mkldnn_model_model1.json")
    shape = (32, 3, 300, 300)
    ctx = mx.cpu()

    sym = mx.sym.load(model)
    args = sym.list_arguments()
    shapes = sym.infer_shape(data=shape)

    def get_tensors(args, shapes, ctx):
        # one all-ones NDArray per argument, matching the inferred shapes
        return {x: mx.nd.ones(y, ctx) for x, y in zip(args, shapes)}

    inputs = get_tensors(args, shapes[0], ctx)
    grads = get_tensors(args, shapes[0], ctx)

    try:
        exe = sym.bind(ctx, inputs, args_grad=grads)
        for _ in range(2):
            exe.forward(is_train=True)
            for y in exe.outputs:
                y.wait_to_read()
            exe.backward()
            for y in exe.grad_arrays:
                y.wait_to_read()
    except Exception:
        # Previously a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; Exception keeps the intent
        # (turn any runtime failure into a test failure) without that.
        assert 0, "test_mkldnn_model exception in bind and execution"
def test_mkldnn_ndarray_slice():
    """Computing on a slice of an MKLDNN-backed NDArray must yield the
    known reference value."""
    cpu_ctx = mx.cpu()
    model = gluon.nn.HybridSequential()
    with model.name_scope():
        model.add(gluon.nn.Conv2D(channels=32, kernel_size=3, activation=None))
    model.collect_params().initialize(ctx=cpu_ctx)
    batch = mx.nd.array(np.ones([32, 3, 224, 224]), cpu_ctx)
    out = model(batch)
    # indexing the output triggers computation on an NDArray slice
    assert_almost_equal(out[0].asnumpy()[0, 0, 0], 0.3376348)
def test_mkldnn_engine_threading():
    """Results must stay correct after a DataLoader worker thread has run,
    i.e. when the MKLDNN engine is driven from a different thread."""
    model = gluon.nn.HybridSequential()
    with model.name_scope():
        model.add(gluon.nn.Conv2D(channels=32, kernel_size=3, activation=None))
    model.collect_params().initialize(ctx=mx.cpu())

    class _OnesData(gluon.data.Dataset):
        # minimal two-sample dataset, only used to spin up a worker
        def __len__(self):
            return 2

        def __getitem__(self, key):
            return key, np.ones((3, 224, 224)), np.ones((10, ))

    data_loader = gluon.data.DataLoader(_OnesData(), batch_size=2, num_workers=1)
    in_shape = (32, 3, 32, 32)
    # first forward pass triggers the mkldnn execution thread
    pred = model(mx.nd.array(np.ones(in_shape))).asnumpy()
    # iterating the loader below runs work in a different thread; the
    # forward pass afterwards must still match the reference value
    for _ in data_loader:
        pred = model(mx.nd.array(np.ones(in_shape))).asnumpy()
        # output should be 016711406 (non-mkldnn mode output)
        assert_almost_equal(pred[0, 0, 0, 0], 0.016711406)
        break
@with_seed()
def test_reshape_before_conv():
    """Gradients and outputs must match between imperative and hybridized
    execution when reshape ops feed convolutions."""
    class ReshapeConvNet(gluon.HybridBlock):
        """two convolutions, each preceded by a reshape"""
        def __init__(self, **kwargs):
            super(ReshapeConvNet, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(10, (3, 3))
                self.conv1 = nn.Conv2D(5, (3, 3))

        def hybrid_forward(self, F, x, *args, **kwargs):
            first = self.conv0(x.reshape((0, 0, 20, 5)))
            return self.conv1(first.reshape((0, 0, 9, 6)))

    data = mx.nd.random.uniform(shape=(2, 4, 10, 10))
    data.attach_grad()
    net = ReshapeConvNet()
    net.collect_params().initialize()
    with mx.autograd.record():
        imperative_out = net(data)
    imperative_out.backward()
    # NOTE(review): data.grad is the very buffer the hybrid backward pass
    # writes into as well -- confirm the first assert compares what's intended.
    imperative_grad = data.grad
    net.hybridize()
    with mx.autograd.record():
        hybrid_out = net(data)
    hybrid_out.backward()
    mx.test_utils.assert_almost_equal(imperative_grad.asnumpy(), data.grad.asnumpy(), rtol=1e-5, atol=1e-6)
    mx.test_utils.assert_almost_equal(imperative_out.asnumpy(), hybrid_out.asnumpy(), rtol=1e-5, atol=1e-6)
@with_seed()
def test_slice_before_conv():
    """Gradients and outputs must match between imperative and hybridized
    execution when slice ops feed convolutions."""
    class SliceConvNet(gluon.HybridBlock):
        """two convolutions, each preceded by a slice"""
        def __init__(self, **kwargs):
            super(SliceConvNet, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(4, (3, 3))
                self.conv1 = nn.Conv2D(4, (3, 3))

        def hybrid_forward(self, F, x, *args, **kwargs):
            first = self.conv0(x.slice(begin=(0, 0, 0, 0), end=(2, 4, 10, 10)))
            return self.conv1(first.slice(begin=(1, 0, 2, 2), end=(2, 1, 7, 7)))

    data = mx.nd.random.uniform(shape=(2, 10, 10, 10))
    data.attach_grad()
    net = SliceConvNet()
    net.collect_params().initialize()
    with mx.autograd.record():
        imperative_out = net(data)
    imperative_out.backward()
    # NOTE(review): data.grad is the very buffer the hybrid backward pass
    # writes into as well -- confirm the first assert compares what's intended.
    imperative_grad = data.grad
    net.hybridize()
    with mx.autograd.record():
        hybrid_out = net(data)
    hybrid_out.backward()
    mx.test_utils.assert_almost_equal(imperative_grad.asnumpy(), data.grad.asnumpy(), rtol=1e-5, atol=1e-6)
    mx.test_utils.assert_almost_equal(imperative_out.asnumpy(), hybrid_out.asnumpy(), rtol=1e-5, atol=1e-6)
@with_seed()
def test_slice_reshape_before_conv():
    """Gradients and outputs must match between imperative and hybridized
    execution when a slice and a reshape feed convolutions."""
    class SliceReshapeConvNet(gluon.HybridBlock):
        """conv fed by a slice, then conv fed by a reshape"""
        def __init__(self, **kwargs):
            super(SliceReshapeConvNet, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(4, (3, 3))
                self.conv1 = nn.Conv2D(4, (3, 3))

        def hybrid_forward(self, F, x, *args, **kwargs):
            first = self.conv0(x.slice(begin=(0, 0, 0, 0), end=(2, 4, 8, 9)))
            return self.conv1(first.reshape((0, 0, 14, 3)))

    data = mx.nd.random.uniform(shape=(2, 10, 10, 10))
    data.attach_grad()
    net = SliceReshapeConvNet()
    net.collect_params().initialize()
    with mx.autograd.record():
        imperative_out = net(data)
    imperative_out.backward()
    # NOTE(review): data.grad is the very buffer the hybrid backward pass
    # writes into as well -- confirm the first assert compares what's intended.
    imperative_grad = data.grad
    net.hybridize()
    with mx.autograd.record():
        hybrid_out = net(data)
    hybrid_out.backward()
    mx.test_utils.assert_almost_equal(imperative_grad.asnumpy(), data.grad.asnumpy(), rtol=1e-5, atol=1e-6)
    mx.test_utils.assert_almost_equal(imperative_out.asnumpy(), hybrid_out.asnumpy(), rtol=1e-5, atol=1e-6)
def test_mkldnn_sum_inplace_with_cpu_layout():
    """add_n must combine an MKLDNN-layout conv output with a plain
    CPU-layout array and produce the expected value."""
    conv_in_shape = (32, 3, 224, 224)
    add_in_shape = (32, 32, 222, 222)
    lhs = mx.sym.Variable("x")
    rhs = mx.sym.Variable("y")
    graph = mx.symbol.Convolution(data=lhs, num_filter=32, kernel=(3, 3))
    graph = mx.sym.add_n(graph, rhs)
    executor = graph.simple_bind(ctx=mx.cpu(), x=conv_in_shape, y=add_in_shape)
    result = executor.forward(is_train=False, x=np.ones(conv_in_shape),
                              y=np.ones(add_in_shape))[0]
    assert_almost_equal(result[0].asnumpy()[0, 0, 0], 1.0)
if __name__ == '__main__':
    # Run every MKLDNN test defined in this module when executed directly.
    # (The previous entry point called test_mkldnn_install(), which is not
    # defined anywhere in this file and raised NameError.)
    test_mkldnn_model()
    test_mkldnn_ndarray_slice()
    test_mkldnn_engine_threading()
    test_reshape_before_conv()
    test_slice_before_conv()
    test_slice_reshape_before_conv()
    test_mkldnn_sum_inplace_with_cpu_layout()
| [
"anirudh2290@apache.org"
] | anirudh2290@apache.org |
454cbfb46c7d918fb69092033e9e5117676beb29 | 6eef7d400474384c9e36cafbbae95e3c34dbb6ad | /manage.py | 9546f991d846e27cec4ace859f5bbc2dda3e97ad | [] | no_license | codeAligned/clinvitae | 61d3c160e9dbc65d548818292681a27501d330ce | 4a75c14113dc562991c7d2d1a5812d2db91e2da0 | refs/heads/master | 2020-05-17T12:02:33.514187 | 2019-02-21T06:47:35 | 2019-02-21T06:47:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | #!/usr/bin/env python
import os
import sys
def _run_cli(argv):
    """Point Django at the project settings, then dispatch the CLI command."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ben_kremer_clinvitae.settings")
    # imported lazily so the settings variable is in place before Django reads it
    from django.core.management import execute_from_command_line
    execute_from_command_line(argv)


if __name__ == "__main__":
    _run_cli(sys.argv)
| [
"kremerdesign@gmail.com"
] | kremerdesign@gmail.com |
e3f53a3a413ada1afe4a2375cc0ac058751e6347 | c8bb293178969cb8974079b8c9e14c56f53bfbeb | /python2.7/site-packages/tinyrpc/protocols/jsonrpc.py | fa0875b17dd58f306455aeb3acd6166f78a5eb9b | [] | no_license | ahaWDY/ryu-controller | a322833db49910e5630f72291890e495adf12ab2 | 13c704c0a061901aaf8d2d2f77d6b5f2d8189536 | refs/heads/main | 2023-02-25T05:45:21.516870 | 2021-02-09T19:19:23 | 2021-02-09T19:19:23 | 323,458,834 | 1 | 1 | null | 2020-12-21T22:27:53 | 2020-12-21T22:08:49 | Python | UTF-8 | Python | false | false | 10,573 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import inspect
import json
import sys
import six
from .. import (
RPCBatchProtocol, RPCRequest, RPCResponse, RPCErrorResponse,
InvalidRequestError, MethodNotFoundError, InvalidReplyError, RPCError,
RPCBatchRequest, RPCBatchResponse
)
# Select the JSON encoder once at import time; jsonext is honored only if
# the embedding application imported it first -- this module never forces
# the dependency itself.
if 'jsonext' in sys.modules:
    # jsonext was imported before this file, assume the intent is that
    # it is used in place of the regular json encoder.
    import jsonext
    json_dumps = jsonext.dumps
else:
    # standard-library fallback encoder
    json_dumps = json.dumps
class FixedErrorMessageMixin(object):
    """Mixin for exceptions carrying a fixed JSON-RPC message and code.

    When constructed without positional arguments, the class-level
    ``message`` is used as the exception argument.  An optional ``data``
    keyword is stored on the instance and later attached to responses.
    """

    def __init__(self, *args, **kwargs):
        if 'data' in kwargs:
            self.data = kwargs.pop('data')
        if not args:
            args = [self.message]
        super(FixedErrorMessageMixin, self).__init__(*args, **kwargs)

    def error_respond(self):
        """Return a JSONRPCErrorResponse describing this error (no id)."""
        resp = JSONRPCErrorResponse()
        resp.error = self.message
        resp.unique_id = None
        resp._jsonrpc_error_code = self.jsonrpc_error_code
        if hasattr(self, 'data'):
            resp.data = self.data
        return resp
class JSONRPCParseError(FixedErrorMessageMixin, InvalidRequestError):
    """Invalid JSON was received (JSON-RPC 2.0 error -32700)."""
    jsonrpc_error_code = -32700
    message = 'Parse error'


class JSONRPCInvalidRequestError(FixedErrorMessageMixin, InvalidRequestError):
    """The JSON sent is not a valid request object (error -32600)."""
    jsonrpc_error_code = -32600
    message = 'Invalid Request'


class JSONRPCMethodNotFoundError(FixedErrorMessageMixin, MethodNotFoundError):
    """The requested method does not exist (error -32601)."""
    jsonrpc_error_code = -32601
    message = 'Method not found'


class JSONRPCInvalidParamsError(FixedErrorMessageMixin, InvalidRequestError):
    """Invalid method parameter(s) (error -32602)."""
    jsonrpc_error_code = -32602
    message = 'Invalid params'


class JSONRPCInternalError(FixedErrorMessageMixin, InvalidRequestError):
    """Internal JSON-RPC error (error -32603)."""
    jsonrpc_error_code = -32603
    message = 'Internal error'


class JSONRPCServerError(FixedErrorMessageMixin, InvalidRequestError):
    """Implementation-defined server error (-32000); message set at raise time."""
    jsonrpc_error_code = -32000
    message = ''
class JSONRPCSuccessResponse(RPCResponse):
    """A successful JSON-RPC reply carrying ``result`` and the request id."""

    def _to_dict(self):
        # wire-format dictionary; key insertion order matches the spec examples
        payload = {'jsonrpc': JSONRPCProtocol.JSON_RPC_VERSION}
        payload['id'] = self.unique_id
        payload['result'] = self.result
        return payload

    def serialize(self):
        """Render this reply as a JSON string."""
        return json_dumps(self._to_dict())
class JSONRPCErrorResponse(RPCErrorResponse):
    """An error JSON-RPC reply with message, numeric code and optional data."""

    def _to_dict(self):
        # nested 'error' object first; 'data' only appears when present
        error_obj = {
            'message': str(self.error),
            'code': self._jsonrpc_error_code,
        }
        if hasattr(self, 'data'):
            error_obj['data'] = self.data
        return {
            'jsonrpc': JSONRPCProtocol.JSON_RPC_VERSION,
            'id': self.unique_id,
            'error': error_obj,
        }

    def serialize(self):
        """Render this reply as a JSON string."""
        return json_dumps(self._to_dict())
def _get_code_message_and_data(error):
    """Map an exception or plain string onto a (code, message, data) triple."""
    assert isinstance(error, (Exception, six.string_types))
    if not isinstance(error, Exception):
        # plain string: reported as a generic server error
        return -32000, error, None

    data = None
    if hasattr(error, 'jsonrpc_error_code'):
        # the exception carries its own JSON-RPC code (and possibly data)
        code = error.jsonrpc_error_code
        msg = str(error)
        data = getattr(error, 'data', None)
    elif isinstance(error, InvalidRequestError):
        code = JSONRPCInvalidRequestError.jsonrpc_error_code
        msg = JSONRPCInvalidRequestError.message
    elif isinstance(error, MethodNotFoundError):
        code = JSONRPCMethodNotFoundError.jsonrpc_error_code
        msg = JSONRPCMethodNotFoundError.message
    else:
        # generic exception: allow its message (and optional data arg) through
        code = JSONRPCServerError.jsonrpc_error_code
        if len(error.args) == 2:
            msg = str(error.args[0])
            data = error.args[1]
        else:
            msg = str(error)
    return code, msg, data
class JSONRPCRequest(RPCRequest):
    """A single JSON-RPC request; a notification when ``unique_id`` is None."""

    def error_respond(self, error):
        """Build an error reply for this request.

        Returns None for notifications (requests without an id), which
        must never be answered.
        """
        if self.unique_id is None:
            return None

        response = JSONRPCErrorResponse()

        code, msg, data = _get_code_message_and_data(error)

        response.error = msg
        response.unique_id = self.unique_id
        response._jsonrpc_error_code = code
        # Attach auxiliary data whenever it exists; the previous truthiness
        # test (`if data:`) silently dropped falsy payloads such as 0, ''
        # or an empty dict.
        if data is not None:
            response.data = data
        return response

    def respond(self, result):
        """Build a success reply carrying ``result``; None for notifications."""
        if self.unique_id is None:
            return None

        response = JSONRPCSuccessResponse()

        response.result = result
        response.unique_id = self.unique_id

        return response

    def _to_dict(self):
        # 'params' holds either the positional or the keyword arguments
        # (create_request guarantees only one of the two is set)
        jdata = {
            'jsonrpc': JSONRPCProtocol.JSON_RPC_VERSION,
            'method': self.method,
        }
        if self.args:
            jdata['params'] = self.args
        if self.kwargs:
            jdata['params'] = self.kwargs
        if hasattr(self, 'unique_id') and self.unique_id is not None:
            jdata['id'] = self.unique_id
        return jdata

    def serialize(self):
        """Serialize this request to a JSON string."""
        return json_dumps(self._to_dict())
class JSONRPCBatchRequest(RPCBatchRequest):
    """A list of requests transmitted as one JSON-RPC batch."""

    def create_batch_response(self):
        """Return a batch response container, or None if no reply is expected."""
        if self._expects_response():
            return JSONRPCBatchResponse()

    def _expects_response(self):
        # A batch needs a response if it contains any parse error or at
        # least one non-notification request.
        for request in self:
            if isinstance(request, Exception):
                return True
            # PEP 8: identity test against None instead of `!= None`
            if request.unique_id is not None:
                return True

        return False

    def serialize(self):
        """Serialize the whole batch to a JSON array string."""
        return json_dumps([req._to_dict() for req in self])
class JSONRPCBatchResponse(RPCBatchResponse):
    """Collects per-request responses of a batch and serializes them."""

    def serialize(self):
        # Notifications yield None entries which must not appear on the wire.
        # PEP 8: identity test against None instead of `!= None`.
        return json_dumps([resp._to_dict() for resp in self if resp is not None])
class JSONRPCProtocol(RPCBatchProtocol):
    """JSONRPC protocol implementation.

    Currently, only version 2.0 is supported."""

    JSON_RPC_VERSION = "2.0"
    _ALLOWED_REPLY_KEYS = sorted(['id', 'jsonrpc', 'error', 'result'])
    _ALLOWED_REQUEST_KEYS = sorted(['id', 'jsonrpc', 'method', 'params'])

    def __init__(self, *args, **kwargs):
        super(JSONRPCProtocol, self).__init__(*args, **kwargs)
        # monotonically increasing id for outgoing (non one-way) requests
        self._id_counter = 0

    def _get_unique_id(self):
        self._id_counter += 1
        return self._id_counter

    def request_factory(self):
        """Create a new, empty single request."""
        return JSONRPCRequest()

    def create_batch_request(self, requests=None):
        """Wrap an iterable of requests into a batch request."""
        return JSONRPCBatchRequest(requests or [])

    def create_request(self, method, args=None, kwargs=None, one_way=False):
        """Build a request for ``method``.

        JSON-RPC carries either positional or keyword parameters, never
        both.  One-way requests (notifications) get no id and therefore
        expect no response.
        """
        if args and kwargs:
            raise InvalidRequestError('Does not support args and kwargs at '
                                      'the same time')

        request = self.request_factory()

        if not one_way:
            request.unique_id = self._get_unique_id()

        request.method = method
        request.args = args
        request.kwargs = kwargs

        return request

    def parse_reply(self, data):
        """Deserialize and validate a reply; raises InvalidReplyError."""
        if six.PY3 and isinstance(data, bytes):
            # zmq won't accept unicode strings, and this is the other
            # end; decoding non-unicode strings back into unicode
            data = data.decode()

        try:
            rep = json.loads(data)
        except Exception as e:
            raise InvalidReplyError(e)

        for k in six.iterkeys(rep):
            # PEP 8: `not in` instead of `not k in ...`
            if k not in self._ALLOWED_REPLY_KEYS:
                raise InvalidReplyError('Key not allowed: %s' % k)

        if 'jsonrpc' not in rep:
            raise InvalidReplyError('Missing jsonrpc (version) in response.')

        if rep['jsonrpc'] != self.JSON_RPC_VERSION:
            raise InvalidReplyError('Wrong JSONRPC version')

        if 'id' not in rep:
            raise InvalidReplyError('Missing id in response')

        if ('error' in rep) == ('result' in rep):
            raise InvalidReplyError(
                'Reply must contain exactly one of result and error.'
            )

        if 'error' in rep:
            response = JSONRPCErrorResponse()
            error = rep['error']
            response.error = error['message']
            response._jsonrpc_error_code = error['code']
        else:
            response = JSONRPCSuccessResponse()
            response.result = rep.get('result', None)

        response.unique_id = rep['id']

        return response

    def parse_request(self, data):
        """Deserialize one request or a whole batch.

        Raises JSONRPCParseError on invalid JSON and
        JSONRPCInvalidRequestError on an empty batch; per-item errors in
        a batch are collected as list entries rather than raised.
        """
        if six.PY3 and isinstance(data, bytes):
            # zmq won't accept unicode strings, and this is the other
            # end; decoding non-unicode strings back into unicode
            data = data.decode()

        try:
            req = json.loads(data)
        except Exception:
            # invalid JSON maps to the spec-defined parse error (-32700);
            # the caught exception carried no extra information (was bound
            # to an unused variable before)
            raise JSONRPCParseError()

        if isinstance(req, list):
            # batch request: keep one entry per item so each can be
            # answered individually
            requests = JSONRPCBatchRequest()
            for subreq in req:
                try:
                    requests.append(self._parse_subrequest(subreq))
                except RPCError as e:
                    requests.append(e)
                except Exception:
                    requests.append(JSONRPCInvalidRequestError())

            if not requests:
                # an empty batch is itself invalid per the spec
                raise JSONRPCInvalidRequestError()

            return requests
        else:
            return self._parse_subrequest(req)

    def _parse_subrequest(self, req):
        # validate a single request object and turn it into a JSONRPCRequest
        if not isinstance(req, dict):
            raise JSONRPCInvalidRequestError()

        for k in six.iterkeys(req):
            if k not in self._ALLOWED_REQUEST_KEYS:
                raise JSONRPCInvalidRequestError()

        if req.get('jsonrpc', None) != self.JSON_RPC_VERSION:
            raise JSONRPCInvalidRequestError()

        if not isinstance(req['method'], six.string_types):
            raise JSONRPCInvalidRequestError()

        request = self.request_factory()

        request.method = str(req['method'])
        request.unique_id = req.get('id', None)

        params = req.get('params', None)
        if params is not None:
            if isinstance(params, list):
                request.args = req['params']
            elif isinstance(params, dict):
                request.kwargs = req['params']
            else:
                raise JSONRPCInvalidParamsError()

        return request

    def _caller(self, method, args, kwargs):
        # custom dispatcher called by RPCDispatcher._dispatch()
        # when provided with the address of a custom dispatcher.
        # Used to generate a customized error message when the
        # function signature doesn't match the parameter list.
        try:
            inspect.getcallargs(method, *args, **kwargs)
        except TypeError:
            raise JSONRPCInvalidParamsError()
        else:
            return method(*args, **kwargs)
| [
"2503131624@qq.com"
] | 2503131624@qq.com |
196076833a5592428b92e52569b4b23032a18e2c | 7c241ec2d3255f232db634a037af54658712caf7 | /Projekte/Modulo/Main.spec | 32993d25989bdae4389c1862c61d894ee4bf4431 | [] | no_license | GoodGuyTuring/GreaterOnes | 85c47814901e7dface2a839510ed9f6028464cc0 | c3ea1f7005d192fc504f33a78e42e90f27d873ae | refs/heads/master | 2020-09-01T01:39:24.274355 | 2020-01-24T16:10:25 | 2020-01-24T16:10:25 | 218,844,671 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | spec | # -*- mode: python ; coding: utf-8 -*-
# PyInstaller build specification for the "Main" application, processed
# with `pyinstaller Main.spec`.  The names Analysis/PYZ/EXE/COLLECT are
# injected into the namespace by PyInstaller when this file is executed.
block_cipher = None
# Dependency analysis starting from Main.py.
a = Analysis(['Main.py'],
             pathex=['/home/mark/Git/GreaterOnes/Projekte/Modulo'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
# Compressed archive of the collected pure-Python modules.
pyz = PYZ(a.pure, a.zipped_data,
          cipher=block_cipher)
# One-folder build: the executable excludes binaries ...
exe = EXE(pyz,
          a.scripts,
          [],
          exclude_binaries=True,
          name='Main',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          console=True )
# ... which are gathered next to it into the dist/Main folder.
coll = COLLECT(exe,
               a.binaries,
               a.zipfiles,
               a.datas,
               strip=False,
               upx=True,
               upx_exclude=[],
               name='Main')
| [
"daniel.huschina@uni-bonn.de"
] | daniel.huschina@uni-bonn.de |
1be96dfd88ce2372ebf4307223158365f6f38a93 | 900aabfb61b714b98dc843a7a6922df0d1fac4bc | /txartnet/test/__init__.py | 758981b66e65db20aabe9a9bfa8775eb870457bd | [
"MIT"
] | permissive | aalex/txartnet | ee93b17f911d37f0dea3d091d61f008f7b399424 | a01abe5e2d5cec616421f3ba769b6851aae04923 | refs/heads/master | 2016-08-07T19:53:45.832967 | 2014-02-28T22:11:35 | 2014-02-28T22:11:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | #!/usr/bin/env python
"""
Unit tests for the txartnet library.
"""
| [
"alexandre@quessy.net"
] | alexandre@quessy.net |
0dbb5d5f42d7c6f530c0b2a619756f5d5850c136 | 95ca12c782c7ae4f392e297186a7d95a74c472bb | /for_build.spec | 11e817261abf67d9d09e7f4b6df88101f1a2b646 | [] | no_license | TepNik/Sumulation-newton-s-cradle | 9efda83f402c69c14f0aad5720b31c0c8e740cd0 | 48b05a5f787f68fcd58b396d1732efdae5961430 | refs/heads/master | 2020-05-27T17:54:02.262347 | 2019-05-26T21:12:06 | 2019-05-26T21:12:06 | 188,731,725 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | spec | # -*- mode: python -*-
# PyInstaller build specification for the "proga" GUI application
# (console=False).  Analysis/PYZ/EXE/COLLECT are injected by PyInstaller.
# NOTE(review): 'Your path' is a placeholder that must be replaced with
# the real project directory before building.
block_cipher = None
a = Analysis(['proga.py'],
             pathex=['Your path'],
             binaries=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher)
# Ship the icon file alongside the application data.
a.datas += [('newtons-cradle.ico', 'Your path/newtons-cradle.ico', 'DATA')]
pyz = PYZ(a.pure, a.zipped_data,
          cipher=block_cipher)
# NOTE(review): the EXE already bundles a.binaries/a.zipfiles/a.datas and
# COLLECT gathers them again -- looks redundant; confirm the intended
# one-file vs one-folder mode.
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          name='proga',
          debug=False,
          strip=False,
          upx=True,
          console=False,
          icon='Your path/newtons-cradle.ico')
coll = COLLECT(exe,
               a.binaries,
               a.zipfiles,
               a.datas,
               strip=False,
               upx=True,
               name='proga')
| [
"tepelin.nikita@gmail.com"
] | tepelin.nikita@gmail.com |
cd0c89d314658b289357e3eaf240900c29f54130 | 7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3 | /aoj/aoj-icpc/300/1305.py | e3d262fb73a310e2dafd28e76451fa6d53bedd63 | [] | no_license | roiti46/Contest | c0c35478cd80f675965d10b1a371e44084f9b6ee | c4b850d76796c5388d2e0d2234f90dc8acfaadfa | refs/heads/master | 2021-01-17T13:23:30.551754 | 2017-12-10T13:06:42 | 2017-12-10T13:06:42 | 27,001,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | while 1:
    # Python 2 solution (input()/raw_input()/print statement).
    # number of group-definition lines; a single 0 ends the whole input
    n = input()
    if n == 0: break
    ans = {}
    # ans maps a group name to the set of its direct members; the first
    # listed group is the one whose expanded member count is requested
    for i in range(n):
        group,name = raw_input().split(":")
        if i == 0: first = group
        ans[group] = set(name[:-1].split(","))
    # repeatedly inline sub-groups into every group that references them
    # until only fully-expanded member sets remain
    while 1:
        for key in ans:
            flag = 0
            if key == first: continue
            for key1 in ans:
                if key in ans[key1]:
                    # replace the reference to `key` by its members
                    ans[key1] |= ans[key]
                    ans[key1].discard(key)
                    flag = 1
            if flag:
                # `key` was inlined somewhere: remove it and restart the
                # scan (the dict changed, so iteration must not continue)
                del ans[key]
                break
        if flag == 0: break
    # distinct direct+indirect members of the first group
    print len(ans[first])
| [
"roiti46@gmail.com"
] | roiti46@gmail.com |
695f3f91507485220caf4ed5db68b50ce6fe4ed8 | 7628b23a81440834dac9aa629b3ea9165cc5c5a5 | /media.py | 0cee132c2410713d9c27a06eea0be271940b81d9 | [] | no_license | puisaha/Movie_Trailer | 078c1317578e1727aa86a656fa479210f97ef56b | ddf55473d8767ff0fdf29bbac2e668d160ce9d1b | refs/heads/master | 2021-05-12T08:54:23.981298 | 2018-01-14T18:47:52 | 2018-01-14T18:47:52 | 117,302,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | # This file defines Movie class which provide a way to store movie information
# Author: Amrita Saha
# Date: 01/14/2018
import webbrowser
class Movie():
    """Container for the metadata describing a single movie."""

    def __init__(self, movie_title, movie_storyline, poster_image,
                 trailer_youtube, trailer_rating):
        """Store the movie's metadata.

        Args:
            movie_title (str): title of the movie.
            movie_storyline (str): short plot summary.
            poster_image (str): URL of the poster image.
            trailer_youtube (str): URL of the YouTube trailer.
            trailer_rating (str): URL of the IMDB rating page.
        """
        self.title = movie_title
        self.storyline = movie_storyline
        self.poster_image_url = poster_image
        self.trailer_youtube_url = trailer_youtube
        self.rating = trailer_rating

    def show_trailer(self):
        """Open the movie trailer in the default web browser."""
        webbrowser.open(self.trailer_youtube_url)
webbrowser.open(self.trailer_youtube_url)
| [
"pui.amrita@gmail.com"
] | pui.amrita@gmail.com |
ba19d402cada443858f5c912e94740794ab239c6 | fd7473c2ae601add76525dd0c0c7da375189cfe9 | /neighbors2d.py | 408f86ec32af32424b2dab448ce77aa9b8b55b38 | [] | no_license | SlamminU/chem160module3 | 759794e7180749dac829f1e878c5fa18121fb859 | 93a26096a8dd9a2bc0e79d31ae1ab177074e51f4 | refs/heads/master | 2022-12-22T03:23:05.073053 | 2020-09-09T07:04:21 | 2020-09-09T07:04:21 | 294,030,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | from random import choice
n=100
count=0
nocc=0
list2d=[[choice((0,1)) for x in range(n)] for y in range(n)]
for x in range(1,n-1):
for y in range(1,n-1):
if list2d[x][y]==1:
nocc+=1
neigh=list2d[x-1][y]+list2d[x+1][y]+\
list2d[x][y-1]+list2d[x][y+1]
if neigh > 2:
count+=1
print("Fraction with more than 2 neighbors:",count/nocc) | [
"noreply@github.com"
] | SlamminU.noreply@github.com |
576e9d4e00c457925362b7811446d04872edd57c | 07271909bd7c0f78127c39509ea5b842dd4b65e3 | /logicallake/grammar/test/test_hana_sql.py | 4504bc86e196e6bd4ed4e5d6ebbf3bc9fd344024 | [
"BSD-2-Clause"
] | permissive | stewarg9/logicallake | 61e8cb8040f8682b0d4ca9da4a408841eb67fe2c | a5ac4d172b94a4bb8130545b6d41eebad60eb4b4 | refs/heads/master | 2023-01-28T16:55:22.838434 | 2020-12-07T21:31:03 | 2020-12-07T21:31:03 | 318,164,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,080 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 26 16:21:17 2020
@author: kbzg512
"""
import unittest
from tatsu import parse, compile, to_python_model
from tatsu.util import asjson
import tatsu
import json
# Relative directory holding the HANA SQL grammar definition file.
GRAMMAR_DIR = "..\\"
#
#
# Things to be aware of...
# The grammar is case insensitive; it's munching stuff to upper case.
#
#
class TestGrammar(unittest.TestCase):
def setUp(self):
# Load the grammar in from a reference file.
with open(GRAMMAR_DIR + "HANA_SQL_Grammar.bnf") as f:
grammar = f.read()
self.debug = False
self.model = compile(grammar)
def generate_ast(self, input):
munged_input = input.replace(',', ' , ')
self.ast = self.model.parse(munged_input)
result = str(json.dumps(asjson(self.ast), sort_keys=True))
return result
def test_simple_select_from(self):
input = "SELECT * FROM MARA;"
target = """[{"from_clause": {"table_expression": {"table_ref": {"table_name": {"identifier": "MARA"}}}}, "select_clause": ["SELECT", {"selectitem": "*"}]}, ";"]"""
result = self.generate_ast(input)
if self.debug:
print()
print(target)
print(result)
self.assertEqual(result, target)
def test_simple_select_from_alias(self):
input = "SELECT * FROM MARA AS A;"
target = """[{"from_clause": {"table_expression": {"table_ref": {"table_alias": "A", "table_name": {"identifier": "MARA"}}}}, "select_clause": ["SELECT", {"selectitem": "*"}]}, ";"]"""
result = self.generate_ast(input)
if self.debug:
print()
print(target)
print(result)
self.assertEqual(result, target)
def test_simple_select_single_column(self):
input = "SELECT ZSID FROM MARA AS A;"
result = self.generate_ast(input)
target = """[{"from_clause": {"table_expression": {"table_ref": {"table_alias": "A", "table_name": {"identifier": "MARA"}}}}, "select_clause": ["SELECT", {"selectitem": {"column_name": "ZSID"}}]}, ";"]"""
if self.debug:
print()
print(target)
print(result)
self.assertEqual(result, target)
def test_simple_select_column_list(self):
input = "SELECT MATNR,ZSID FROM MARA AS A;"
result = self.generate_ast(input)
target = """[{"from_clause": {"table_expression": {"table_ref": {"table_alias": "A", "table_name": {"identifier": "MARA"}}}}, "select_clause": ["SELECT", {"selectitem": [{"column_name": "MATNR"}, {"column_name": "ZSID"}]}]}, ";"]"""
if self.debug:
print()
print(target)
print(result)
self.assertEqual(result, target)
def test_simple_select_column_list_column_alias(self):
input = "SELECT MATNR,ZSID,TXT40 AS Short_Desc FROM MARA AS A;"
target = """[{"from_clause": {"table_expression": {"table_ref": {"table_alias": "A", "table_name": {"identifier": "MARA"}}}}, "select_clause": ["SELECT", {"selectitem": [{"column_name": "MATNR"}, {"column_name": "ZSID"}, {"column_alias": {"identifier": "Short_Desc"}, "column_name": "TXT40"}]}]}, ";"]"""
result = self.generate_ast(input)
if self.debug:
print()
print(target)
print(result)
self.assertEqual(result, target)
def test_simple_join(self):
input = "SELECT * FROM MARA AS A LEFT OUTER JOIN MAKT AS B ON A.ZSID = B.ZSID;"
result = self.generate_ast(input)
target = """[{"from_clause": {"table_expression": {"table_ref": {"table_alias": "A", "table_name": {"identifier": "MARA"}}}}, "joined_table": {"join_predicate": {"comparison_predicate": {"comparison_operator": "=", "expression": [{"identifier": ["A", "ZSID"]}, {"identifier": ["B", "ZSID"]}]}}, "join_type": [["LEFT"], "OUTER"], "table_ref": {"table_alias": "B", "table_name": {"identifier": "MAKT"}}}, "select_clause": ["SELECT", {"selectitem": "*"}]}, ";"]"""
if self.debug:
print()
print(target)
print(result)
self.assertEqual(result, target)
def test_simple_where(self):
input = "SELECT * FROM MARA AS A WHERE 4 = 4;"
target = """[{"from_clause": {"table_expression": {"table_ref": {"table_alias": "A", "table_name": {"identifier": "MARA"}}}}, "select_clause": ["SELECT", {"selectitem": "*"}], "where_clause": {"condition": {"predicate": {"comparison_predicate": {"comparison_operator": "=", "expression": [{"constant": ["4", []]}, {"constant": ["4", []]}]}}}}}, ";"]"""
result = self.generate_ast(input)
if self.debug:
print()
print(target)
print(result)
self.assertEqual(str(result), str(target))
def test_real_world_from(self):
# This one is a bit clunky.
# Need to ensure the expected result escapes the backslash used to escape the quote... !
input = """SELECT * FROM masterdata."DataIngestion.SDI.FLATFILE.MDM::APO_LOCATION" ;"""
target = """[{"from_clause": {"table_expression": {"table_ref": {"table_name": {"identifier": ["\\"", ["DataIngestion.SDI.FLATFILE.MDM::APO_LOCATION"], "\\""], "schema_name": "masterdata"}}}}, "select_clause": ["SELECT", {"selectitem": "*"}]}, ";"]"""
result = self.generate_ast(input)
if self.debug:
print()
print(target)
print(result)
self.assertEqual(result, target)
def test_column_alias_multiple_wheres(self):
# This one is a bit clunky.
# Need to ensure the expected result escapes the backslash used to escape the quote... !
input = """SELECT ZSID,matnr,mara.fED AS blah FROM MARA WHERE ZSID = 'EU3' AND ID = 4; """
target = """[{"from_clause": {"table_expression": {"table_ref": {"table_name": {"identifier": "MARA"}}}}, "select_clause": ["SELECT", {"selectitem": [{"column_name": "ZSID"}, {"column_name": "matnr"}, {"column_alias": {"identifier": "blah"}, "column_name": "fED", "table_alias": "mara"}]}], "where_clause": {"condition": {"and": "AND", "condition": [{"predicate": {"comparison_predicate": {"comparison_operator": "=", "expression": [{"identifier": "ZSID"}, {"constant": ["'", ["EU3"], "'"]}]}}}, {"predicate": {"comparison_predicate": {"comparison_operator": "=", "expression": [{"identifier": "ID"}, {"constant": ["4", []]}]}}}]}}}, ";"]"""
result = self.generate_ast(input)
if self.debug:
print()
print(target)
print(result)
self.assertEqual(result, target)
def test_simple_nested_subquery(self):
input = """SELECT * FROM (SELECT ZSID FROM (SELECT * FROM (SELECT 1 FROM DUNCE)));"""
target = """[{"from_clause": {"table_expression": {"subquery": {"select_statement": {"from_clause": {"table_expression": {"subquery": {"select_statement": {"from_clause": {"table_expression": {"subquery": {"select_statement": {"from_clause": {"table_expression": {"table_ref": {"table_name": {"identifier": "DUNCE"}}}}, "select_clause": ["SELECT", {"selectitem": {"expression": {"constant": ["1", []]}}}]}}}}, "select_clause": ["SELECT", {"selectitem": "*"}]}}}}, "select_clause": ["SELECT", {"selectitem": {"column_name": "ZSID"}}]}}}}, "select_clause": ["SELECT", {"selectitem": "*"}]}, ";"]"""
result = self.generate_ast(input)
if self.debug:
print()
print(target)
print(result)
self.assertEqual(result, target)
def test_complicated_nested_subquery(self):
input = """SELECT * FROM (SELECT ZSID FROM (SELECT B.* FROM (SELECT A.ID FROM DUNCE AS A) B ) C);"""
target = """[{"from_clause": {"table_expression": {"subquery": {"select_statement": {"from_clause": {"table_expression": {"subquery": {"select_statement": {"from_clause": {"table_expression": {"subquery": {"select_statement": {"from_clause": {"table_expression": {"table_ref": {"table_name": {"identifier": "DUNCE"}}}}, "select_clause": ["SELECT", {"selectitem": {"expression": {"constant": ["1", []]}}}]}}}}, "select_clause": ["SELECT", {"selectitem": "*"}]}}}}, "select_clause": ["SELECT", {"selectitem": {"column_name": "ZSID"}}]}}}}, "select_clause": ["SELECT", {"selectitem": "*"}]}, ";"]"""
result = self.generate_ast(input)
if self.debug:
print()
print(target)
print(result)
self.assertEqual(result, target)
if __name__ == '__main__':
unittest.main()
| [
"stewarg9@yahoo.co.uk"
] | stewarg9@yahoo.co.uk |
2a7cf01986d9df5b9680b2ea1ec075516674419f | 1e42fb25742cf029d6ad96242b1178f7d76add44 | /classes/api/BitAPI.py | 40e377a9eb8e6274a29f95dadd3bd269f0365ae0 | [
"MIT"
] | permissive | WongLynn/bitfinexbot | bfc3e707f7429be768dba7f3374009e0c407f685 | b7b12bbb2e48eaf0d4d92d2190f8f85f546e47cb | refs/heads/master | 2020-03-11T21:07:52.951966 | 2018-01-05T16:51:56 | 2018-01-05T16:51:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,318 | py | #!/usr/bin/env python
import requests
import json
import base64
import hmac
import hashlib
import time
import functools
class BitAPI:
"""
For reference and to make a valid code, im using code developed by dawsbot.
Check his repo: https://github.com/dawsbot/bitfinex
"""
def __init__(self):
self.url = "https://api.bitfinex.com/v1"
self.keys_file = 'keys.txt'
self.__read_keys()
@functools.lru_cache(maxsize=None)
def __read_keys(self):
with open(self.keys_file) as fp:
self.api_key = fp.readline().rstrip() # put your API public key here.
self.api_secret = fp.readline().rstrip() # put your API private key here.
def __gen_nonce(self): # generates a nonce, used for authentication.
return str(int(time.time() * 1000000))
def __payload_packer(self, payload): # packs and signs the payload of the request.
j = bytearray(json.dumps(payload), 'iso-8859-1')
data = bytes(base64.standard_b64encode(j))
h = hmac.new(bytearray(self.api_secret, 'iso-8859-1'), data, hashlib.sha384)
signature = h.hexdigest()
return {
"X-BFX-APIKEY": self.api_key,
"X-BFX-SIGNATURE": signature,
"X-BFX-PAYLOAD": data
}
def ticker(self, symbol='btcusd'): # gets the innermost bid and asks and information on the most recent trade.
r = requests.get(f"{self.url}/pubticker/" + symbol, verify=True) # <== UPDATED TO LATEST VERSION OF BFX!
rep = r.json()
try:
rep['last_price']
except KeyError:
return rep['error']
return rep
def stats(self, symbol='btcusd'): # Various statistics about the requested pairs.
r = requests.get(f"{self.url}/stats/" + symbol, verify=True) # <== UPDATED TO LATEST VERSION OF BFX!
return r.json()
def symbols(self): # get a list of valid symbol IDs.
r = requests.get(f"{self.url}/symbols", verify=True)
rep = r.json()
return rep
# authenticated methods
def active_orders(self): # view your active orders.
payload = {
"request": "/v1/orders",
"nonce": self.__gen_nonce()
}
signed_payload = self.__payload_packer(payload)
r = requests.post(f"{self.url}/orders", headers=signed_payload, verify=True)
return r.json()
def place_order(self, amount, price, side, ord_type, symbol='btcusd', exchange='bitfinex'): # submit a new order.
payload = {
"request": "/v1/order/new",
"nonce": self.__gen_nonce(),
"symbol": symbol,
"amount": amount,
"price": price,
"exchange": exchange,
"side": side,
"type": ord_type
}
signed_payload = self.__payload_packer(payload)
r = requests.post(f"{self.url}/order/new", headers=signed_payload, verify=True)
rep = r.json()
try:
rep['order_id']
except KeyError as e:
print(rep)
return rep['message']
return rep
def delete_order(self, order_id): # cancel an order.
payload = {
"request": "/v1/order/cancel",
"nonce": self.__gen_nonce(),
"order_id": order_id
}
signed_payload = self.__payload_packer(payload)
r = requests.post(f"{self.url}/order/cancel", headers=signed_payload, verify=True)
rep = r.json()
try:
rep['avg_execution_price']
except KeyError as e:
return rep['error']
return rep
def delete_all_order(self): # cancel an order.
payload = {
"request": "/v1/order/cancel/all",
"nonce": self.__gen_nonce(),
}
signed_payload = self.__payload_packer(payload)
r = requests.post(f"{self.url}/order/cancel/all", headers=signed_payload, verify=True)
return r.json()
def balances(self): # see your balances.
payload = {
"request": "/v1/balances",
"nonce": self.__gen_nonce()
}
signed_payload = self.__payload_packer(payload)
r = requests.post(f"{self.url}/balances", headers=signed_payload, verify=True)
return r.json()
| [
"rdenadai@ccuec.unicamp.br"
] | rdenadai@ccuec.unicamp.br |
4651ffff83608bc314d7fb43a722b4e68943837a | b188fc954db980188a362d853c237c4a8c742276 | /server/apps/budget/admin/transaction.py | a8685fa0e76e02c2e368bc604d37a303d2e668d5 | [] | no_license | vshagur/sibdev-practice-2021-project-python | 78081b35749c5d96f5bdd4f3e4fb016e22cbd3ed | 2c6d7877a78dd2d9505ba1bb3a75ea0cb3ab7bf7 | refs/heads/main | 2023-07-16T16:06:32.348534 | 2021-08-24T03:39:40 | 2021-08-24T03:39:40 | 399,317,220 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | from ..models.budget.transaction import Transaction
from django.contrib import admin
@admin.register(Transaction)
class TransactionAdmin(admin.ModelAdmin):
pass
| [
"vshagur@gmail.com"
] | vshagur@gmail.com |
9c455ce4b8af925afea25a90680844bd0cd02e46 | 301b039050c00a9efa4f3a5635e8b633f8adf988 | /caffe2/python/layers/functional.py | 08612d21a4babfe8b412473834b03ea02a2621a1 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] | permissive | sunpan822/caffe2 | 9704b6fe556d272fbedfd6edfdb796f6a8f02970 | a3c56d892eb85054b4e7cbd1cf0a0d07422ae796 | refs/heads/master | 2020-04-12T14:31:45.919799 | 2019-04-19T04:10:40 | 2019-04-19T04:10:40 | 162,555,100 | 1 | 0 | Apache-2.0 | 2018-12-20T09:14:48 | 2018-12-20T09:14:47 | null | UTF-8 | Python | false | false | 5,022 | py | # @package functional
# Module caffe2.python.layers.functional
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema, scope, workspace
from caffe2.python.layers.layers import (
ModelLayer,
)
import caffe2.proto.caffe2_pb2 as caffe2_pb2
import numpy as np
import six
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Functional(ModelLayer):
def __init__(self, model, input_record, output_names_or_num, function,
name='functional', output_dtypes=None, **kwargs):
# allow coercion
input_record = schema.as_record(input_record)
super(Functional, self).__init__(model, name, input_record, **kwargs)
self._function = function
self._kwargs = kwargs
return_struct = (
isinstance(output_names_or_num, list) or
(isinstance(output_names_or_num, six.integer_types) and
output_names_or_num != 1)
)
with scope.NameScope(self.name, reset=True):
if isinstance(output_names_or_num, int):
struct_output_schema = schema.NewRecord(
model.net, schema.RawTuple(output_names_or_num))
elif isinstance(output_names_or_num, schema.Field):
self.output_schema = output_names_or_num.clone(keep_blobs=True)
return
else:
if not isinstance(output_names_or_num, list):
output_names_or_num = [output_names_or_num]
out_tuple = [(out, np.void) for out in output_names_or_num]
struct_output_schema = schema.NewRecord(
model.net, schema.Struct(*out_tuple))
num_outputs = len(struct_output_schema.field_blobs())
# functional layer returns Struct if more than one outputs or output is
# a list, otherwise Scalar
if return_struct:
self.output_schema = struct_output_schema
else:
self.output_schema = struct_output_schema[0]
# If output_dtypes is provided, use it for output schema. Otherwise
# the shape and type will be inferred.
if output_dtypes is not None:
if not isinstance(output_dtypes, list):
output_dtypes = [output_dtypes] * num_outputs
assert len(output_dtypes) == num_outputs
for dtype, scalar in zip(output_dtypes,
self.output_schema.all_scalars()):
scalar.set_type(dtype)
return
# Fake execution of the function to infer shapes and types automatically
had_issues = False
try:
type_net = core.Net('_temp_type_and_shape_inference_net')
schema.InitEmptyRecord(type_net, input_record, enforce_types=True)
function(type_net, self.input_record, self.output_schema, **kwargs)
(shapes, types) = workspace.InferShapesAndTypes([type_net], {})
for i in range(num_outputs):
scalar_schema = (self.output_schema[i] if return_struct
else self.output_schema)
blob = scalar_schema()
if blob not in types or blob not in shapes:
had_issues = True
continue
if shapes[blob] == []:
# Scalar type
shape = tuple()
elif shapes[blob][0] == 0:
shape = tuple(shapes[blob][1:])
else:
logger.warning("unexpeced shape: {}".format(shapes[blob]))
# If batch dimension is not first - give up on shape
# inference for that blob
had_issues = True
continue
# TODO(amalevich): Move it to some shared library
dtype = None
if types[blob] == caffe2_pb2.TensorProto.DOUBLE:
dtype = (np.float64, shape)
elif types[blob] == caffe2_pb2.TensorProto.FLOAT:
dtype = (np.float32, shape)
elif types[blob] == caffe2_pb2.TensorProto.INT32:
dtype = (np.int32, shape)
elif types[blob] == caffe2_pb2.TensorProto.INT64:
dtype = (np.int64, shape)
elif types[blob] == caffe2_pb2.TensorProto.FLOAT16:
dtype = (np.float16, shape)
if dtype is not None:
scalar_schema.set_type(dtype)
except TypeError as ex:
had_issues = True
logger.warning(str(ex))
if had_issues:
logger.warning(
"Type inference had problems for layer: {}".format(self.name))
def add_ops(self, net):
self._function(
net, self.input_record, self.output_schema, **(self._kwargs))
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
557f3838b0dc8bef75277c2e4dca2caff00cb413 | 78370c33a987d4c319b55fa8a698afd48f15d0aa | /lessons/admin.py | 5bb09113e7ea5258dc5ac9c76c8fd357a8334e07 | [] | no_license | liuzhenxin2/RuleOf72 | 6296e05a0351ce1af19d80e7b8d3ed64de9da943 | c80dce7dd3c5733a2b4aee81edd67dac9e11ae15 | refs/heads/master | 2022-12-24T02:17:57.733797 | 2020-10-04T18:06:12 | 2020-10-04T18:06:12 | 293,755,711 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | from django.contrib import admin
from .models import Lesson, Sub_topic
admin.site.register(Lesson)
admin.site.register(Sub_topic)
| [
"liuzhenxin1@live.com"
] | liuzhenxin1@live.com |
0ba4c698d48293dfd250b0cf5dcc7f196103bfaa | d4850a40bc5dbebb33739f219fe35b02a12589db | /ceramic-master-beta/Ceramic/settings.py | bf60a93b017fde420d89dbfa6dc06571bc21599c | [] | no_license | kong9410/ceramic | ccd77ffa71a7309f922eb56dbaa6fa733a7af146 | 65ccd5b08128bbfeb070e03b308602b5377231e1 | refs/heads/master | 2022-05-26T13:40:34.053380 | 2019-11-07T15:26:49 | 2019-11-07T15:26:49 | 220,264,997 | 0 | 0 | null | 2022-04-26T01:25:52 | 2019-11-07T15:12:08 | HTML | UTF-8 | Python | false | false | 3,577 | py | """
Django settings for Ceramic project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '39osv)m0$6k^nm72fqlt0fg=5vgt!pd8p=xe3seycu83k$s56j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'gui.apps.GuiConfig',
'management.apps.ManagementConfig',
'image_analysis.apps.ImageAnalysisConfig',
'process_analysis.apps.ProcessAnalysisConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Ceramic.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Ceramic.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'djongo',
'NAME': 'ceramicdb',
'HOST': '113.198.137.124',
'PORT': 27017,
'USER': 'ceramicAdmin',
'PASSWORD': 'ceramic123!@#$',
'AUTH_SOURCE': 'ceramicdb',
'AUTH_MECHANISM': 'SCRAM-SHA-1'
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| [
"gnitsgogo@gmail.com"
] | gnitsgogo@gmail.com |
185b47c53fb33ce144d02c63d274d36c771c8b4e | f3caac4e79efe25e1db20c78f04c2e62e573aab2 | /projects/01_fyyur/starter_code/migrations/versions/bf9d672acc27_first_attempt_at_venue_genres.py | 3ae15b86b9b68727bf7ead80fca0505cd41b90f6 | [] | no_license | EsNFish/FSND | ced0b81ac4d2076abc038b6cfdb5f370627dc5a4 | 5cc8aee5cb7d8a032dbc4d84e630fba7deebf8f3 | refs/heads/master | 2023-05-08T13:49:21.239342 | 2021-06-02T22:37:31 | 2021-06-02T22:37:31 | 357,344,134 | 0 | 0 | null | 2021-06-02T22:07:32 | 2021-04-12T21:28:37 | Python | UTF-8 | Python | false | false | 887 | py | """first attempt at venue genres
Revision ID: bf9d672acc27
Revises: 170e9f5a7391
Create Date: 2021-04-14 16:42:41.107310
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bf9d672acc27'
down_revision = '170e9f5a7391'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('venuegenre',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('genre', sa.String(length=120), nullable=False),
sa.Column('venue_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['venue_id'], ['venue.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('venuegenre')
# ### end Alembic commands ###
| [
"esnfish@gmail.com"
] | esnfish@gmail.com |
1de7c275d0299c2c4771f2e76446f0388e3b6064 | 57dbcfe5fe149b5353d42d687ebacfee36f16551 | /sambam/sam_strip_tags.py | 07dd6983bcdc366b975a62036992193da80974d7 | [
"MIT"
] | permissive | peterjc/picobio | 74d3f570a6344dc3fbd3ddca46d65c4292ce0ee7 | 63a5f8b5670afc3680bdeac0d9663d8fcbe904c1 | refs/heads/master | 2023-09-06T04:26:31.955632 | 2023-08-31T14:12:25 | 2023-08-31T14:12:25 | 2,184,466 | 34 | 14 | null | null | null | null | UTF-8 | Python | false | false | 3,572 | py | #!/usr/bin/env python
"""Python script to remove tags from SAM/BAM files.
This script is designed to be used as part of a Unix pipeline. It
takes as optional command line arguments a white list of tags to
preserve (or a black list of tags to remove). It reads SAM format
data from stdin, and writes SAM format data to stdout.
Simple usage with SAM files, keeping only read-group tags:
$ ./sam_strip_tags.py RG < original.sam > only_RG.sam
Simple usage with BAM files with conversion to/from SAM via samtools:
$ samtools view -h original.bam | ./sam_strip_tags.py RG | samtools view -S -b - > only_RG.bam
If your SAM/BAM files lack @SQ headers, you may need to give
samtools the reference FASTA file as well.
To remove particular tags (a black list rather than a white list)
include the switch -v (for invert, like the grep option). For example,
to remove any original quality (OC) tags, use:
$ ./sam_strip_tags.py -v OQ < original.sam > no_OQ.sam
Likewise with BAM files via samtools,
$ samtools view -h original.bam | ./sam_strip_tags.py -v OQ | samtools view -S -b - > no_OQ.bam
Copyright Peter Cock 2012. All rights reserved. See:
https://github.com/peterjc/picobio
"""
import sys
if "-v" in sys.argv[1:]:
black_list = set(x.strip() for x in sys.argv[1:] if x != "-v")
sys.stderr.write("Removing these tags: %s\n" % ", ".join(black_list))
for line in sys.stdin:
if line[0] != "@":
# Should be a read
(
qname,
flag,
rname,
pos,
mapq,
cigar,
rnext,
pnext,
tlen,
seq,
qual,
tags,
) = line.rstrip().split("\t", 11)
tags = "\t".join(t for t in tags.split("\t") if t[:2] not in black_list)
line = (
"\t".join(
[
qname,
flag,
rname,
pos,
mapq,
cigar,
rnext,
pnext,
tlen,
seq,
qual,
tags,
]
)
+ "\n"
)
sys.stdout.write(line)
else:
white_list = set(x.strip() for x in sys.argv[1:])
sys.stderr.write("Keeping only these tags: %s\n" % ", ".join(white_list))
for line in sys.stdin:
if line[0] != "@":
# Should be a read
(
qname,
flag,
rname,
pos,
mapq,
cigar,
rnext,
pnext,
tlen,
seq,
qual,
tags,
) = line.rstrip().split("\t", 11)
tags = "\t".join(t for t in tags.split("\t") if t[:2] in white_list)
line = (
"\t".join(
[
qname,
flag,
rname,
pos,
mapq,
cigar,
rnext,
pnext,
tlen,
seq,
qual,
tags,
]
)
+ "\n"
)
sys.stdout.write(line)
| [
"p.j.a.cock@googlemail.com"
] | p.j.a.cock@googlemail.com |
624d54fc452b022fe7756a520b32eb2daf1ccb3b | 3045cc6ae29a6bd01b76c453b6084f22378bc5f9 | /Fibot/api/data_types/lecture.py | 7d638bf7274131dc2c682a8768b6ad6e3d0be3b5 | [] | no_license | caoxu915683474/FIB-Chatbot | f425c6f9da2fb49dc726a790ea408a66020a1663 | beb0ab40d37bbbc9369236bcc9a5ac98fce808c9 | refs/heads/master | 2021-04-06T05:34:34.098658 | 2018-02-21T19:22:54 | 2018-02-21T19:22:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,433 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class Lecture(object):
""" Helper class for actions
Parameters:
data(:obj:`dict`): Information of a lecture. Example:
{
"codi_assig": "WSE",
"grup": "10",
"dia_setmana": 2,
"inici": "12:00",
"durada": 2,
"tipus": "T",
"aules": "A5201"
}
"""
def __init__(self, data):
days = {
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday'
}
self.assig = data['codi_assig']
self.group = data['grup']
self.day = days[data['dia_setmana']]
self.begin_hour = data['inici']
aux_hour = self.begin_hour.split(':')
self.end_hour = "{}:{}".format(
str(int(aux_hour[0])+data['durada']),
aux_hour[1] )
if data['tipus'] == 'T': self._type = 'theory'
elif data['tipus'] == 'L': self._type = 'laboratory'
else: self._type = 'problems'
self.classroom = data['aules']
def __repr__(self):
return "{} from {} to {} a {} class at classroom {}".format(
self.day,
self.begin_hour,
self.end_hour,
self._type,
self.classroom
)
| [
"victorbusque@gmail.com"
] | victorbusque@gmail.com |
946a2d515b6d377f0bab4dc01a5dfc573d4295b6 | b8088fa77c164e9c1af30862185232d3ed47c58c | /mapper.py | 4cd75ef6e0859c68e505c918bdaff77c05121b87 | [] | no_license | 2russellsmith/cs462Lab4 | f1ec0c6e1bd86ff3ebbbc425366f95a2d8c2fa71 | e3829b6a177751243659f3910eafbcc92b7b75d7 | refs/heads/master | 2020-09-10T15:54:13.142963 | 2016-08-25T18:59:58 | 2016-08-25T18:59:58 | 66,584,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,453 | py | #!/usr/bin/python
#
# Adapted from script by Diana MacLean 2011
#
# Mapper script adapted for CS448G from Amazon's example: http://aws.amazon.com/jobflows/2273?_encoding=UTF8&jiveRedirect=1
#
#
import sys
import re
stopwords = ['i','me','my','myself','we','our','ours',
'ourselves','you','your','yours','yourself','yourselves',
'he','him','his','himself','she','her','hers','herself',
'it','its','itself','they','them','their','theirs','themselves',
'what','which','who','whom','this','that','these','those','am','is',
'are','was','were','be','been','being','have','has','had','having','do',
'does','did','doing','a','an','the','and','but','if','or','because','as',
'until','while','of','at','by','for','with','about','against','between',
'into','through','during','before','after','above','below','to','from','up',
'down','in','out','on','off','over','under','again','further','then','once',
'here','there','when','where','why','how','all','any','both','each','few','more','most',
'other','some','such','no','nor','not','only','own','same','so','than','too','very','s',
't','can','will','just','don','should','now'
]
def main(argv):
line = sys.stdin.readline()
pattern = re.compile("[a-zA-Z][a-zA-Z0-9]*")
try:
while line:
for word in pattern.findall(line):
if word not in stopwords :
print word.lower() + "\t" + "1"
line = sys.stdin.readline()
except "end of file":
return None
if __name__ == "__main__":
main(sys.argv)
| [
"2russellsmith@gmail.com"
] | 2russellsmith@gmail.com |
074889911eebba54b35240e90b3e3237c5da2856 | dab8aafc0f4fcc22c1fb7ec3bc69cb58eb93692c | /mysite/settings.py | 42ec093fc747f47210c97dc6a777a8fcab670c64 | [] | no_license | nayans99/my_first_blog | 7cc35c9038246f28e1d2b98cfa9ea5a40406268a | 17d4bf8e505ac19e0e75b0d355eb0dcaa054d764 | refs/heads/master | 2020-06-08T11:10:24.064124 | 2019-06-22T10:14:15 | 2019-06-22T10:14:15 | 193,218,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,226 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.9.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*ior)tzsxz=b7b33fx=l7s*p#92x1%drh*gf09)_*8+zf7-sa1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"nayansabnis99@gmail.com"
] | nayansabnis99@gmail.com |
6fa53185e2e05b9e6e6db365d3d3defaf82130cf | f8e64dd069b2d65f1b9af53e03c42d97301e9a1d | /apps/currency/forms/fields.py | 9c65327d4b8701519fd5e5bf2100f8c390ed6e36 | [] | no_license | grengojbo/django-currency | 8daef53e442d7409f02c68dec48ff535b1712377 | 26e26cfb09ae8e62851a81bc8d821e1530eef20c | refs/heads/master | 2021-04-12T04:32:53.928776 | 2010-04-28T17:00:39 | 2010-04-28T17:00:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | from django.utils.translation import ugettext_lazy as _
from django import forms
from widgets import InputMoneyWidget
from currency.money import Money
from currency.models import Currency
__all__ = ('MoneyField',)
class MoneyField(forms.DecimalField):
    """Form field that accepts an ``(amount, currency_code)`` pair.

    Rendered with an InputMoneyWidget, which submits the decimal amount
    together with the selected currency, and cleaned into a Money value.
    """

    def __init__(self, currency_widget=None, *args, **kwargs):
        # Set the composite widget before calling the parent __init__;
        # DecimalField.__init__ then picks up this instance attribute
        # instead of its default NumberInput widget.
        self.widget = InputMoneyWidget(currency_widget=currency_widget)
        super(MoneyField, self).__init__(*args, **kwargs)

    def clean(self, value):
        """Validate *value* and return a Money instance.

        Raises forms.ValidationError when the amount fails DecimalField
        validation or the currency code is unknown.
        """
        if not isinstance(value, tuple):
            # Bug fix: corrected the typo "tupple" -> "tuple" in the message.
            raise Exception("Invalid value provided for MoneyField.clean (expected tuple)")
        # Let DecimalField validate/convert the amount part only.
        amount = super(MoneyField, self).clean(value[0])
        currency = Currency.objects.get_currency(value[1])
        if not currency:
            raise forms.ValidationError(_(u'Input currency'))
        return Money(amount=amount, currency=currency)
"oleg.dolya@gmail.com"
] | oleg.dolya@gmail.com |
703921458c0f564aaa1f58619aad6a1b66d47297 | 3fb3038f212d68550836d1956515da8cbccbdf8e | /products/migrations/0001_initial.py | f90d612c99f51ebf6f7a9e485beb3139dadbd8ca | [] | no_license | ivanbat1/mysite | 9e8debb6ec713b6148fa1dd6b50e4f768c1c29df | 8ea492c22c0b8c3b340f79812d39320f61308627 | refs/heads/master | 2020-03-19T04:18:41.227917 | 2018-12-27T12:51:55 | 2018-12-27T12:51:55 | 135,815,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,386 | py | # Generated by Django 2.0.5 on 2018-05-23 19:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the products app: Product, ProductCategory, ProductImage."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('price', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
                ('name', models.CharField(blank=True, default=None, max_length=64, null=True)),
                ('description', models.TextField(blank=True, default=None, null=True)),
                ('is_active', models.BooleanField(default=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('discount', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='ProductCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, default=None, max_length=64, null=True)),
                ('is_active', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='ProductImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(blank=True, null=True, upload_to='products_image/')),
                ('is_main', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                # Each image belongs to one Product; deleting the product deletes its images.
                ('product', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='products.Product')),
            ],
        ),
        # Added after Product creation because it references ProductCategory.
        migrations.AddField(
            model_name='product',
            name='type',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='products.ProductCategory'),
        ),
    ]
| [
"baturin.ivan9@gmail.com"
] | baturin.ivan9@gmail.com |
b59c5b90bec745c23ed7e23d949ecbabbe82375a | 4762b15498e642b39edfff3745e9ea134f081893 | /workshop_admin/moodle/migrations/0002_statement.py | 302a1756920a5a26ec21dd32551a7dd89f96533f | [] | no_license | Informatinks/informatics-back | d1d29e7297e547a8749b8da4d6c70565495fc509 | be298f72c072023be004895faf88cff9806650f6 | refs/heads/master | 2022-12-10T05:33:34.637043 | 2019-11-01T16:29:12 | 2019-11-01T16:29:12 | 171,288,054 | 0 | 3 | null | 2022-12-08T04:53:26 | 2019-02-18T13:20:53 | Python | UTF-8 | Python | false | false | 671 | py | # Generated by Django 2.2.1 on 2019-07-02 11:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Expose the existing Moodle table mdl_statements as an unmanaged model.

    managed=False means Django only maps onto the table; it never creates,
    alters or drops it.
    """

    dependencies = [
        ('moodle', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Statement',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
                ('summary', models.TextField()),
            ],
            options={
                'db_table': 'mdl_statements',
                'managed': False,
            },
        ),
    ]
| [
"n.pakhtusov@tinkoff.ru"
] | n.pakhtusov@tinkoff.ru |
c97367ed534b90c843c6311c841f761386ab1f08 | 0c5aab3ed30f00c8dbbf95a95dca2160b360a599 | /ecommerce/form.py | 17c701d38230436eb713b5d2d688f9956c4493db | [] | no_license | 13alireza77/ecommerce | 7fa0bcd16d9e809ef14167edc8910868624116ce | e552a788ce2af5a66934ef5fb5fc48d75a887c5e | refs/heads/master | 2020-04-24T22:01:07.458180 | 2019-02-24T04:42:12 | 2019-02-24T04:42:12 | 169,950,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,898 | py | from django import forms
from django.contrib.auth import get_user_model
User = get_user_model()
class contactForm(forms.Form):
    """Contact form: full name, a gmail address, and a free-text message."""

    fullName = forms.CharField(widget=forms.TextInput(attrs={"class": "form-control", "placeholder": "Full Name"}))
    email = forms.EmailField(widget=forms.EmailInput(attrs={"class": "form-control", "placeholder": "Email"}))
    contact = forms.CharField(widget=forms.Textarea(attrs={"class": "form-control", "placeholder": "Youre Message"}))

    def clean_email(self):
        """Only accept addresses hosted on gmail.com."""
        email = self.cleaned_data.get("email")
        # Idiom fix: `x not in y` instead of `not x in y` (same semantics).
        if "gmail.com" not in email:
            raise forms.ValidationError("Email has to be gmail.com")
        return email
class LoginForm(forms.Form):
    """Plain credential form; authentication itself happens in the view."""
    username = forms.CharField()
    password = forms.CharField(widget=forms.PasswordInput)
class registerForm(forms.Form):
    """Registration form with uniqueness checks and a password-match check."""

    username = forms.CharField(widget=forms.TextInput)
    email = forms.EmailField(widget=forms.EmailInput(attrs={"class": "form-control", "placeholder": "Email"}))
    password1 = forms.CharField(widget=forms.PasswordInput, label='password')
    password2 = forms.CharField(widget=forms.PasswordInput, label='confirm password')

    def clean_username(self):
        """Reject usernames that already exist."""
        username = self.cleaned_data.get('username')
        qs = User.objects.filter(username=username)
        # Bug fix: `qs.exists` is a bound method and is always truthy, so the
        # original raised "username is taken" for every submission.  It must
        # be *called* to actually query the database.
        if qs.exists():
            raise forms.ValidationError("username is taken")
        return username

    def clean_email(self):
        """Reject e-mail addresses that are already registered."""
        email = self.cleaned_data.get('email')
        qs = User.objects.filter(email=email)
        if qs.exists():  # same fix as in clean_username
            raise forms.ValidationError("email is taken")
        return email

    def clean(self):
        """Cross-field validation: both password entries must match."""
        data = self.cleaned_data
        pass1 = self.cleaned_data.get('password1')
        pass2 = self.cleaned_data.get('password2')
        if pass1 != pass2:
            raise forms.ValidationError("passwords must match.")
        return data
| [
"13alirezaalidoosti77@gmail.com"
] | 13alirezaalidoosti77@gmail.com |
be607de4c4d3d9045d21c9098fef2bd6258ce7a9 | cd77de4a0c3796c1cf7e9508fdd9e02bb6b7c81b | /products/urls.py | 9a1a694e1031a564596f65efd9a83479392a2036 | [] | no_license | Sohelll/ghostownstore | 5569dba1c7cf044cbc04f8a9ee422599d30970ef | dfa373c4fa6c394e7e07619e63106da4d17289c6 | refs/heads/master | 2020-04-16T03:19:05.646820 | 2019-01-24T13:21:19 | 2019-01-24T13:21:19 | 165,227,744 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | from django.urls import path
from . import views
# URL routes for the products app.
urlpatterns = [
    path('', views.index, name='products'),                    # product listing
    path('search', views.search, name='search'),
    path('<int:product_id>', views.product, name='product'),   # product detail page
    path('category/<int:category_id>', views.cat_wise, name='cat_wise'),
    path('checkout_single/<int:product_id>', views.checkout_single, name='checkout_single'),
    path('checkout/<int:user_id>', views.checkout, name='checkout'),
    path('checkout/', views.checkout_tologin, name='checkout_tologin'),  # redirects anonymous users to login
    # Ajax runtime response url, from every page!
    path('ajax/add_to_cart', views.add_to_cart, name='add_to_cart'),
    path('checkout/ajax/delete_from_cart', views.delete_from_cart, name='delete_from_cart'),
    path('category/ajax/add_to_cart', views.add_to_cart, name='add_to_cart'),
    path('ajax/delete_from_cart', views.delete_from_cart, name='delete_from_cart'),
]
| [
"shaikhsohel.011@gmail.com"
] | shaikhsohel.011@gmail.com |
118e3b71b782fa295f2a247d81a815d8673f60c5 | b4982d7ffb9e65db8432e7728f89fa2dd4878aa6 | /Object Oriented Concept/encapsulation.py | f8aed772cf8b60e7fcaabebf4a62a52ede6aebd2 | [] | no_license | anupjungkarki/30-Days-Python-Challenge | 1d8e794235ac60e098f704cefa2c4a461134e8a4 | 96be38590a159d59397b122f8ee171574f5a556c | refs/heads/master | 2023-06-04T13:24:16.250487 | 2022-12-05T09:46:26 | 2022-12-05T09:46:26 | 327,277,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,465 | py | # Encapsulation is one of the method of the fundamental concept in object oriented programming(OOP).Other programming have access specifier
# to handle with the private data but in python private data is easily access from the outside of the class so Encapsulation helps to
# restrict to access data and variable outside the class.
# Here access of private key is possible
class Car:
    # First version: `_name` has a single leading underscore.  That is only a
    # convention marking it "protected"; Python does not block outside access.
    def __init__(self, name, mileage):
        self._name = name
        self.mileage = mileage

    def description(self):
        return f'The{self._name} car gives the mileage of {self.mileage} km/1hr'

obj = Car('BMW 7-Series', 39.53)
# accessing the protected variable by class method
print(obj.description())
# accessing the protected variable directly from outside:
# works fine, the single underscore is not enforced
print(obj._name)
print(obj.mileage)

# Now lets work some encapsulation method
class Car:
    # Second version: `__name` (double underscore) triggers name mangling,
    # so a plain `obj.__name` access from outside raises AttributeError.
    def __init__(self, name, mileage):
        self.__name = name  # Private Variable
        self.mileage = mileage

    def description(self):
        return f'The {self.__name} car given the mileage of {self.mileage} km/1hr'

obj = Car('BMW 7-Series', 39.53)
# Accessing the private variable by class method
print(obj.description())
# Accessing the private variable directly from the outside
# print(obj.__name)
# print(obj.mileage)
# It give an error while trying to access from the outside the class but we can also access by using Name MANGLING
# print(obj.mileage)
# print(obj._Car__name)  # mangled name (mangling uses the class name, capital C)
| [
"anupkarki2012@gmail.com"
] | anupkarki2012@gmail.com |
009c97483cd7634d38ffeac4a1744beaae479f57 | ae7d5d11351af9201ce6181c48b8c60363c7ed00 | /packages/data/setup.py | 28faa87c3d988024ce6993d21ad79eeb365f0a85 | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | natefoo/galaxy | 818037d03f39ccfb3714c7e784fd64d7ad8f4d2e | 64150c5bd803e75ed032e9f15acd003bae92b5ef | refs/heads/master | 2023-08-17T02:57:02.580487 | 2020-03-26T13:33:01 | 2020-03-26T13:33:01 | 31,212,836 | 2 | 1 | NOASSERTION | 2019-04-25T12:30:28 | 2015-02-23T15:01:46 | Python | UTF-8 | Python | false | false | 3,207 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import ast
import os
import re
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
SOURCE_DIR = "galaxy"
# NOTE(review): _version_re appears unused here; the version is extracted via
# get_var("__version__") below, which builds its own pattern.
_version_re = re.compile(r'__version__\s+=\s+(.*)')
# Read the metadata module's source once; get_var() parses values out of it.
with open('%s/project_galaxy_data.py' % SOURCE_DIR, 'rb') as f:
    init_contents = f.read().decode('utf-8')
def get_var(var_name):
    """Extract a module-level assignment named *var_name* from init_contents.

    The right-hand side is evaluated safely with ast.literal_eval and
    returned as a string.
    """
    # Match a line of the form ``<var_name> = <literal>``.
    match = re.search(r'%s\s+=\s+(.*)' % var_name, init_contents)
    return str(ast.literal_eval(match.group(1)))
# Project metadata values parsed out of galaxy/project_galaxy_data.py.
version = get_var("__version__")
PROJECT_NAME = get_var("PROJECT_NAME")
PROJECT_URL = get_var("PROJECT_URL")
PROJECT_AUTHOR = get_var("PROJECT_AUTHOR")
PROJECT_EMAIL = get_var("PROJECT_EMAIL")
PROJECT_DESCRIPTION = get_var("PROJECT_DESCRIPTION")
TEST_DIR = 'tests'
# Sub-packages shipped in this distribution.
PACKAGES = [
    'galaxy',
    'galaxy.datatypes',
    'galaxy.datatypes.dataproviders',
    'galaxy.datatypes.display_applications',
    'galaxy.datatypes.util',
    'galaxy.datatypes.test',
    'galaxy.model',
    'galaxy.model.dataset_collections',
    'galaxy.model.migrate',
    'galaxy.model.orm',
    'galaxy.model.store',
    'galaxy.model.tool_shed_install',
    'galaxy.quota',
    'galaxy.security',
]
# Console scripts installed alongside the package.
ENTRY_POINTS = '''
[console_scripts]
galaxy-build-objects=galaxy.model.store.build_objects:main
galaxy-manage-db=galaxy.model.orm.scripts:manage_db
'''
PACKAGE_DATA = {
    # Be sure to update MANIFEST.in for source dist.
    'galaxy': [
        'datatypes/test/*',
    ],
}
PACKAGE_DIR = {
    SOURCE_DIR: SOURCE_DIR,
}
# Long description shown on PyPI: README followed by the changelog.
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
if os.path.exists("requirements.txt"):
    requirements = open("requirements.txt").read().split("\n")
else:
    # In tox, it will cover them anyway.
    requirements = []
test_requirements = [
    # TODO: put package test requirements here
]
setup(
    name=PROJECT_NAME,
    version=version,
    description=PROJECT_DESCRIPTION,
    long_description=readme + '\n\n' + history,
    long_description_content_type='text/x-rst',
    author=PROJECT_AUTHOR,
    author_email=PROJECT_EMAIL,
    url=PROJECT_URL,
    packages=PACKAGES,
    entry_points=ENTRY_POINTS,
    package_data=PACKAGE_DATA,
    package_dir=PACKAGE_DIR,
    include_package_data=True,
    install_requires=requirements,
    license="AFL",
    zip_safe=False,
    keywords='galaxy',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Environment :: Console',
        'License :: OSI Approved :: Academic Free License (AFL)',
        'Operating System :: POSIX',
        'Topic :: Software Development',
        'Topic :: Software Development :: Code Generators',
        'Topic :: Software Development :: Testing',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    test_suite=TEST_DIR,
    tests_require=test_requirements
)
| [
"jmchilton@gmail.com"
] | jmchilton@gmail.com |
bc5318fa1d6c7db54b728d4678f3d5df28b75c30 | fe42aa7f3729e87ec486552b767f7049ff50ef12 | /ILLUSTRIS/CREATE/CREATE_SUBHALOS_HYDRO.py | d00533238addc36157d6bcba75f645a0ba508359 | [] | no_license | aishwaryasrivastava/AstrophySURP | 2b49f97d267a7d743cca1516044a51db9c682251 | 93546a646eb661be0b3e703f7cf689e010214068 | refs/heads/master | 2021-01-21T21:09:56.468433 | 2017-07-24T15:14:52 | 2017-07-24T15:14:52 | 92,312,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,287 | py | #--------------------------------IMPORTS-------------------------------#
import numpy as np
import illustris_python as il
np.seterr(divide = 'ignore', invalid = 'ignore')
import sqlite3
import sys
#--------------------------CONSTANTS-----------------------------------#
# Multiply the mass in Illustris catalog with this to get mass in Msun
# (catalog masses are stored in units of 1e10 Msun/h, with h = 0.704).
mass_constant = 1.e10/0.704
# Given halo mass in Msun, returns stellar mass in Msun
def StellarMass(M):
    """Stellar-to-halo mass relation (double power law).

    Given a halo mass M in Msun, returns the stellar mass in Msun.  Each
    fit parameter is the published best-fit value plus its one-sigma error.
    """
    norm = 0.0351 + 0.0058          # N10 + sigma(N10)
    slope_low = 1.376 + 0.153       # beta10 + sigma(beta10), low-mass slope
    slope_high = 0.608 + 0.059      # gamma10 + sigma(gamma10), high-mass slope
    pivot = 10 ** (11.590 + 0.236)  # characteristic halo mass 10^(M10 + sigma(M10))
    ratio = M / pivot
    return 2 * M * norm / (ratio ** (-slope_low) + ratio ** slope_high)
#------------------SETTING UP THE DATABASE CONNECTION------------------#
# Open (creating if necessary) the output SQLite database one level up.
conn = sqlite3.connect('../ILLUSTRIS-HYDRO.db')
c = conn.cursor()
#------------------------------READING THE CATALOG--------------------#
# Setup the HDF5 file
basePath = './Illustris-1/'
snapNum = 135  # snapshot number to load (z = 0 for Illustris-1)
print "Reading catalog..."
CATALOG = il.groupcat.load(basePath, snapNum)
SUBHALOS = CATALOG['subhalos']
#------------------------------SUBHALOS-------------------------------#
# One wide table of scalar subhalo properties plus one table per vector
# quantity (position, photometry, per-type masses, ...), all keyed on SubhaloID.
# NOTE(review): column "SubhaloSFRinHaldRad" looks like a typo for
# "SubhaloSFRinHalfRad", and SubhaloLen / SubhaloIDMostbound are declared
# without a type (legal in SQLite) -- kept as-is to preserve the schema.
c.execute("CREATE TABLE Subhalos (SubhaloID int PRIMARY KEY, SubhaloBHMdot float, SubhaloVmax float, SubhaloWindMass float, SubhaloGasMetallicityMaxRad float, SubhaloVelDisp float, SubhaloSFR float, SubhaloStarMetallicityMaxRad float, SubhaloLen, SubhaloSFRinHaldRad float, SubhaloGasMetallicity float, SubhaloBHMass float, SubhaloIDMostbound, SubhaloStellarPhotometricsMassInRad float, SubhaloHalfmassRad float, SubhaloParent int, SubhaloStarMetallicityHalfRad float, SubhaloGasMetallicitySfrWeighted float, SubhaloGasMetallicityHalfRad float, SubhaloMassInRad float, SubhaloGrNr int, SubhaloMassInHalfRad float, SubhaloSFRinRad float, SubhaloMassInMaxRad float, SubhaloStarMetallicity float)")
c.execute("CREATE TABLE SubhaloPos (SubhaloID int PRIMARY KEY, X float, Y float, Z float, FOREIGN KEY(SubhaloID) REFERENCES Subhalos(SubhaloID))")
# Eight photometric bands, matching SUBHALos['SubhaloStellarPhotometrics'] order.
c.execute("CREATE TABLE SubhaloStellarPhotometrics (SubhaloID int PRIMARY KEY, U float, B float, V float, K float, g float, r float, i float, z float, FOREIGN KEY(SubhaloID) REFERENCES Subhalos(SubhaloID))")
# Type1..Type6 columns correspond to the six particle types of the simulation.
c.execute("CREATE TABLE SubhaloMassType (SubhaloID int PRIMARY KEY, Type1 float, Type2 float, Type3 float, Type4 float, Type5 float, Type6 float, FOREIGN KEY(SubhaloID) REFERENCES Subhalos(SubhaloID))")
c.execute("CREATE TABLE SubhaloSpin (SubhaloID int PRIMARY KEY, X float, Y float, Z float, FOREIGN KEY(SubhaloID) REFERENCES Subhalos(SubhaloID))")
c.execute("CREATE TABLE SubhaloVel (SubhaloID int PRIMARY KEY, X float, Y float, Z float, FOREIGN KEY(SubhaloID) REFERENCES Subhalos(SubhaloID))")
c.execute("CREATE TABLE SubhaloLenType (SubhaloID int PRIMARY KEY, Type1 int, Type2 int, Type3 int, Type4 int, Type5 int, Type6 int, FOREIGN KEY(SubhaloID) REFERENCES Subhalos(SubhaloID))")
c.execute("CREATE TABLE SubhaloHalfmassRadType (SubhaloID int PRIMARY KEY, Type1 float, Type2 float, Type3 float, Type4 float, Type5 float, Type6 float, FOREIGN KEY(SubhaloID) REFERENCES Subhalos(SubhaloID))")
c.execute("CREATE TABLE SubhaloMassInMaxRadType (SubhaloID int PRIMARY KEY, Type1 float, Type2 float, Type3 float, Type4 float, Type5 float, Type6 float, FOREIGN KEY(SubhaloID) REFERENCES Subhalos(SubhaloID))")
c.execute("CREATE TABLE SubhaloCM (SubhaloID int, X float, Y float, Z float, FOREIGN KEY(SubhaloID) REFERENCES Subhalos(SubhaloID))")
# Crude text progress bar: one '#' will be printed per `fraction` subhalos.
fraction = 100000
barwidth = (SUBHALOS['count']/fraction)+1
sys.stdout.write("Creating subhalos...[%s]"%(" " * barwidth))
sys.stdout.flush()
sys.stdout.write("\b" * (barwidth+1))
# Insert one row per subhalo into each table, advancing the progress bar
# every `fraction` subhalos.
for i in range(0, SUBHALOS['count']):
    SubhaloID = i
    if i % fraction == 0:
        sys.stdout.write("#")
        sys.stdout.flush()
    # Scalar properties; the order matches the Subhalos column list above.
    SubhaloParams = (SubhaloID,
                     float(SUBHALOS['SubhaloBHMdot'][i]),
                     float(SUBHALOS['SubhaloVmax'][i]),
                     float(SUBHALOS['SubhaloWindMass'][i]),
                     float(SUBHALOS['SubhaloGasMetallicityMaxRad'][i]),
                     float(SUBHALOS['SubhaloVelDisp'][i]),
                     float(SUBHALOS['SubhaloSFR'][i]),
                     float(SUBHALOS['SubhaloStarMetallicityMaxRad'][i]),
                     str(SUBHALOS['SubhaloLen'][i]),
                     float(SUBHALOS['SubhaloSFRinHalfRad'][i]),
                     float(SUBHALOS['SubhaloGasMetallicity'][i]),
                     float(SUBHALOS['SubhaloBHMass'][i]),
                     str(SUBHALOS['SubhaloIDMostbound'][i]),
                     float(SUBHALOS['SubhaloStellarPhotometricsMassInRad'][i]),
                     float(SUBHALOS['SubhaloHalfmassRad'][i]),
                     long(SUBHALOS['SubhaloParent'][i]),
                     float(SUBHALOS['SubhaloStarMetallicityHalfRad'][i]),
                     float(SUBHALOS['SubhaloGasMetallicitySfrWeighted'][i]),
                     float(SUBHALOS['SubhaloGasMetallicityHalfRad'][i]),
                     float(SUBHALOS['SubhaloMassInRad'][i]),
                     long(SUBHALOS['SubhaloGrNr'][i]),
                     float(SUBHALOS['SubhaloMassInHalfRad'][i]),
                     float(SUBHALOS['SubhaloSFRinRad'][i]),
                     float(SUBHALOS['SubhaloMassInMaxRad'][i]),
                     # Bug fix: the last column is SubhaloStarMetallicity, but
                     # SubhaloMassInMaxRad was being inserted twice here.
                     float(SUBHALOS['SubhaloStarMetallicity'][i]))
    c.execute("INSERT INTO Subhalos VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", SubhaloParams)
    SubhaloPosParams = (SubhaloID, float(SUBHALOS['SubhaloPos'][i][0]), float(SUBHALOS['SubhaloPos'][i][1]), float(SUBHALOS['SubhaloPos'][i][2]))
    c.execute("INSERT INTO SubhaloPos VALUES (?, ?, ?, ?)", SubhaloPosParams)
    SubhaloStellarPhotometricsParams = (SubhaloID, float(SUBHALOS['SubhaloStellarPhotometrics'][i][0]), float(SUBHALOS['SubhaloStellarPhotometrics'][i][1]), float(SUBHALOS['SubhaloStellarPhotometrics'][i][2]), float(SUBHALOS['SubhaloStellarPhotometrics'][i][3]), float(SUBHALOS['SubhaloStellarPhotometrics'][i][4]), float(SUBHALOS['SubhaloStellarPhotometrics'][i][5]), float(SUBHALOS['SubhaloStellarPhotometrics'][i][6]), float(SUBHALOS['SubhaloStellarPhotometrics'][i][7]))
    c.execute("INSERT INTO SubhaloStellarPhotometrics VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)", SubhaloStellarPhotometricsParams)
    # Per-type masses are converted from catalog units to Msun.
    SubhaloMassTypeParams = (SubhaloID, float(SUBHALOS['SubhaloMassType'][i][0]*mass_constant), float(SUBHALOS['SubhaloMassType'][i][1]*mass_constant), float(SUBHALOS['SubhaloMassType'][i][2]*mass_constant), float(SUBHALOS['SubhaloMassType'][i][3]*mass_constant), float(SUBHALOS['SubhaloMassType'][i][4]*mass_constant), float(SUBHALOS['SubhaloMassType'][i][5]*mass_constant))
    c.execute("INSERT INTO SubhaloMassType VALUES (?, ?, ?, ?, ?, ?, ?)", SubhaloMassTypeParams)
    SubhaloSpinParams = (SubhaloID, float(SUBHALOS['SubhaloSpin'][i][0]), float(SUBHALOS['SubhaloSpin'][i][1]), float(SUBHALOS['SubhaloSpin'][i][2]))
    c.execute("INSERT INTO SubhaloSpin VALUES (?, ?, ?, ?)", SubhaloSpinParams)
    SubhaloVelParams = (SubhaloID, float(SUBHALOS['SubhaloVel'][i][0]), float(SUBHALOS['SubhaloVel'][i][1]), float(SUBHALOS['SubhaloVel'][i][2]))
    c.execute("INSERT INTO SubhaloVel VALUES (?, ?, ?, ?)", SubhaloVelParams)
    SubhaloLenTypeParams = (SubhaloID, int(SUBHALOS['SubhaloLenType'][i][0]), int(SUBHALOS['SubhaloLenType'][i][1]), int(SUBHALOS['SubhaloLenType'][i][2]), int(SUBHALOS['SubhaloLenType'][i][3]), int(SUBHALOS['SubhaloLenType'][i][4]), int(SUBHALOS['SubhaloLenType'][i][5]))
    c.execute("INSERT INTO SubhaloLenType VALUES (?, ?, ?, ?, ?, ?, ?)", SubhaloLenTypeParams)
    SubhaloHalfmassRadTypeParams = (SubhaloID, float(SUBHALOS['SubhaloHalfmassRadType'][i][0]), float(SUBHALOS['SubhaloHalfmassRadType'][i][1]), float(SUBHALOS['SubhaloHalfmassRadType'][i][2]), float(SUBHALOS['SubhaloHalfmassRadType'][i][3]), float(SUBHALOS['SubhaloHalfmassRadType'][i][4]), float(SUBHALOS['SubhaloHalfmassRadType'][i][5]))
    c.execute("INSERT INTO SubhaloHalfmassRadType VALUES (?, ?, ?, ?, ?, ?, ?)", SubhaloHalfmassRadTypeParams)
    SubhaloMassInMaxRadTypeParams = (SubhaloID, float(SUBHALOS['SubhaloMassInMaxRadType'][i][0]*mass_constant), float(SUBHALOS['SubhaloMassInMaxRadType'][i][1]*mass_constant), float(SUBHALOS['SubhaloMassInMaxRadType'][i][2]*mass_constant), float(SUBHALOS['SubhaloMassInMaxRadType'][i][3]*mass_constant), float(SUBHALOS['SubhaloMassInMaxRadType'][i][4]*mass_constant), float(SUBHALOS['SubhaloMassInMaxRadType'][i][5]*mass_constant))
    c.execute("INSERT INTO SubhaloMassInMaxRadType VALUES (?, ?, ?, ?, ?, ?, ?)", SubhaloMassInMaxRadTypeParams)
    SubhaloCMParams = (SubhaloID, float(SUBHALOS['SubhaloCM'][i][0]), float(SUBHALOS['SubhaloCM'][i][1]), float(SUBHALOS['SubhaloCM'][i][2]))
    c.execute("INSERT INTO SubhaloCM VALUES(?, ?, ?, ?)", SubhaloCMParams)
print("\n")
# Commit once at the end (much faster than per-row commits), then close.
conn.commit()
conn.close()
| [
"noreply@github.com"
] | aishwaryasrivastava.noreply@github.com |
f2d1848cd8952136feb770854fcef44e99792594 | 5550bd31a3f039d98d080d40dc1a28efde5b1297 | /worksheets/12 - Source Files/12q8.py | 4bb781e344a97771e9a33546919eefa7397e48c9 | [] | no_license | ceucomputing/olevel | f3bfb36ce3ccfa720aa6bcf4611639e8bcdd7bd2 | 71865c2109886b3c7fe66be742c621f11000c4e0 | refs/heads/master | 2023-01-03T02:00:18.794288 | 2020-10-27T03:58:41 | 2020-10-27T03:58:41 | 66,807,322 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | g = 9.8
# Gravitational potential energy of a mass raised to a height: PE = m * g * h
# (g = 9.8 m/s^2 is defined above).
mass = float(input("Enter the object's mass in kg: "))
height = float(input("Enter the object's height in m: "))
energy = mass * g * height
print(f"The object has {energy} J of gravitational potential energy.")
| [
"jiunwei@gmail.com"
] | jiunwei@gmail.com |
791b3a07d266c9d2ec5209ea3c1e8bddf037739b | 9c79d6517bafd1ec7540a864f2bfb31fe02d1a78 | /scripts/plot_spectral_bands.py | 7ab2d4a882f683f1fe0918a73c121f4bf9625a08 | [
"MIT"
] | permissive | jhoule42/lamps_conversions | 2d512ef2ce602447a72a3d20b51e380a778cb77e | d166f39b9669142ae45f5f5a84f2f7d70cb944c0 | refs/heads/main | 2023-07-13T11:37:28.221811 | 2021-08-19T18:36:02 | 2021-08-19T18:36:02 | 398,003,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | # Script pour plotter les
# Auteur : Julien-Pierre Houle
# Importer les librairies
import numpy as np
import matplotlib.pyplot as plt
import pytools as pt
import MultiScaleData as MSD
import hdftools as hdf
path = "git/Illumina-scripts/Spectres"
# SPECTRES d'Integration
scoto = path + "/scotopic.dat"
JC_U = path + "/JC_U.dat"
JC_B = path + "/JC_B.dat"
JC_V = path + "/JC_V.dat"
JC_R = path + "/JC_R.dat"
JC_I = path + "/JC_I.dat"
SQM = path + "/sqm.dat"
spct = [scoto, JC_U, JC_B, JC_V, JC_R, JC_I, SQM]
spct_list = [np.loadtxt(x, skiprows=1) for x in spct]
# Normaliser scoto sur 100
for wv in spct_list[0]:
wv[1] *= 100
for wv in spct_list[-1]:
wv[1] *= 100
# Plot le graphique
[plt.plot(i[:,0], i[:,1]) for i in spct_list]
plt.xlabel("wavelength (nm)")
plt.ylabel("Transmittance (%)")
plt.legend(("Scotopic", "U", "B", "V", "R", "I", "SQM"), loc="upper left")
plt.savefig("/home/jhoule42/Documents/Resultats_Sherbrooke/Spct/Spectres.png", bbox_inches="tight")
| [
"noreply@github.com"
] | jhoule42.noreply@github.com |
b54b585317c3cbdc1c1f9f7667ef9ec3526dbb4e | f4b6422703af7534867f90f2902aa3baa7b72416 | /2018/csiectf/hw5/lab3/lab3.py | b77153baa9653db6b42d406aee3da7f8c3490ba3 | [] | no_license | b04902036/ctf | d1eac85b915057e0961ad862d7bf2da106515321 | fac16cd79440a9c0fc870578d5c80b1491bb8eae | refs/heads/master | 2020-03-18T16:23:02.321424 | 2019-11-22T03:34:25 | 2019-11-22T03:34:25 | 134,962,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | #!/usr/bin/python
from pwn import *
# Target architecture for pwntools helpers (flat/p64/u64).
context.arch = 'amd64'
host = 'csie.ctf.tw'
port = 10131
#host = 'localhost'
#port = 4000
r = remote(host, port)
raw_input('#')  # pause so a debugger can be attached before sending payloads
# Addresses taken from the target binary (gadgets, PLT/GOT entries).
pop_rdi = 0x0000000000400883
pop_rsi_r15 = 0x0000000000400881
leave_ret = 0x0000000000400818
read_plt = 0x400610
puts_plt = 0x4005e0
puts_got = 0x0000000000600fc8
main_read = 0x4007c4
one_gadget = 0x10a38c  # one-gadget offset inside the remote libc -- matches that libc build
puts_base = 0x00000000000809c0  # offset of puts inside the same libc
buf1 = 0x601060  # writable .bss buffer the stack is pivoted into
buf2 = 0x602000 - 0x100
r.recvuntil(':')
# ROP chain stored at buf1: puts(puts_got) leaks a libc address, then
# control returns to main's read for a second stage.
rop1 = flat([buf2, pop_rdi, puts_got, puts_plt, main_read])
r.send(rop1.rjust(0x280, '\x00'))
r.recvuntil('say?\n')
# Overwrite saved rbp and rip so `leave; ret` pivots the stack into the
# buffer holding rop1 (stack migration).
r.send('\x00'*0x20 + p64(buf1 + 0x280 - 8*5) + p64(leave_ret))
r.recvuntil('~\n')
# Parse the leaked puts address, derive the one-gadget's absolute address.
puts_libc = u64(r.recvuntil('\n').strip().ljust(8, '\x00'))
one_gadget = puts_libc - puts_base + one_gadget
print ('one gadget : ', hex(one_gadget))
r.sendline('\x00'*0x28 + p64(one_gadget))
r.interactive()
#FLAG{st4ck_m1gr4t10n_15_p0werfu1!}
| [
"b04902036@ntu.edu.tw"
] | b04902036@ntu.edu.tw |
2b7533e5b81c60de7c5946f4f6c3ef4026c3dc6b | 5a35b4e745027c8631d6931dc5e1f8aa7c43915b | /product/migrations/0005_auto_20180704_1749.py | 0394c14b4fe30093fe98153489b0ddf90c41ba09 | [] | no_license | lucasLB7/mybus_systems | 44a336c61a2a2de7e461ae82aa37b9f28c24cdfb | aed761bddb26b0b6042c485f47539a404461d4ac | refs/heads/master | 2020-03-22T05:50:24.141316 | 2018-07-06T16:46:41 | 2018-07-06T16:46:41 | 139,593,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | # Generated by Django 2.0.7 on 2018-07-04 14:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add purchase_cost, quantity and sales_cost to the LaminateFlooring model.

    All three are nullable so existing rows migrate without a default.
    """

    dependencies = [
        ('product', '0004_laminateflooring_date_of_arrival'),
    ]

    operations = [
        migrations.AddField(
            model_name='laminateflooring',
            name='purchase_cost',
            field=models.PositiveIntegerField(null=True),
        ),
        migrations.AddField(
            model_name='laminateflooring',
            name='quantity',
            field=models.PositiveIntegerField(null=True),
        ),
        migrations.AddField(
            model_name='laminateflooring',
            name='sales_cost',
            field=models.PositiveIntegerField(null=True),
        ),
    ]
| [
"plucaslambert@gmail.com"
] | plucaslambert@gmail.com |
8eec6af91bf6913d22c03c800138aa02e775746e | ad4137e1ef30cdab7a8179b4ee7d48d1cdad6ce6 | /Menu.py | ac869bc6f6c1fe6e9c82ca26470ec4a6eca073a0 | [] | no_license | JulianMarsal/SopaDeLetras | 59f3441388839131f2a606da7a44dc958141c8bb | 4accbd8cda9f29c9bfd3f08d1cb6d726255b8135 | refs/heads/master | 2020-05-29T12:55:31.918901 | 2019-10-07T00:21:09 | 2019-10-07T00:21:09 | 189,144,070 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,896 | py | import PySimpleGUI as sg
import sopadeletras as Sopa
import constantes as const
import configuracion
import comprobacion as ingreso
from importlib import reload
import json
from funciones import leer_archivo
def promedio_temperatura():
    """Return the average temperature recorded for the configured office.

    Reads the office measurements and the active configuration from their
    JSON files via leer_archivo().
    """
    oficinas = leer_archivo("json files/datos-oficinas.json")
    config = leer_archivo("json files/configuracion.json")[0]
    registros = oficinas[config['oficina']]
    # Mean over every recorded measurement of the selected office.
    total = sum(registro["temp"] for registro in registros)
    return total / len(registros)
def look_and_feel():
    """Choose the UI colour palette from the average office temperature.

    Persists the palette into 'json files/colores.json' and returns a tuple
    ``(logo_path, background_color, button_color_pair)``.
    """
    temperatura = promedio_temperatura()
    foto = 'images/logo.png'
    # Bug fix: the original chain (temp<10 / 10<temp<20 / 20<temp<27 /
    # temp>27) left the exact values 10, 20 and 27 unhandled, so `color`
    # was never assigned (NameError).  The bands are now contiguous.
    if temperatura < 10:
        color = "#291E9C"
        color_boton = ["white", "#525AFE"]
    elif temperatura < 20:
        color = "#9FD9DA"
        color_boton = ["white", "#4E8498"]
    elif temperatura < 27:
        color = "#FBB40D"
        color_boton = ["black", "#FACA3C"]
    else:
        color = "#E6402A"
        color_boton = ["white", "#C82610"]
    # Update the stored palette with the chosen colours.
    colores = leer_archivo("json files/colores.json")
    colores['COLOR_FONDO'] = color
    colores['COLOR_BOTON'] = color_boton
    colores['COLOR_TEXTO'][1] = color
    # Bug fix: the original called `archivo.close` without parentheses, so
    # the file was never explicitly closed; `with` guarantees it.
    with open('json files/colores.json', 'w') as archivo:
        archivo.write(json.dumps(colores, indent=4))
    return foto, color, color_boton
def Main():
    foto, color, color_boton = look_and_feel()
    color_boton = color_boton  # NOTE(review): redundant self-assignment, kept as-is
    # Apply the temperature-based palette to every widget created below.
    sg.SetOptions(background_color=color, button_color=color_boton, text_element_background_color=color)
    layout_menu = [[sg.Text('')],
                   [sg.Button('JUGAR!')],
                   [sg.Text('')],
                   [sg.Button('CONFIGURACION')],
                   [sg.Text('')],
                   [sg.Button('INGRESO DE PALABRAS')],
                   [sg.Text('')],
                   [sg.Button('CERRAR')],
                   [sg.Image(filename=foto, background_color=color)]]
    '''Main program. Runs the main menu window and dispatches to the
    functions that drive the word-search game.
    Modules used:
    configuracion.py
    sopadeletras.py
    comprobacion.py (word entry)
    '''
    window_menu = sg.Window('Menú', size=(400, 500), font='Fixedsys', default_button_element_size=(20, 2),
                            auto_size_buttons=False, element_padding=(60, 0)).Layout(layout_menu)
    # Event loop: each button hides the menu, runs the sub-window, then
    # restores the menu; closing the window or CERRAR exits.
    while True:
        event, values = window_menu.Read()
        if event is None or event == 'CERRAR':
            window_menu.Close()
            break
        if event == 'JUGAR!':
            window_menu.Hide()
            Sopa.Main()
            window_menu.UnHide()
        if event == 'CONFIGURACION':
            window_menu.Hide()
            configuracion.Main()
            window_menu.UnHide()
        if event == 'INGRESO DE PALABRAS':
            ingreso.main()

if __name__ == '__main__':
    Main()
| [
"noreply@github.com"
] | JulianMarsal.noreply@github.com |
5ff40e3149ac45dd8fb3384d8481fc3795f2c5e9 | e0d0121d927d1b9aac42308728748f796f53cbe1 | /src/script/reflection.py | 7dda843f4cb6b5b37f0a26c22d13ce26067babde | [] | no_license | 5255b64/PyAnalysis | 9a5e11b4ece0c2ed9dd4b12168f990072aa4db25 | cb6501136cf92232aa64d18080bb69a0094f097d | refs/heads/master | 2022-12-18T13:56:25.557086 | 2020-09-22T05:22:56 | 2020-09-22T05:22:56 | 295,336,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,324 | py | import csv
from decimal import Decimal
def run(input_csv_file_addr: str, output_csv_file_addr: str):
    """Convert a coverage-progress CSV into a "test cases needed per percentage" CSV.

    Input rows whose first cell contains "覆盖率" (coverage) hold cumulative,
    non-decreasing coverage values, one column per test case; the input
    header row names those test cases.  For each such row and each integer
    percentage 0..100 of the row's final (maximum) coverage, the output row
    records the header of the first column whose coverage reaches that
    threshold.
    """
    # Bug fix: the csv module requires files opened with newline='' for both
    # readers and writers; without it the writer emits doubled line breaks
    # on Windows.
    with open(input_csv_file_addr, 'r', encoding='utf-8', newline='') as f_in:
        with open(output_csv_file_addr, 'w', encoding='utf-8', newline='') as f_out:
            f_out_csv = csv.writer(f_out)
            f_in_csv = csv.reader(f_in)
            headers = next(f_in_csv)
            f_out_csv.writerow(["百分比数/达到该百分比所需的测试用例数"] + list(range(101)))
            for row in f_in_csv:
                if len(row) > 0 and "覆盖率" in row[0]:
                    input_line_data = row
                    output_line_data = [row[0]]
                    max_coverage = row[-1]
                    # Build the coverage threshold for each percentage 0..100.
                    percentage = [row[0]]
                    for i in range(101):
                        # Coverage required to reach i percent of the maximum.
                        coverage = Decimal(max_coverage) * i / 100
                        percentage.append(coverage)
                    ptr_input = 1
                    ptr_percentage = 1
                    # Two-pointer sweep: both the row values and the thresholds
                    # are non-decreasing, so each sequence is scanned once.
                    while ptr_percentage < len(percentage):
                        if ptr_input < len(input_line_data) and Decimal(input_line_data[ptr_input]) < Decimal(
                                percentage[ptr_percentage]):
                            ptr_input = ptr_input + 1
                        else:
                            ptr = ptr_input
                            if ptr >= len(input_line_data):
                                # Clamp to the last column when the threshold
                                # exceeds every recorded value.
                                ptr = len(input_line_data) - 1
                            output_line_data.append(headers[ptr])
                            ptr_percentage = ptr_percentage + 1
                    f_out_csv.writerow(output_line_data)
if __name__ == "__main__":
    # Earlier datasets, kept commented out for reference:
    # input_csv_file_addr = "..\\..\\resource\\FxDealLogParser_1000_all.csv"
    # output_csv_file_addr = "..\\..\\resource\\FxDealLogParser_1000_all_reflection.csv"
    # run(input_csv_file_addr=input_csv_file_addr,
    #     output_csv_file_addr=output_csv_file_addr)
    #
    # input_csv_file_addr = "..\\..\\resource\\FxclDealLogParser_1000_all.csv"
    # output_csv_file_addr = "..\\..\\resource\\FxclDealLogParser_1000_all_reflection.csv"
    # run(input_csv_file_addr=input_csv_file_addr,
    #     output_csv_file_addr=output_csv_file_addr)
    # Process the three Bcbip datasets (Windows-style relative paths).
    input_csv_file_addr = "..\\..\\resource\\BcbipType1_1000_all.csv"
    output_csv_file_addr = "..\\..\\resource\\BcbipType1_1000_all_reflection.csv"
    run(input_csv_file_addr=input_csv_file_addr,
        output_csv_file_addr=output_csv_file_addr)
    input_csv_file_addr = "..\\..\\resource\\BcbipType2_1000_all.csv"
    output_csv_file_addr = "..\\..\\resource\\BcbipType2_1000_all_reflection.csv"
    run(input_csv_file_addr=input_csv_file_addr,
        output_csv_file_addr=output_csv_file_addr)
    input_csv_file_addr = "..\\..\\resource\\BcbipType3_1000_all.csv"
    output_csv_file_addr = "..\\..\\resource\\BcbipType3_1000_all_reflection.csv"
    run(input_csv_file_addr=input_csv_file_addr,
        output_csv_file_addr=output_csv_file_addr)
    # input_csv_file_addr = "..\\..\\resource\\BcbipType_all_1000_all.csv"
    # output_csv_file_addr = "..\\..\\resource\\BcbipType_all_1000_all_reflection.csv"
    # run(input_csv_file_addr=input_csv_file_addr,
    #     output_csv_file_addr=output_csv_file_addr)
"5255b64@gmail.com"
] | 5255b64@gmail.com |
d72a1163acfa6e897a9e131e9d3523083253254c | 0268f4c895f9f54e93fc7e3d2b0334206a4e6d9e | /day14/03-tk.py | a2ce5018023977ebed3408b81989034151538d9e | [] | no_license | zhangzongyan/python0702 | adebccacf26e300ec7a681bdf0f7ab7bdf228eeb | 7dcb6133d241fdf97b0812b9f25933ab389d2663 | refs/heads/master | 2020-03-22T21:05:51.218502 | 2018-08-15T09:54:42 | 2018-08-15T09:54:42 | 140,656,620 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,214 | py |
import tkinter as tk
# 按钮触发的方法
def click_button():
print("已点击")
def click_button2():
print("再次点击")
root = tk.Tk()
root.geometry("400x600")
root.title("这是一个测试窗口")
#root.minsize(width=400, height=300)
#root.maxsize(width=400, height=300)
#root.resizable(width=0,height=0) # width 0不可伸缩, 1可伸缩
'''
# 按钮类Button
button = tk.Button(root, text="确定", fg="red", bg = "black", command=click_button)
button["fg"] = "blue"
button["text"] = "退出"
button.config(fg="yellow")
button.pack(side="top", expand=0) # pack布局
button.invoke() # 触发按钮
button.config(command = click_button2)
button2 = tk.Button(root, text="退出")
button2.pack(side="left", expand=0)
'''
# 网格布局
b1 = tk.Button(root, text="1")
b2 = tk.Button(root, text="2")
b3 = tk.Button(root, text="3")
b4 = tk.Button(root, text="4")
b5 = tk.Button(root, text="5")
b1.grid(row = 1, column=1)
b2.grid(row = 1, column=0)
b3.grid(row = 1, column=2)
b4.grid(row = 2, column=0, columnspan=2)
b5.grid(row = 2, column=2)
'''
#place
b1 = tk.Button(root, text="1")
b2 = tk.Button(root, text="2")
b1.place(x=0, y= 0)
b2.place(x=100, y=100)
'''
root.mainloop() # 不结束
| [
"zhangzongyan@uplooking.com"
] | zhangzongyan@uplooking.com |
01676da4dc26233e3f4a9e9dd6096a563d95e76b | d728d6d47080ca37b0a6e8ded1baf63fe1f9e929 | /vision/datasets/VIRAT_DataLoader.py | e51b0f497b18e219a83eae30afecf4f6d01694f9 | [
"MIT"
] | permissive | Jwy-Leo/SSD_Detection | 8500de891ec4fa8b0eb7c791f16ae14d9cfd37a8 | 2d19a36ab6815a8546b3730eb6a6b1810f1df708 | refs/heads/master | 2020-07-06T06:21:57.150154 | 2019-08-17T19:09:50 | 2019-08-17T19:09:50 | 202,921,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,735 | py | import torch
import os
import sys
import numpy as np
import pickle
import torch.utils.data as data
import glob2
import logging
import cv2
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
class VIRAT_Loader(data.Dataset):
def __init__(self,image_path, anno_path, transform=None, target_transform=None, transforms=None):
super(VIRAT_Loader,self).__init__()
# check if img and anno path exist
self._check_path_exists(image_path, anno_path)
# define classes
self._class_names = {u"BackGround":0, u"unknown":1, u"person":2, u"car":3, u"vehicles":4, u"object":5, u"bike,bicycles":6}
self.class_names = ["BackGround","unknown","person","car","vehicles","object","bike,bicycles"]
self.data_size = None
self.img_pickle_list = glob2.glob(image_path+"/*.pickle")
self.anno_pickle_list = glob2.glob(anno_path+"/*.pickle")
self.transform = transform
self.transforms = transforms
self.target_transform = target_transform
self.count = 0
# https://www.twblogs.net/a/5c835297bd9eee35fc13bd96
# https://blog.csdn.net/u012436149/article/details/78545766
# Test one batch size
with open(self.img_pickle_list[0], 'rb') as f:
shuffled_img = pickle.load(f)
self.data_size = shuffled_img.shape[0]
def __getitem__(self, index):
# if self.count == 0:
# self.shuffled_img = self._load_samples()
# self.BBOXes, self.LABELes = self._load_anno()
# else:
# pass
# img = self.shuffled_img[index]
# bboxes_x0y0x1y1, labels = self.BBOXes[index], self.LABELes[index]
# if self.transform:
# img, bboxes_x0y0x1y1, labels = self.transform(self.shuffled_img[index], self.BBOXes[index], self.LABELes[index])
# if self.target_transform:
# bboxes_x0y0x1y1, labels = self.target_transform(self.BBOXes[index], self.LABELes[index])
# img = self.shuffled_img[index]
img = self._load_samples(index)
bboxes_x0y0x1y1, labels = self._load_anno(index)
logging.debug("===== before transform VIRAT img shape : {} ======".format(img.shape))
logging.debug("===== before transform VIRAT bbox shape : {} & type : {} ======".format(bboxes_x0y0x1y1.shape, bboxes_x0y0x1y1.dtype))
logging.debug("===== before transform VIRAT labels shape : {} & type : {} ======".format(labels.shape, labels.dtype))
if self.transform:
img, bboxes_x0y0x1y1, labels = self.transform(img, bboxes_x0y0x1y1, labels)
if self.target_transform:
bboxes_x0y0x1y1, labels = self.target_transform(bboxes_x0y0x1y1, labels)
labels = labels.type('torch.LongTensor')
logging.debug("===== VIRAT img shape : {} ======".format(img.shape))
logging.debug("===== VIRAT bbox shape : {} ======".format(bboxes_x0y0x1y1.shape))
logging.debug("===== VIRAT label shape : {} ======".format(labels.shape))
return img, bboxes_x0y0x1y1, labels
def __len__(self):
# return self.data_size
return self.data_size * len(self.img_pickle_list)
def _check_path_exists(self, image_path, anno_path):
print(image_path)
assert os.path.exists(image_path), 'The folder path : {} is wrong '.format(image_path)
assert os.path.exists(anno_path), 'The gound truth path : {} is wrong '.format(anno_path)
def _load_samples(self, index):
fetch_data_pickle = index // self.data_size
fetch_data_slice = index % self.data_size
if int(sys.version[0]) > 2:
with open(self.img_pickle_list[fetch_data_pickle], 'rb') as f:
shuffled_img = pickle.load(f)
else:
with open(self.img_pickle_list[fetch_data_pickle], 'rb') as f:
raise NotImplementedError("Can't load by python 2")
import pdb;pdb.set_trace()
shuffled_img = pickle.load(f, encoding = 'latin1')
# self.img_pickle_list.append(self.img_pickle_list[0])
# del self.img_pickle_list[0]
# original shape is (N.C,H,W) change to (N,W,H,C)
# shuffled_img = shuffled_img.transpose(0,3,2,1)
# self.data_size = shuffled_img.shape[0]
# self.count = 1
shuffled_img = shuffled_img[fetch_data_slice,...]
shuffled_img = shuffled_img.transpose(1,2,0)
shuffled_img = shuffled_img.astype(np.uint8)
shuffled_img = cv2.cvtColor(shuffled_img, cv2.COLOR_BGR2RGB)
return shuffled_img
def _load_anno(self, index):
fetch_data_pickle = index // self.data_size
fetch_data_slice = index % self.data_size
with open(self.anno_pickle_list[fetch_data_pickle], 'rb') as f:
shuffled_anno = pickle.load(f)
# self.anno_pickle_list.append(self.anno_pickle_list[0])
# del self.anno_pickle_list[0]
# shuffled_anno is a list
# inside the list is a array
# batch_size = len(shuffled_anno)
shuffled_anno = shuffled_anno[fetch_data_slice]
# BBOXes = []
# Labels = []
# for each_b in shuffled_anno:
# bboxes = []
# labels = []
# for sets in each_b:
# x0,y0 = sets[3], sets[4]
# x1,y1 = sets[3]+sets[5], sets[4]+sets[6]
# bboxes.append(np.array([x0,y0,x1,y1]))
# labels.append(sets[7])
# BBOXes.append(np.array(bboxes))
# Labels.append(np.array(labels))
# BBOXes = np.array(BBOXes) # [batchsize, bbox_of_each_frame, x0y0x1y1]
# LABELes = np.array(Labels) # [batchsize, labels_of_each_frame, label_classes]
bboxes = []
labels = []
for sets in shuffled_anno:
x0,y0 = sets[3], sets[4]
x1,y1 = sets[3]+sets[5], sets[4]+sets[6]
bboxes.append(np.array([x0,y0,x1,y1]))
labels.append(sets[7])
BBOXes = np.array(bboxes) # [batchsize, bbox_of_each_frame, x0y0x1y1]
LABELes = np.array(labels) # [batchsize, labels_of_each_frame, label_classes]
logging.debug("========= BBOXes shape:{} =======".format(BBOXes.shape))
logging.debug("========= LABELes shape:{} =======".format(LABELes.shape))
return BBOXes, LABELes
| [
"s0936100879@gmail.com"
] | s0936100879@gmail.com |
f0e927d2314e78e9861ea2b166aa51741f5e0816 | 648796da46791794ee5de7a8004da437c840323e | /pipeline_update/pipe_tools.py | 7c911ec5f4ef1987dbd89864e8381f7f40df112d | [] | no_license | YulianaGomez/ml_pp | 86530a2ee26bb2f39117ec6a458368a5c1c74104 | 3891350e1ef6fbf2fd29a792387182601f94c250 | refs/heads/master | 2020-03-07T19:09:25.958025 | 2018-05-25T22:34:28 | 2018-05-25T22:34:28 | 127,663,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,470 | py |
import numpy as np
import pdb
import itertools as it
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as dates
from sklearn.metrics import f1_score
import pandas as pd
import os
import sys
import datetime
import glob
import re
import graphviz
import seaborn as sns
import numpy as np
from scipy.stats import norm
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import f1_score
from sklearn.preprocessing import StandardScaler
from scipy import stats
import warnings
warnings.filterwarnings('ignore')
import statsmodels.api as sm
from patsy import dmatrices
from sklearn.metrics import roc_auc_score
from sklearn import tree
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import train_test_split
from sklearn import metrics
from sklearn.cross_validation import cross_val_score
import json
"""
Homework 2: ML Pipeline
Looking at data regarding credit distress and trying to predict who will
have credit distress in the next two years. Below is a pipeline of various
ml tools that can be used to analyze, explore, and clean data.
author: Yuliana Zamora
Date: April 17, 2018
"""
# Reading csv data from file - must be in same directory
def load_data(csv_file,nrows=None):
return pd.read_csv(csv_file,nrows=nrows)
#converts a string that is camelCase into snake_case
#https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case
def camel_case(column_name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', column_name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
#Give data with specific column
def histogram(data_frame):
sns.distplot(data_frame)
plt.show()
#Given specific column or row, returns statistical summary
def summary(data_frame):
return data_frame.describe()
#Creating a correlation heat map from data set where var_name is the
#variable which has the most correlation
def cor_heat(data_frame,var_name):
corrmat = data_frame.corr()
k = 12
cols = corrmat.nlargest(k, var_name)[var_name].index
cm = np.corrcoef(data_frame[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
plt.show()
#Scatter plots of desired variables in list
def plotCorr(dataFrame, list):
sns.set()
sns.pairplot(dataFrame[list], size = 2.5)
return plt.show()
#Shows data is missing, we should delete the corresponding variable and pretend it never existed - threshold as parameter
def miss_data(data_frame):
total = data_frame.isnull().sum().sort_values(ascending=False)
percent = (data_frame.isnull().sum()/data_frame.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
return missing_data.head(20)
#Dealing with missing data
def clean_miss(data_frame):
missing_data = miss_data(data_frame)
data_frame = data_frame.drop((missing_data[missing_data['Total'] > 1]).index,1)
data_frame.isnull().sum().max() #just checking that there's no missing data missing...
return data_frame
#Univariate analysis - scaling data, prints out low range and high range
def scale(data_frame, var_scale):
data_scaled = StandardScaler().fit_transform(data_frame[var_scale][:,np.newaxis]);
low_range = data_scaled[data_scaled[:,0].argsort()][:10]
high_range= data_scaled[data_scaled[:,0].argsort()][-10:]
print('outer range (low) of the distribution:')
print(low_range)
print('\nouter range (high) of the distribution:')
print(high_range)
#Bivariate analysis
def bivariate(data_frame, var_1,var_2):
varx = var_1
vary = var_2
data = pd.concat([data_frame[varx], data_frame[vary]], axis=1)
data.plot.scatter(x=varx, y=vary, ylim=(0,100));
plt.show()
#histogram and normal probability plot
def norm_plot(data_frame,var_name):
sns.distplot(data_frame[var_name], fit=norm);
fig = plt.figure()
res = stats.probplot((data_frame)[var_name], plot=plt)
plt.show()
#Fill in empty values
def fill_empty(data_frame,var, new_var):
return data_frame[var].fillna(new_var)
#Discretize continuous variables
def descretize(data_frame, var, num):
return pd.cut(data_frame[var],num,retbins=True)
#Creating dummy variables from categorical variables
def dummy_var(data_frame, var):
return pd.get_dummies(data_frame[var])
#Creating dictionary with no repeated column items
def column_dic(data_frame):
dict = {line[:1]:line[1:].split()[0] for line in data_frame}
print (dict)
#Logistic regression = iv, independent variable, var_list - dependent variables
def logReg(data_frame, IV, var_list):
#organizing variable list to independent and dependent variables
#taking care of hyphen if first word contains it
if '-' in var_list[0]:
formula = IV + "~"+'Q("'+var_list[0]+'")'
else:
formula = IV + "~"+var_list[0]
#taking care of the rest of the potential hyphens
for i in range(1, len(var_list)):
if '-' in var_list[i]:
formula = formula + "+"+'Q("'+var_list[i]+'")'
else:
formula = formula + "+"+ var_list[i]
y, X = dmatrices(formula,data_frame, return_type="dataframe")
y = np.ravel(y)
model = LogisticRegression()
model = model.fit(X, y)
print (pd.DataFrame(list(zip(X.columns, np.transpose(model.coef_)))))
return model.score(X,y)
#Nearest Neighbors -
def knearest(data_frame,train, test):
#data_frame = data_frame.reshape(-1,1)
X = data_frame[train].reshape(-1,1)
Y = data_frame[test].reshape(-1,1)
X_train = X[:100]
Y_train = Y[:100]
X_validate = X[100:]
Y_validate = Y[100:]
neighbor = KNeighborsClassifier(n_neighbors = 2, weights ='uniform')
neighbor.fit(X_train, Y_train)
predicted = neighbor.predict(X_validate)
print (predicted)
def merging_data(dataframe_1,dataframe_2):
return pd.merge(dataframe_1,dataframe_2)
def merging_data2(dataframe_1,dataframe_2):
dataframe_1['fully_funded'] = 1
return dataframe_1
def get_combos(param_grid_dict):
all = sorted(param_grid_dict)
all_combos=[]
combinations = it.product(*(param_grid_dict[Name] for Name in all))
for i in combinations:
lil_combo = {}
for iter,key in enumerate(all):
lil_combo[key] = i[iter]
all_combos.append(lil_combo)
return (all_combos)
#change items into binary columns
def to_binary(df,array_col):
for i in array_col:
print(i)
#df[i] = df[i].apply(lambda x: 1 if x == 't' else (0 if x =='f' else np.nan))
df[i] = df[i].apply(lambda x: 1 if x == 't' else 0)
return df
#analyzing results from classifiers
def get_metrics(y_pred, val_Y):
metric_results ={}
#loss = f1_score(y_pred,val_Y)
perf_metrics = [.01,.02,.05,.10,.20,.30,.50]
for i in perf_metrics:
#pdb.set_trace()
metric_results["precision at" + str([i])] = precision_score(val_Y, y_pred[:,0] > 1 - i)
metric_results["recall at" + str([i])] = recall_score(val_Y, y_pred[:,0] > 1 - i)
metric_results["F1 at" + str([i])] = f1_score(val_Y, y_pred[:,0] > 1 - i)
metric_results["ROC"] = roc_auc_score(val_Y, y_pred[:,0])
prec,rec,thresh = precision_recall_curve(val_Y, y_pred[:,0])
metric_results["PREC"] = prec.tolist()
metric_results["REC"] = rec.tolist()
metric_results["THRESH"] = thresh.tolist()
return (metric_results)
#plotting precisison and recal graphs, input one column for y_pred in class_comp method
def plot_precision_recall(val_Y,y_pred,model_name,output_type):
#pdb.set_trace()
prec,rec,thresh = precision_recall_curve(val_Y, y_pred)
prec = prec[:-1]
recall_curve = rec[:-1]
pct_above_per_thresh = []
number_scored = len(y_pred)
for value in thresh:
num_above_thresh = len(y_pred[y_pred>=value])
pct_above_thresh = num_above_thresh / float(len(y_pred))
if pct_above_thresh <= 1:
pct_above_per_thresh.append(pct_above_thresh)
else:
pdb.set_trace()
pct_above_per_thresh = np.array(pct_above_per_thresh)
plt.clf()
fig, ax1 = plt.subplots()
ax1.plot(pct_above_per_thresh, prec, 'b')
print("PLOTTING STUFF")
print(pct_above_per_thresh)
print(prec[:-1])
ax1.set_xlabel('percent of population')
ax1.set_ylabel('precision', color='b')
ax2 = ax1.twinx()
ax2.plot(pct_above_per_thresh, recall_curve, 'r')
ax2.set_ylabel('recall', color='r')
ax1.set_ylim([0,1])
ax1.set_ylim([0,1])
ax2.set_xlim([0,1])
name = model_name
plt.title(name)
#pdb.set_trace()
if (output_type == 'save'):
plt.savefig(name)
elif (output_type == 'show'):
plt.show()
else:
plt.show()
def temp_val(data_frame,target,features):
models_params = {
LogisticRegression: {'C':[10**-1,10**-2,10**-3],'penalty':['l1','l2']},
KNeighborsClassifier:{'n_neighbors':[5,10,25,100], 'p':[1,2,3],'n_jobs':[2]},
DecisionTreeClassifier:{'max_depth': [5,10,15],'min_samples_leaf':[2,5,10]},
RandomForestClassifier:{'n_estimators':[100] , 'criterion':['gini','entropy'], 'max_features':['sqrt','log2'] , 'max_depth':[5,10],'n_jobs':[4], 'min_samples_leaf':[10,50,100]},
GradientBoostingClassifier:{'learning_rate':[.1,.01],'n_estimators':[100] ,'max_features':['sqrt','log2'] , 'max_depth':[1,2,3]},
BaggingClassifier:{'max_samples':[.1,.25,.65], 'n_jobs':[4]},
#SVC:{'kernel':['linear','rbf'],'gamma':[10,1,.1,.01], 'C':[10,1,.1,.01], 'probability':[True]}
}
# start time of our data
#start_time = '2002-09-13'
start_time_date = data_frame['date_posted'].min()
#last date of data including labels and outcomes that we have
#end_time = '2014-05-12'
end_time_date = data_frame['date_posted'].max()
#how far out do we want to predict (let's say in months for now)
prediction_windows = [1]
#how often is this prediction being made? every day? every month? once a year?
update_window = 12
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
#start_time_date = datetime.strptime(start_time, '%Y-%m-%d')
#end_time_date = datetime.strptime(end_time, '%Y-%m-%d')
for prediction_window in prediction_windows:
print(start_time_date,end_time_date)
test_end_time = end_time_date
while (test_end_time >= start_time_date + 2 * relativedelta(months=+prediction_window)):
test_start_time = test_end_time - relativedelta(months=+prediction_window)
train_end_time = test_start_time - relativedelta(days=+1) # minus 1 day
train_start_time = train_end_time - relativedelta(months=+prediction_window)
while (train_start_time >= start_time_date ):
#pdb.set_trace()
print (train_start_time,train_end_time,test_start_time,test_end_time, prediction_window)
train_start_time -= relativedelta(months=+prediction_window)
# call function to get data
train_set, test_set = extract_train_test_sets(train_start_time, train_end_time, test_start_time, test_end_time,data_frame)
#pdb.set_trace()
class_comp(train_set,test_set,target,features,models_params)
# fit on train data
# predict on test data
test_end_time -= relativedelta(months=+update_window)
#Splitting the data for training and testing sets
def extract_train_test_sets(train_start_time, train_end_time, test_start_time, test_end_time, df):
train_set = df[(df['date_posted'] > train_start_time) & (df['date_posted']<train_end_time)]
test_set = df[(df['date_posted'] > test_start_time) & (df['date_posted']<test_end_time)]
return train_set, test_set
def class_comp(train_set,test_set,target,features,models_params):
out = open("out.txt","a")
X = train_set[features]
y = train_set[target]
metrics = {}
#validation
val_X = test_set[features]
val_Y = test_set[target]
for m, m_param in models_params.items():
listofparam = get_combos(m_param)
print("start training for {0}".format(m))
out.write("start training for {0}\n".format(m))
for params in listofparam:
print (params)
out.write(json.dumps(params))
model = m(**params)
model.fit(X,y)
#y_pred vector of prob estimates
#val_y are true values
y_pred = model.predict_proba(val_X)
metrics[m] = get_metrics(y_pred,val_Y)
print("this is valy")
print (val_Y)
print("this is y_pred")
print (y_pred)
plot_precision_recall(val_Y, y_pred[:,0],model,'show')
out.write("----------------------------\n")
out.write("Using %s classifier \n" % models_params)
out.write(json.dumps(metrics[m]))
| [
"ygomez297@gmail.com"
] | ygomez297@gmail.com |
7272ed278e983c56a0003cccbd7ac2dbb0ddce94 | 86ca276527868f0eebc98d2f72f0ea8804defb47 | /python/MIS-5400/exam1/talk2.py | b355cd16d7698af04b1efacdf0ae078d0d805146 | [] | no_license | RussellMoore1987/resources | 73cacfc17658ece645316a26084a4a49aca928d2 | 4876813fd7f29701984b25e651189ad363049d43 | refs/heads/master | 2023-08-09T06:49:21.869110 | 2023-07-27T16:02:42 | 2023-07-27T16:02:42 | 151,770,329 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,808 | py | # Write the following code in a .py file and upload it:
# * to run: python python/MIS-5400/exam1/talk.py
# Define a function named "talk" that accepts the following parameters:
# name - required
# age - optional parameter - default value should be 21
# occupation - required parameter
# In the body of the function do the following:
# Make sure the name contains no numeric digits (0-9) - If not raise Exception
# Make sure the age is not a negative value or over 150 - If not raise Exception
# If the first letter in the name is between 'A' and 'M' then return a string result of 'Group 1'
# If the first letter in the name is between 'N' and 'Z' then return a string result of 'Group 2'
# Define a function named "talk" that accepts the following parameters:
# name - required
# age - optional parameter - default value should be 21
# occupation - required parameter
def talk(name, occupation, age=21):
# In the body of the function do the following:
# Make sure the name contains no numeric digits (0-9) - If not raise Exception
if not name.isalpha():
raise Exception('The parameter "name" can have no numeric digits.')
# Make sure the age is not a negative value or over 150 - If not raise Exception
if age < 0 or age > 150:
raise Exception('The parameter "age" must be between the digits of 0 - 150.')
# If the first letter in the name is between 'A' and 'M' then return a string result of 'Group 1'
if name[0].lower() >= 'a' and name[0].lower() <= 'm':
return 'Group 1'
# If the first letter in the name is between 'N' and 'Z' then return a string result of 'Group 2'
if name[0].lower() >= 'n' and name[0].lower() <= 'z':
return 'Group 2'
# Using the talk function defined above, show an example innovation of the function using the following, alternative ways:
# 1) Using all of the default parameters
# 2) Overriding the default age parameter
# 3) Using named-parameters, invoke with the parameters in the following order: age, name, occupation.
# 4) Invoke the function using a try / except statement that "handles" the raised exception and prints out "Try Again..."
# # code starting here ===============================================================================
# 1) Using all of the default parameters
print(talk('Russell', 'Developer'))
# 2) Overriding the default age parameter
print(talk('Sam', 'UX Designer', 33))
# 3) Using named-parameters, invoke with the parameters in the following order: age, name, occupation.
print(talk(age=33, name='Jill', occupation='UI Designer'))
# 4) Invoke the function using a try / except statement that "handles" the raised exception and prints out "Try Again..."
try:
talk('Sam', 'UX Designer', -10)
except Exception as e:
print(e)
print("Try Again...") | [
"truthandgoodness87@gmail.com"
] | truthandgoodness87@gmail.com |
15a05b515ac2a5fa114c23136a7a3cd7a6f74e1d | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/clips_pattern/pattern-master/pattern/text/de/inflect.py | 69c40f1e7abcdb645f18e27579ac930f4b905f6d | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 29,115 | py | #### PATTERN | DE | INFLECT ########################################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2012 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
####################################################################################################
# Regular expressions-based rules for German word inflection:
# - pluralization and singularization of nouns and adjectives,
# - conjugation of verbs,
# - attributive and predicative of adjectives,
# - comparative and superlative of adjectives.
# Accuracy (measured on CELEX German morphology word forms):
# 75% for gender()
# 72% for pluralize()
# 84% for singularize() (for nominative)
# 87% for Verbs.find_lemma()
# 87% for Verbs.find_lexeme()
# 98% for predicative
import os
import sys
import re
# Absolute directory of this module on disk.
# __file__ is undefined in some environments (e.g. a frozen executable or
# code pasted into an interactive interpreter); fall back to "" in that case.
try:
    MODULE = os.path.dirname(os.path.realpath(__file__))
except NameError:
    # Catch only NameError (missing __file__) instead of a bare except,
    # so unrelated errors are not silently swallowed.
    MODULE = ""
sys.path.insert(0, os.path.join(MODULE, "..", "..", "..", ".."))
from pattern.text import Verbs as _Verbs
from pattern.text import (
INFINITIVE, PRESENT, PAST, FUTURE,
FIRST, SECOND, THIRD,
SINGULAR, PLURAL, SG, PL,
INDICATIVE, IMPERATIVE, SUBJUNCTIVE,
PROGRESSIVE,
PARTICIPLE, GERUND
)
sys.path.pop(0)
# Penn-style part-of-speech tags used throughout this module.
VERB, NOUN, ADJECTIVE, ADVERB = "VB", "NN", "JJ", "RB"

# Lowercase vowels, "y" included; the regex also matches uppercase (re.I).
VOWELS = "aeiouy"
re_vowel = re.compile(r"a|e|i|o|u|y", re.I)

def is_vowel(ch):
    """ Returns True if the given (lowercase) character is a vowel. """
    return ch in VOWELS
#### ARTICLE #######################################################################################
# German inflection depends on gender, role and number + the determiner (if any).

# Inflection gender.
# Masculine is the most common, so it is the default for all functions.
# NOTE: this rebinds PLURAL and PL (imported from pattern.text above) to the
# single-letter code "p", so they double as gender codes next to "m", "f", "n".
MASCULINE, FEMININE, NEUTER, PLURAL = \
    MALE, FEMALE, NEUTRAL, PLURAL = \
        M, F, N, PL = "m", "f", "n", "p"

# Inflection role (grammatical case).
# - nom = subject, "Der Hund bellt" (the dog barks).
# - acc = object, "Das Mädchen küsst den Hund" (the girl kisses the dog).
# - dat = object (indirect), "Der Mann gibt einen Knochen zum Hund" (the man gives the dog a bone).
# - gen = property, "die Knochen des Hundes" (the dog's bone).
NOMINATIVE, ACCUSATIVE, DATIVE, GENITIVE = SUBJECT, OBJECT, INDIRECT, PROPERTY = \
    "nominative", "accusative", "dative", "genitive"
# Definite articles, keyed on (gender, case): ("m", "nom") => "der".
# Keys use the single-letter gender codes and the first three letters of the role.
article_definite = {
    ("m", "nom"): "der", ("f", "nom"): "die", ("n", "nom"): "das", ("p", "nom"): "die",
    ("m", "acc"): "den", ("f", "acc"): "die", ("n", "acc"): "das", ("p", "acc"): "die",
    ("m", "dat"): "dem", ("f", "dat"): "der", ("n", "dat"): "dem", ("p", "dat"): "den",
    ("m", "gen"): "des", ("f", "gen"): "der", ("n", "gen"): "des", ("p", "gen"): "der",
}

# Indefinite articles, keyed on (gender, case): ("m", "nom") => "ein".
article_indefinite = {
    ("m", "nom"): "ein" , ("f", "nom"): "eine" , ("n", "nom"): "ein" , ("p", "nom"): "eine",
    ("m", "acc"): "einen", ("f", "acc"): "eine" , ("n", "acc"): "ein" , ("p", "acc"): "eine",
    ("m", "dat"): "einem", ("f", "dat"): "einer", ("n", "dat"): "einem", ("p", "dat"): "einen",
    ("m", "gen"): "eines", ("f", "gen"): "einer", ("n", "gen"): "eines", ("p", "gen"): "einer",
}
def definite_article(word, gender=MALE, role=SUBJECT):
    """ Returns the definite article (der/die/das/die) for the given word.
        Only gender ("m"/"f"/"n"/"p") and role ("nom"/"acc"/"dat"/"gen") drive
        the lookup; the word itself is part of the API but is not inspected.
        Returns None for an unknown gender/role combination.
    """
    key = (gender[:1].lower(), role[:3].lower())
    return article_definite.get(key)
def indefinite_article(word, gender=MALE, role=SUBJECT):
    """ Returns the indefinite article (ein/eine/einen/...) for the given word.
        Only gender and role drive the lookup; the word itself is not inspected.
        Returns None for an unknown gender/role combination.
    """
    key = (gender[:1].lower(), role[:3].lower())
    return article_indefinite.get(key)
# Article "function" selectors accepted by article() and referenced().
DEFINITE = "definite"
INDEFINITE = "indefinite"

def article(word, function=INDEFINITE, gender=MALE, role=SUBJECT):
    """ Returns the indefinite (ein) or definite (der/die/das/die) article for the given word.
    """
    if function == DEFINITE:
        return definite_article(word, gender, role)
    return indefinite_article(word, gender, role)

# Alias, so that referenced() can take a parameter named "article"
# without shadowing the function above.
_article = article

def referenced(word, article=INDEFINITE, gender=MALE, role=SUBJECT):
    """ Returns a string with the article + the word, e.g. "ein Hund".
    """
    a = _article(word, article, gender, role)
    return "%s %s" % (a, word)
#### GENDER #########################################################################################
# Suffix tables used by gender() below; deterministic suffixes are tried first,
# then the two-letter majority-vote endings.

# Suffixes that mark a noun as masculine (e.g. -ismus, -ling).
gender_masculine = (
    "ant", "ast", "ich", "ig", "ismus", "ling", "or", "us"
)

# Suffixes that mark a noun as feminine (e.g. -heit, -ung).
# NOTE(review): "schaf" looks like a typo for the feminine suffix "schaft" --
# confirm; words in -schaft are still caught as feminine by the majority-vote
# "ft" ending below, but bare "...schaf" words match here instead.
gender_feminine = (
    "a", "anz", "ei", "enz", "heit", "ie", "ik", "in", "keit", "schaf", "sion", "sis",
   u"tät", "tion", "ung", "ur"
)

# Suffixes that mark a noun as neuter (e.g. -chen, -lein).
gender_neuter = (
    "chen", "icht", "il", "it", "lein", "ma", "ment", "tel", "tum", "um","al", "an", "ar",
   u"ät", "ent", "ett", "ier", "iv", "o", "on", "nis", "sal"
)

# Fallback: two-letter word endings grouped by the gender they most often
# indicate (majority vote). gender() iterates this dict in insertion order
# (masculine, feminine, neuter).
gender_majority_vote = {
    MASCULINE: (
        "ab", "af", "ag", "ak", "am", "an", "ar", "at", "au", "ch", "ck", "eb", "ef", "eg",
        "el", "er", "es", "ex", "ff", "go", "hn", "hs", "ib", "if", "ig", "ir", "kt", "lf",
        "li", "ll", "lm", "ls", "lt", "mi", "nd", "nk", "nn", "nt", "od", "of", "og", "or",
        "pf", "ph", "pp", "ps", "rb", "rd", "rf", "rg", "ri", "rl", "rm", "rr", "rs", "rt",
        "rz", "ss", "st", "tz", "ub", "uf", "ug", "uh", "un", "us", "ut", "xt", "zt"
    ),
    FEMININE: (
        "be", "ce", "da", "de", "dt", "ee", "ei", "et", "eu", "fe", "ft", "ge", "he", "hr",
        "ht", "ia", "ie", "ik", "in", "it", "iz", "ka", "ke", "la", "le", "me", "na", "ne",
        "ng", "nz", "on", "pe", "ra", "re", "se", "ta", "te", "ue", "ur", "ve", "ze"
    ),
    NEUTER: (
        "ad", "al", "as", "do", "ed", "eh", "em", "en", "hl", "id", "il", "im", "io", "is",
        "iv", "ix", "ld", "lk", "lo", "lz", "ma", "md", "mm", "mt", "no", "ns", "ol", "om",
        "op", "os", "ot", "pt", "rk", "rn", "ro", "to", "tt", "ul", "um", "uz"
    )
}
def gender(word, pos=NOUN):
    """ Returns the gender (MALE, FEMALE or NEUTRAL) for nouns (majority vote).
        Returns None for words that are not nouns, and for nouns
        whose suffix matches none of the tables above.
    """
    w = word.lower()
    if pos != NOUN:
        return None
    # Deterministic suffix rules first (baseline = 32%).
    for suffixes, g in (
      (gender_masculine, MASCULINE),
      (gender_feminine, FEMININE),
      (gender_neuter, NEUTER)):
        if w.endswith(suffixes):
            return g
    # Fall back to the two-letter majority-vote endings.
    for g, suffixes in gender_majority_vote.items():
        if w.endswith(suffixes):
            return g
    return None
#### PLURALIZE ######################################################################################

# (singular suffix, plural suffix) replacement pairs for nouns,
# e.g. "atz" => "ätze" turns "Schatz" into "Schätze".
# pluralize() scans the list in order against the capitalized word and
# applies the first suffix that matches (str.endswith, case-sensitive).
# NOTE(review): ("rau", u"raün") -- "raün" looks garbled ("Frau" => "Frauen"
# would suggest "rauen"); confirm against the upstream table.
plural_inflections = [
    ("aal", u"äle"  ), ("aat", "aaten"), ( "abe", "aben" ), ("ach", u"ächer"), ("ade", "aden"  ),
    ("age", "agen"  ), ("ahn", "ahnen"), ( "ahr", "ahre" ), ("akt", "akte"  ), ("ale", "alen"  ),
    ("ame", "amen"  ), ("amt", u"ämter"), ( "ane", "anen" ), ("ang", u"änge" ), ("ank", u"änke" ),
    ("ann", u"änner" ), ("ant", "anten"), ( "aph", "aphen"), ("are", "aren"  ), ("arn", "arne"  ),
    ("ase", "asen"  ), ("ate", "aten"  ), ( "att", u"ätter"), ("atz", u"ätze" ), ("aum", "äume"  ),
    ("aus", u"äuser" ), ("bad", u"bäder"), ( "bel", "bel"  ), ("ben", "ben"   ), ("ber", "ber"   ),
    ("bot", "bote"  ), ("che", "chen"  ), ( "chs", "chse" ), ("cke", "cken"  ), ("del", "del"   ),
    ("den", "den"   ), ("der", "der"   ), ( "ebe", "ebe"  ), ("ede", "eden"  ), ("ehl", "ehle"  ),
    ("ehr", "ehr"   ), ("eil", "eile"  ), ( "eim", "eime" ), ("eis", "eise"  ), ("eit", "eit"   ),
    ("ekt", "ekte"  ), ("eld", "elder" ), ( "ell", "elle" ), ("ene", "enen"  ), ("enz", "enzen" ),
    ("erd", "erde"  ), ("ere", "eren"  ), ( "erk", "erke" ), ("ern", "erne"  ), ("ert", "erte"  ),
    ("ese", "esen"  ), ("ess", "esse"  ), ( "est", "este" ), ("etz", "etze"  ), ("eug", "euge"  ),
    ("eur", "eure"  ), ("fel", "fel"   ), ( "fen", "fen"  ), ("fer", "fer"   ), ("ffe", "ffen"  ),
    ("gel", "gel"   ), ("gen", "gen"   ), ( "ger", "ger"  ), ("gie", "gie"   ), ("hen", "hen"   ),
    ("her", "her"   ), ("hie", "hien"  ), ( "hle", "hlen" ), ("hme", "hmen"  ), ("hne", "hnen"  ),
    ("hof", u"höfe" ), ("hre", "hren"  ), ( "hrt", "hrten"), ("hse", "hsen"  ), ("hte", "hten"  ),
    ("ich", "iche"  ), ("ick", "icke"  ), ( "ide", "iden" ), ("ieb", "iebe"  ), ("ief", "iefe"  ),
    ("ieg", "iege"  ), ("iel", "iele"  ), ( "ien", "ium"  ), ("iet", "iete"  ), ("ife", "ifen"  ),
    ("iff", "iffe"  ), ("ift", "iften" ), ( "ige", "igen" ), ("ika", "ikum"  ), ("ild", "ilder" ),
    ("ilm", "ilme"  ), ("ine", "inen"  ), ( "ing", "inge" ), ("ion", "ionen" ), ("ise", "isen"  ),
    ("iss", "isse"  ), ("ist", "isten" ), ( "ite", "iten" ), ("itt", "itte"  ), ("itz", "itze"  ),
    ("ium", "ium"   ), ("kel", "kel"   ), ( "ken", "ken"  ), ("ker", "ker"   ), ("lag", u"läge" ),
    ("lan", u"läne" ), ("lar", "lare"  ), ( "lei", "leien"), ("len", "len"   ), ("ler", "ler"   ),
    ("lge", "lgen"  ), ("lie", "lien"  ), ( "lle", "llen" ), ("mel", "mel"   ), ("mer", "mer"   ),
    ("mme", "mmen"  ), ("mpe", "mpen"  ), ( "mpf", "mpfe" ), ("mus", "mus"   ), ("mut", "mut"   ),
    ("nat", "nate"  ), ("nde", "nden"  ), ( "nen", "nen"  ), ("ner", "ner"   ), ("nge", "ngen"  ),
    ("nie", "nien"  ), ("nis", "nisse" ), ( "nke", "nken" ), ("nkt", "nkte"  ), ("nne", "nnen"  ),
    ("nst", "nste"  ), ("nte", "nten"  ), ( "nze", "nzen" ), ("ock", u"öcke" ), ("ode", "oden"  ),
    ("off", "offe"  ), ("oge", "ogen"  ), ( "ohn", u"öhne" ), ("ohr", "ohre" ), ("olz", u"ölzer" ),
    ("one", "onen"  ), ("oot", "oote"  ), ( "opf", u"öpfe" ), ("ord", "orde" ), ("orm", "ormen" ),
    ("orn", u"örner" ), ("ose", "osen" ), ( "ote", "oten" ), ("pel", "pel"   ), ("pen", "pen"   ),
    ("per", "per"   ), ("pie", "pien"  ), ( "ppe", "ppen" ), ("rag", u"räge" ), ("rau", u"raün" ),
    ("rbe", "rben"  ), ("rde", "rden"  ), ( "rei", "reien"), ("rer", "rer"   ), ("rie", "rien"  ),
    ("rin", "rinnen"), ("rke", "rken"  ), ( "rot", "rote" ), ("rre", "rren"  ), ("rte", "rten"  ),
    ("ruf", "rufe"  ), ("rzt", "rzte"  ), ( "sel", "sel"  ), ("sen", "sen"   ), ("ser", "ser"   ),
    ("sie", "sien"  ), ("sik", "sik"   ), ( "sse", "ssen" ), ("ste", "sten"  ), ("tag", "tage"  ),
    ("tel", "tel"   ), ("ten", "ten"   ), ( "ter", "ter"  ), ("tie", "tien"  ), ("tin", "tinnen"),
    ("tiv", "tive"  ), ("tor", "toren" ), ( "tte", "tten" ), ("tum", "tum"   ), ("tur", "turen" ),
    ("tze", "tzen"  ), ("ube", "uben"  ), ( "ude", "uden" ), ("ufe", "ufen"  ), ("uge", "ugen"  ),
    ("uhr", "uhren" ), ("ule", "ulen"  ), ( "ume", "umen" ), ("ung", "ungen" ), ("use", "usen"  ),
    ("uss", u"üsse" ), ("ute", "uten"  ), ( "utz", "utz"  ), ("ver", "ver"   ), ("weg", "wege"  ),
    ("zer", "zer"   ), ("zug", u"züge" ), (u"ück", u"ücke" )
]
def pluralize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}):
    """ Returns the plural of a given word.
        The inflection is based on probability rather than gender and role:
        a suffix lookup table (plural_inflections) is tried first, then a
        series of increasingly generic default rules.
        The optional custom dictionary maps known words to their plural.
        Note: `custom` is a mutable default argument, but it is only read
        here (never mutated), so sharing the default across calls is safe.
    """
    w = word.lower().capitalize()
    if word in custom:
        return custom[word]
    if pos == NOUN:
        # 1) Table lookup: known three-letter suffixes and their plural forms.
        for a, b in plural_inflections:
            if w.endswith(a):
                return w[:-len(a)] + b
        # 2) Default rules (baseline = 69%).
        if w.startswith("ge"):
            return w
        if w.endswith("gie"):
            return w
        if w.endswith("e"):
            return w + "n"
        # NOTE(review): this maps an already-plural-looking -ien form to the
        # Latin -ium singular (Medien => Medium); kept as in the rule set.
        if w.endswith("ien"):
            return w[:-2] + "um"
        if w.endswith(("au", "ein", "eit", "er", "en", "el", "chen", "mus", u"tät", "tik", "tum", "u")):
            return w
        if w.endswith(("ant", "ei", "enz", "ion", "ist", "or", "schaft", "tur", "ung")):
            return w + "en"
        if w.endswith("in"):
            return w + "nen"
        if w.endswith("nis"):
            return w + "se"
        if w.endswith(("eld", "ild", "ind")):
            return w + "er"
        if w.endswith("o"):
            return w + "s"
        if w.endswith("a"):
            return w[:-1] + "en"
        # Inflect common umlaut vowels: Kopf => Köpfe.
        if w.endswith(("all", "and", "ang", "ank", "atz", "auf", "ock", "opf", "uch", "uss")):
            umlaut = w[-3]
            umlaut = umlaut.replace("a", u"ä")
            umlaut = umlaut.replace("o", u"ö")
            umlaut = umlaut.replace("u", u"ü")
            return w[:-3] + umlaut + w[-2:] + "e"
        # Irregular umlaut plurals that the generic rule above misses.
        for a, b in (
          ("ag", u"äge"),
          ("ann", u"änner"),
          ("aum", u"äume"),
          ("aus", u"äuser"),
          ("zug", u"züge")):
            if w.endswith(a):
                return w[:-len(a)] + b
        # Fallback: append -e.
        return w + "e"
    return w
#### SINGULARIZE ###################################################################################
singular_inflections = [
( "innen", "in" ), (u"täten", u"tät"), ( "ahnen", "ahn"), ( "enten", "ent"), (u"räser", "ras"),
( "hrten", "hrt"), (u"ücher", "uch"), (u"örner", "orn"), (u"änder", "and"), (u"ürmer", "urm"),
( "ahlen", "ahl"), ( "uhren", "uhr"), (u"ätter", "att"), ( "suren", "sur"), ( "chten", "cht"),
( "kuren", "kur"), ( "erzen", "erz"), (u"güter", "gut"), ( "soren", "sor"), (u"änner", "ann"),
(u"äuser", "aus"), ( "taten", "tat"), ( "isten", "ist"), (u"bäder", "bad"), (u"ämter", "amt"),
( "eiten", "eit"), ( "raten", "rat"), ( "ormen", "orm"), ( "ionen", "ion"), ( "nisse", "nis"),
(u"ölzer", "olz"), ( "ungen", "ung"), (u"läser", "las"), (u"ächer", "ach"), ( "urten", "urt"),
( "enzen", "enz"), ( "aaten", "aat"), ( "aphen", "aph"), (u"öcher", "och"), (u"türen", u"tür"),
( "sonen", "son"), (u"ühren", u"ühr"), (u"ühner", "uhn"), ( "toren", "tor"), (u"örter", "ort"),
( "anten", "ant"), (u"räder", "rad"), ( "turen", "tur"), (u"äuler", "aul"), ( u"änze", "anz"),
( "tten", "tte"), ( "mben", "mbe"), ( u"ädte", "adt"), ( "llen", "lle"), ( "ysen", "yse"),
( "rben", "rbe"), ( "hsen", "hse"), ( u"raün", "rau"), ( "rven", "rve"), ( "rken", "rke"),
( u"ünge", "ung"), ( u"üten", u"üte"), ( "usen", "use"), ( "tien", "tie"), ( u"läne", "lan"),
( "iben", "ibe"), ( "ifen", "ife"), ( "ssen", "sse"), ( "gien", "gie"), ( "eten", "ete"),
( "rden", "rde"), ( u"öhne", "ohn"), ( u"ärte", "art"), ( "ncen", "nce"), ( u"ünde", "und"),
( "uben", "ube"), ( "lben", "lbe"), ( u"üsse", "uss"), ( "agen", "age"), ( u"räge", "rag"),
( "ogen", "oge"), ( "anen", "ane"), ( "sken", "ske"), ( "eden", "ede"), ( u"össe", "oss"),
( u"ürme", "urm"), ( "ggen", "gge"), ( u"üren", u"üre"), ( "nten", "nte"), ( u"ühle", u"ühl"),
( u"änge", "ang"), ( "mmen", "mme"), ( "igen", "ige"), ( "nken", "nke"), ( u"äcke", "ack"),
( "oden", "ode"), ( "oben", "obe"), ( u"ähne", "ahn"), ( u"änke", "ank"), ( "inen", "ine"),
( "seen", "see"), ( u"äfte", "aft"), ( "ulen", "ule"), ( u"äste", "ast"), ( "hren", "hre"),
( u"öcke", "ock"), ( "aben", "abe"), ( u"öpfe", "opf"), ( "ugen", "uge"), ( "lien", "lie"),
( u"ände", "and"), ( u"ücke", u"ück"), ( "asen", "ase"), ( "aden", "ade"), ( "dien", "die"),
( "aren", "are"), ( "tzen", "tze"), ( u"züge", "zug"), ( u"üfte", "uft"), ( "hien", "hie"),
( "nden", "nde"), ( u"älle", "all"), ( "hmen", "hme"), ( "ffen", "ffe"), ( "rmen", "rma"),
( "olen", "ole"), ( "sten", "ste"), ( "amen", "ame"), ( u"höfe", "hof"), ( u"üste", "ust"),
( "hnen", "hne"), ( u"ähte", "aht"), ( "umen", "ume"), ( "nnen", "nne"), ( "alen", "ale"),
( "mpen", "mpe"), ( "mien", "mie"), ( "rten", "rte"), ( "rien", "rie"), ( u"äute", "aut"),
( "uden", "ude"), ( "lgen", "lge"), ( "ngen", "nge"), ( "iden", "ide"), ( u"ässe", "ass"),
( "osen", "ose"), ( "lken", "lke"), ( "eren", "ere"), ( u"üche", "uch"), ( u"lüge", "lug"),
( "hlen", "hle"), ( "isen", "ise"), ( u"ären", u"äre"), ( u"töne", "ton"), ( "onen", "one"),
( "rnen", "rne"), ( u"üsen", u"üse"), ( u"haün", "hau"), ( "pien", "pie"), ( "ihen", "ihe"),
( u"ürfe", "urf"), ( "esen", "ese"), ( u"ätze", "atz"), ( "sien", "sie"), ( u"läge", "lag"),
( "iven", "ive"), ( u"ämme", "amm"), ( u"äufe", "auf"), ( "ppen", "ppe"), ( "enen", "ene"),
( "lfen", "lfe"), ( u"äume", "aum"), ( "nien", "nie"), ( "unen", "une"), ( "cken", "cke"),
( "oten", "ote"), ( "mie", "mie"), ( "rie", "rie"), ( "sis", "sen"), ( "rin", "rin"),
( "ein", "ein"), ( "age", "age"), ( "ern", "ern"), ( "ber", "ber"), ( "ion", "ion"),
( "inn", "inn"), ( "ben", "ben"), ( u"äse", u"äse"), ( "eis", "eis"), ( "hme", "hme"),
( "iss", "iss"), ( "hen", "hen"), ( "fer", "fer"), ( "gie", "gie"), ( "fen", "fen"),
( "her", "her"), ( "ker", "ker"), ( "nie", "nie"), ( "mer", "mer"), ( "ler", "ler"),
( "men", "men"), ( "ass", "ass"), ( "ner", "ner"), ( "per", "per"), ( "rer", "rer"),
( "mus", "mus"), ( "abe", "abe"), ( "ter", "ter"), ( "ser", "ser"), ( u"äle", "aal"),
( "hie", "hie"), ( "ger", "ger"), ( "tus", "tus"), ( "gen", "gen"), ( "ier", "ier"),
( "ver", "ver"), ( "zer", "zer"),
]
singular = {
u"Löwen": u"Löwe",
}
def singularize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}):
    """ Returns the singular of a given word.
        The inflection is based on probability rather than gender and role:
        explicit exceptions and a suffix lookup table are tried first, then
        common plural suffixes are stripped.
        Note: `custom` is a mutable default argument, but it is only read
        here (never mutated), so sharing the default across calls is safe.
    """
    w = word.lower().capitalize()
    if word in custom:
        return custom[word]
    # Irregular exceptions (e.g. Löwen => Löwe).
    if word in singular:
        return singular[word]
    if pos == NOUN:
        # 1) Table lookup: known plural suffixes and their singular forms.
        for a, b in singular_inflections:
            if w.endswith(a):
                return w[:-len(a)] + b
        # 2) Default rule: strip known plural suffixes (baseline = 51%).
        for suffix in ("nen", "en", "n", "e", "er", "s"):
            if w.endswith(suffix):
                w = w[:-len(suffix)]
                break
        # Corrections (these add about 1% accuracy):
        if w.endswith(("rr", "rv", "nz")):
            return w + "e"
        return w
    return w
#### VERB CONJUGATION ##############################################################################
# The verb table was trained on CELEX and contains the top 2000 most frequent verbs.
prefix_inseparable = (
"be", "emp", "ent", "er", "ge", "miss", u"über", "unter", "ver", "voll", "wider", "zer"
)
prefix_separable = (
"ab", "an", "auf", "aus", "bei", "durch", "ein", "fort", "mit", "nach", "vor", "weg",
u"zurück", "zusammen", "zu", "dabei", "daran", "da", "empor", "entgegen", "entlang",
"fehl", "fest", u"gegenüber", "gleich", "herab", "heran", "herauf", "heraus", "herum",
"her", "hinweg", "hinzu", "hin", "los", "nieder", "statt", "umher", "um", "weg",
"weiter", "wieder", "zwischen"
) + ( # There are many more...
"dort", "fertig", "frei", "gut", "heim", "hoch", "klein", "klar", "nahe", "offen", "richtig"
)
prefixes = prefix_inseparable + prefix_separable
def encode_sz(s):
    """ Replaces every Eszett (ß) in the string with "ss"."""
    return "ss".join(s.split(u"ß"))
def decode_sz(s):
    """ Replaces every "ss" in the string with an Eszett (ß)."""
    return u"ß".join(s.split("ss"))
class Verbs(_Verbs):
    """German verb conjugation.
       Backed by a lexicon of the ~2000 most frequent verbs (trained on
       CELEX, loaded from de-verbs.txt), with rule-based fallbacks for
       lemmatization (find_lemma) and conjugation (find_lexeme).
    """
    def __init__(self):
        # `format` maps lexicon file columns to tense slots; `default`
        # maps missing slots to the column they fall back to.
        _Verbs.__init__(self, os.path.join(MODULE, "de-verbs.txt"),
            language = "de",
              format = [0, 1, 2, 3, 4, 5, 8, 17, 18, 19, 20, 21, 24, 52, 54, 53, 55, 56, 58, 59, 67, 68, 70, 71],
             default = {6: 4, 22: 20, 57: 55, 60: 58, 69: 67, 72: 70}
            )
    def find_lemma(self, verb):
        """ Returns the base form of the given inflected verb, using a rule-based approach.
        """
        v = verb.lower()
        # Common prefixes: be-finden and emp-finden probably inflect like finden.
        if not (v.startswith("ge") and v.endswith("t")): # Probably gerund.
            for prefix in prefixes:
                if v.startswith(prefix) and v[len(prefix):] in self.inflections:
                    return prefix + self.inflections[v[len(prefix):]]
        # Common suffixes: setze nieder => niedersetzen.
        # Splits a separable particle ("setze nieder" => b="setze", suffix="nieder").
        b, suffix = " " in v and v.split()[:2] or (v, "")
        # Infinitive -ln: trommeln.
        if b.endswith(("ln", "rn")):
            return b
        # Lemmatize regular inflections by stripping the longest matching ending.
        for x in ("test", "est", "end", "ten", "tet", "en", "et", "te", "st", "e", "t"):
            if b.endswith(x): b = b[:-len(x)]; break
        # Subjunctive: hielte => halten, schnitte => schneiden.
        for x, y in (
          ("ieb", "eib"), ( "ied", "eid"), ( "ief", "auf" ), ( "ieg", "eig" ), ("iel", "alt"),
          ("ien", "ein"), ("iess", "ass"), (u"ieß", u"aß"  ), ( "iff", "eif" ), ("iss", "eiss"),
         (u"iß", u"eiß"), (  "it", "eid"), ( "oss", "iess"), (u"öss", "iess")):
            if b.endswith(x): b = b[:-len(x)] + y; break
        # Undo double substitutions produced by the table above.
        b = b.replace("eeiss", "eiss")
        b = b.replace("eeid", "eit")
        # Subjunctive: wechselte => wechseln
        if not b.endswith(("e", "l")) and not (b.endswith("er") and len(b) >= 3 and not b[-3] in VOWELS):
            b = b + "e"
        # abknallst != abknalln => abknallen
        if b.endswith(("hl", "ll", "ul", "eil")):
            b = b + "e"
        # Strip ge- from (likely) gerund:
        if b.startswith("ge") and v.endswith("t"):
            b = b[2:]
        # Corrections (these add about 1.5% accuracy):
        if b.endswith(("lnde", "rnde")):
            b = b[:-3]
        if b.endswith(("ae", "al", u"öe", u"üe")):
            b = b.rstrip("e") + "te"
        if b.endswith(u"äl"):
            b = b + "e"
        # Reattach the separable particle and the infinitive -n.
        return suffix + b + "n"
    def find_lexeme(self, verb):
        """ For a regular verb (base form), returns the forms using a rule-based approach.
        """
        v = verb.lower()
        # Stem = infinitive minus -en, -ln, -rn.
        # NOTE(review): b0 (the unsplit stem) is never used below.
        b = b0 = re.sub("en$", "", re.sub("ln$", "l", re.sub("rn$", "r", v)))
        # Split common separable prefixes (aufstehen => b="steh", x="auf").
        # NOTE(review): x2 is computed but never used below.
        x, x1, x2 = "", "", ""
        for prefix in prefix_separable:
            if v.startswith(prefix):
                b, x = b[len(prefix):], prefix
                x1 = (" " + x).rstrip()
                x2 = x + "ge"
                break
        # Present tense 1sg and subjunctive -el: handeln => ich handle, du handlest.
        pl = b.endswith("el") and b[:-2]+"l" or b
        # Present tense 1pl -el: handeln => wir handeln
        pw = v.endswith(("ln", "rn")) and v or b+"en"
        # Present tense ending in -d or -t gets -e:
        pr = b.endswith(("d", "t")) and b+"e" or b
        # Present tense 2sg gets -st, unless stem ends with -s or -z.
        p2 = pr.endswith(("s","z")) and pr+"t" or pr+"st"
        # Present participle: spiel + -end, arbeiten + -d:
        pp = v.endswith(("en", "ln", "rn")) and v+"d" or v+"end"
        # Past tense regular:
        pt = encode_sz(pr) + "t"
        # Past participle: haushalten => hausgehalten
        ge = (v.startswith(prefix_inseparable) or b.endswith(("r","t"))) and pt or "ge"+pt
        ge = x and x+"ge"+pt or ge
        # Present subjunctive: stem + -e, -est, -en, -et:
        s1 = encode_sz(pl)
        # Past subjunctive: past (usually with Umlaut) + -e, -est, -en, -et:
        s2 = encode_sz(pt)
        # Construct the lexeme:
        lexeme = a = [
            v,
            pl+"e"+x1, p2+x1, pr+"t"+x1, pw+x1, pr+"t"+x1, pp, # present
            pt+"e"+x1, pt+"est"+x1, pt+"e"+x1, pt+"en"+x1, pt+"et"+x1, ge, # past
            b+"e"+x1, pr+"t"+x1, x+pw, # imperative
            s1+"e"+x1, s1+"est"+x1, s1+"en"+x1, s1+"et"+x1, # subjunctive I
            s2+"e"+x1, s2+"est"+x1, s2+"en"+x1, s2+"et"+x1 # subjunctive II
        ]
        # Encode Eszett (ß) and attempt to retrieve from the lexicon.
        # Decode Eszett for present and imperative.
        if encode_sz(v) in self:
            a = self[encode_sz(v)]
            a = [decode_sz(v) for v in a[:7]] + a[7:13] + [decode_sz(v) for v in a[13:20]] + a[20:]
        # Since the lexicon does not contain imperative for all verbs, don't simply return it.
        # Instead, update the rule-based lexeme with inflections from the lexicon.
        return [a[i] or lexeme[i] for i in range(len(a))]
    def tenses(self, verb, parse=True):
        """ Returns a list of possible tenses for the given inflected verb.
        """
        tenses = _Verbs.tenses(self, verb, parse)
        if len(tenses) == 0:
            # Retry with the separable prefix moved to the end:
            # auswirkte => wirkte aus
            for prefix in prefix_separable:
                if verb.startswith(prefix):
                    tenses = _Verbs.tenses(self, verb[len(prefix):] + " " + prefix, parse)
                    break
        return tenses
verbs = Verbs()
conjugate, lemma, lexeme, tenses = \
verbs.conjugate, verbs.lemma, verbs.lexeme, verbs.tenses
#### ATTRIBUTIVE & PREDICATIVE #####################################################################
# Strong inflection: no article.
adjectives_strong = {
("m", "nom"): "er", ("f", "nom"): "e" , ("n", "nom"): "es", ("p", "nom"): "e",
("m", "acc"): "en", ("f", "acc"): "e" , ("n", "acc"): "es", ("p", "acc"): "e",
("m", "dat"): "em", ("f", "dat"): "er", ("n", "dat"): "em", ("p", "dat"): "en",
("m", "gen"): "en", ("f", "gen"): "er", ("n", "gen"): "en", ("p", "gen"): "er",
}
# Mixed inflection: after indefinite article ein & kein and possessive determiners.
adjectives_mixed = {
("m", "nom"): "er", ("f", "nom"): "e" , ("n", "nom"): "es", ("p", "nom"): "en",
("m", "acc"): "en", ("f", "acc"): "e" , ("n", "acc"): "es", ("p", "acc"): "en",
("m", "dat"): "en", ("f", "dat"): "en", ("n", "dat"): "en", ("p", "dat"): "en",
("m", "gen"): "en", ("f", "gen"): "en", ("n", "gen"): "en", ("p", "gen"): "en",
}
# Weak inflection: after definite article.
adjectives_weak = {
("m", "nom"): "e", ("f", "nom"): "e" , ("n", "nom"): "e", ("p", "nom"): "en",
("m", "acc"): "en", ("f", "acc"): "e" , ("n", "acc"): "e", ("p", "acc"): "en",
("m", "dat"): "en", ("f", "dat"): "en", ("n", "dat"): "en", ("p", "dat"): "en",
("m", "gen"): "en", ("f", "gen"): "en", ("n", "gen"): "en", ("p", "gen"): "en",
}
# Uninflected + exceptions.
adjective_attributive = {
"etwas" : "etwas",
"genug" : "genug",
"viel" : "viel",
"wenig" : "wenig"
}
def attributive(adjective, gender=MALE, role=SUBJECT, article=None):
    """ For a predicative adjective, returns the attributive form (lowercase).
        In German, the attributive is formed with -e, -em, -en, -er or -es,
        depending on gender (masculine, feminine, neuter or plural) and role
        (nominative, accusative, dative, genitive).
        The preceding article (if any) selects strong, mixed or weak inflection.
    """
    # g = first letter of gender ("m"/"f"/"n"/"p"), c = first 3 letters of role.
    w, g, c, a = \
        adjective.lower(), gender[:1].lower(), role[:3].lower(), article and article.lower() or None
    if w in adjective_attributive:
        # Uninflected exceptions (etwas, genug, viel, wenig).
        return adjective_attributive[w]
    # Strong inflection: no article, personal pronoun, or quantity word.
    if a is None \
    or a in ("mir", "dir", "ihm") \
    or a in ("ein", "etwas", "mehr") \
    or a.startswith(("all", "mehrer", "wenig", "viel")):
        return w + adjectives_strong.get((g, c), "")
    # Mixed inflection: after ein/kein and possessive determiners.
    # NOTE(review): "Ihr" can never match here because `a` was lowercased above.
    if a.startswith(("ein", "kein")) \
    or a.startswith(("mein", "dein", "sein", "ihr", "Ihr", "unser", "euer")):
        return w + adjectives_mixed.get((g, c), "")
    # Weak inflection: after the definite article and demonstratives.
    if a in ("arm", "alt", "all", "der", "die", "das", "den", "dem", "des") \
    or a.startswith((
        "derselb", "derjenig", "jed", "jeglich", "jen", "manch",
        "dies", "solch", "welch")):
        return w + adjectives_weak.get((g, c), "")
    # Default to strong inflection.
    return w + adjectives_strong.get((g, c), "")
def predicative(adjective):
    """ Returns the predicative adjective (lowercase).
        In German, the attributive form preceding a noun is always inflected:
        "ein kleiner Junge" => strong, masculine, nominative,
        "eine schöne Frau" => mixed, feminine, nominative,
        "der kleine Prinz" => weak, masculine, nominative, etc.
        The predicative (uninflected) form is useful for lemmatization.
    """
    lowered = adjective.lower()
    if len(lowered) <= 3:
        # Too short to carry an inflection suffix.
        return lowered
    for ending in ("em", "en", "er", "es", "e"):
        if not lowered.endswith(ending):
            continue
        # Strip the suffix, but never shorten the stem below 3 characters.
        stem = lowered[:max(-len(ending), -(len(lowered) - 3))]
        if stem.endswith("bl"): # plausibles => plausibel
            stem = stem[:-1] + "el"
        if stem.endswith("pr"): # propres => proper
            stem = stem[:-1] + "er"
        return stem
    return lowered
#### COMPARATIVE & SUPERLATIVE #####################################################################
COMPARATIVE = "er"
SUPERLATIVE = "st"
def grade(adjective, suffix=COMPARATIVE):
    """ Returns the comparative or superlative form of the given (inflected) adjective.
        The grading suffix is inserted between the predicative stem and the
        original inflection ending ("schönes" => "schöneres"), so inflected
        input stays inflected. No umlaut is applied ("groß" => "großer").
    """
    b = predicative(adjective)
    # groß => großt, schön => schönst: after -s or -ß the superlative suffix is -t.
    if suffix == SUPERLATIVE and b.endswith(("s", u"ß")):
        suffix = suffix[1:]
    # große => großere, schönes => schöneres
    return adjective[:len(b)] + suffix + adjective[len(b):]
def comparative(adjective):
    """ Returns the comparative form of the given adjective ("schnell" => "schneller")."""
    return grade(adjective, COMPARATIVE)
def superlative(adjective):
    """ Returns the superlative form of the given adjective ("schnell" => "schnellst")."""
    return grade(adjective, SUPERLATIVE)
#print(comparative(u"schönes"))
#print(superlative(u"schönes"))
#print(superlative(u"große"))
| [
"659338505@qq.com"
] | 659338505@qq.com |
93f89e660325eefbc7ce9cfde9dc271e5890cce4 | 878b78e4b30b1c8e1ece2441669145e131622248 | /gslb/api/v1/__init__.py | 1172bb83b3423acdba17807840ba715be7911e61 | [
"Apache-2.0"
] | permissive | khgandhi/gslb | 1665bfd13d4c0bb7c01f023f6aa4ac51810113e9 | a4e33452fa809d7699a6d6377f873fc4e73e2a32 | refs/heads/master | 2021-01-23T21:27:47.180638 | 2015-08-25T03:16:25 | 2015-08-25T03:16:25 | 40,106,674 | 0 | 0 | null | 2015-08-03T05:31:34 | 2015-08-03T05:31:34 | null | UTF-8 | Python | false | false | 84 | py | __author__ = 'kugandhi'
"""
This module implements the v1 API controllers.
"""
| [
"kugandhi@ebay.com"
] | kugandhi@ebay.com |
ea6625719c5ac7b9e768344b7ea249f8faaf371b | 8df1c9bc047eee03e465df9872261e6984f3e175 | /test/language-features/assignment-simple.py | e1baec4a4acc4e004349252b2863ad3065896cea | [] | no_license | jstroem/TAPY | fbd82c2b3edfcd41ac9aaf2c14343b29fe70bc55 | 0989cb34c309d33f81bca5aa768009a681659b6a | refs/heads/master | 2021-01-23T03:53:12.018466 | 2013-07-01T14:10:37 | 2013-07-01T14:10:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13 | py | a = b = c = 0 | [
"christofferqa@gmail.com"
] | christofferqa@gmail.com |
c0c355f5a044a1f037795a602872e746d6fa3b11 | 1259bd44a79ada24a6ebc293eccac08e7aa255af | /app.py | 42f0a0d62ad452049ffa602fbea0821076682fcb | [] | no_license | surya739/app | a8558d01423b31f4ff172ae1e8bfc26877b9ef39 | 4600412d5300ff129f38d8f9791eaddf5bded8b0 | refs/heads/master | 2023-01-30T16:16:17.348418 | 2020-12-10T11:02:34 | 2020-12-10T11:02:34 | 320,240,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,994 | py | from flask import Flask, render_template, redirect, url_for, session
from flask_mysqldb import MySQL
from flask import json
from flask import request
import MySQLdb.cursors
import re
app = Flask(__name__)
app.secret_key = 'your secret key'
app.config['MYSQL_HOST'] = '127.0.0.1'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = ''
app.config['MYSQL_DB'] = 'login'
mysql = MySQL(app)
@app.route('/')
@app.route('/login', methods =['GET', 'POST'])
def login():
    """Render the login form and authenticate a submitted username/password.

    On success, stores the login state in the session and shows index.html;
    otherwise re-renders login.html with an error message.
    """
    msg = ''
    if request.method == 'POST' and 'username' in request.form and 'password' in request.form:
        username = request.form['username']
        password = request.form['password']
        cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
        # Parameterized query: MySQLdb escapes the values (no SQL injection).
        # NOTE(review): the password is compared in plain text, which implies
        # passwords are stored unhashed -- consider hashing them.
        cursor.execute('SELECT * FROM accounts WHERE username = % s AND password = % s', (username, password, ))
        account = cursor.fetchone()
        if account:
            # Persist login state in the signed Flask session cookie.
            session['loggedin'] = True
            session['id'] = account['id']
            session['username'] = account['username']
            msg = 'Logged in successfully !'
            return render_template('index.html', msg = msg)
        else:
            msg = 'Incorrect username / password !'
    return render_template('login.html', msg = msg)
@app.route('/logout')
def logout():
    """Clear the login session keys and send the user back to the login page."""
    for key in ('loggedin', 'id', 'username'):
        session.pop(key, None)
    return redirect(url_for('login'))
@app.route('/register', methods =['GET', 'POST'])
def register():
    """Render the registration form and create a new account on POST.

    Validates username uniqueness, email format and allowed username
    characters before inserting the account.
    """
    msg = ''
    if request.method == 'POST' and 'username' in request.form and 'password' in request.form and 'email' in request.form :
        username = request.form['username']
        password = request.form['password']
        email = request.form['email']
        cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
        cursor.execute('SELECT * FROM accounts WHERE username = % s', (username, ))
        account = cursor.fetchone()
        if account:
            msg = 'Account already exists !'
        elif not re.match(r'[^@]+@[^@]+\.[^@]+', email):
            msg = 'Invalid email address !'
        # NOTE(review): re.match only anchors at the start, so a username like
        # "abc$def" still passes this check -- consider fullmatch.
        elif not re.match(r'[A-Za-z0-9]+', username):
            msg = 'Username must contain only characters and numbers !'
        elif not username or not password or not email:
            msg = 'Please fill out the form !'
        else:
            # NOTE(review): the password is stored in plain text -- hash it.
            cursor.execute('INSERT INTO accounts VALUES (NULL, % s, % s, % s)', (username, password, email, ))
            mysql.connection.commit()
            msg = 'You have successfully registered !'
    elif request.method == 'POST':
        msg = 'Please fill out the form !'
    return render_template('register.html', msg = msg)
@app.route('/github', methods =['post'])
def github():
    """Webhook endpoint stub: capture the JSON payload of a POST request.

    NOTE(review): looks incomplete -- the Content-type value 'app/json'
    presumably should be 'application/json', `my_info` is never used, and
    the view returns None (which Flask reports as an error). Confirm intent.
    """
    if request.headers['Content-type'] == 'app/json':
        my_info = json.dumps(request.json)
if __name__ == "__main__":
app.run(debug = True)
| [
"70879421+surya739@users.noreply.github.com"
] | 70879421+surya739@users.noreply.github.com |
f3c10a8701813ee291c2d173a9b909c955eb29cc | 71046fbb9db7ee7948832dd3c7a70eec3dcabfa3 | /app.py | 13cd777f29e3fb5a653a8602f7063b5143f65cf9 | [] | no_license | vvs1999/Secure-data-encryption-and-decryption-using-Crypto-and-Stego | 317de517de773f060b5d62f3a87bf55a436fc2d6 | d77c75271a380c46f0c0637bca67d2ce86a3567c | refs/heads/main | 2023-02-12T18:52:54.889055 | 2021-01-08T06:21:18 | 2021-01-08T06:21:18 | 327,815,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,550 | py | from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def home():
    """Render the landing page."""
    return render_template('home.html')
@app.route('/Encryption')
def encrypt():
    """Render the encryption form."""
    return render_template('Encryption.html')
@app.route('/Decryption')
def decrypt():
    """Render the decryption form."""
    return render_template('Decryption.html')
@app.route('/Encryption', methods=['POST'])
def getdata_enc():
    """Process the encryption form.

    Pipeline: base-encode the source image (writes 's.txt'), RSA-encrypt it
    with the two user-supplied primes, then steganographically embed the
    result into the chosen cover image under static/coverimages/.
    """
    import os
    source_name = request.form['source_name'] #INPUT MAIN IMAGE
    p = int(request.form['prime_1'])
    q = int(request.form['prime_2'])
    cover_name = request.form['cover_name'] #ANOTHER DUPLICATE INPUT
    new_img_name = request.form['new_name'] #INPUT+DUPLIACET=NEW
    import base_enc
    base_enc.base_enc(source_name)
    import rsa_enc
    # 's.txt' is the intermediate file produced by base_enc above.
    rsa_enc.call_rsa('s.txt', p, q)
    import stego_enc
    cover_name = os.path.dirname(os.path.abspath(__file__))+'/static/coverimages/'+cover_name
    stego_enc.encode(cover_name, new_img_name)
    return render_template('thank.html')
@app.route('/Decryption', methods=['POST'])
def getdata_dec():
    """Process the decryption form: extract the hidden payload from the
    stego image, then RSA-decrypt it with the user-supplied primes.
    """
    cover_name = request.form['cover_name']
    p = int(request.form['prime_1'])
    q = int(request.form['prime_2'])
    # NOTE(review): new_cover_name is unused while the base_dec step below
    # stays disabled.
    new_cover_name = request.form['new_cover_name']
    import stego_dec
    stego_dec.decode(cover_name)
    import rsa_dec
    rsa_dec.rsa_dec(p,q)
    # The final base-decoding step is currently disabled:
    '''import base_dec
    base_dec.base_dec(new_cover_name)'''
    return render_template('thank.html')
if __name__ == '__main__':
app.run(debug=True) | [
"noreply@github.com"
] | vvs1999.noreply@github.com |
727bd531563b84fb2e2bddb7cd11d14b54aa1d1e | 636879d883c94f37075f3871cd7f9e1d9de9d75e | /snake.py | 4ee74adab22fb01034ad8237da72c4b119a3fc11 | [] | no_license | MdNaina/python_random_projects | 61e6eccb3a3953a55ad6f88e0e6b0ff6010448ad | a052dd07098ebb3fcc574277b8f6996ba49aa67e | refs/heads/main | 2023-02-11T23:13:53.983102 | 2020-12-31T11:31:41 | 2020-12-31T11:31:41 | 325,773,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,975 | py | import pygame
import random
pygame.init()
red = (255,0,0)
blue = (0,0,255)
green = (0,255,0)
white = (255,255,255)
width = 600
height = 600
dis = pygame.display.set_mode((width,height))
pygame.display.set_caption("snake game")
box = 30
clock = pygame.time.Clock()
score_font = pygame.font.SysFont("bahnschrift", 36)
message_font = pygame.font.SysFont("comicsansms", 28)
# print(foodx,foody)
def our_snake(snake):
    """Draw every segment of the snake as a filled square on the display.

    snake -- list of [x, y] coordinate pairs, one per body segment.
    """
    # Iterate segments directly instead of indexing via range(len(...)).
    for segment in snake:
        pygame.draw.rect(dis, red, [segment[0], segment[1], box, box])
def score(s):
    """Render the current score `s` in the top-left corner of the display."""
    value = score_font.render(f"SCORE :{s} ", True, "yellow")
    dis.blit(value, (0,0))
def message(msg):
    """Render a status message near the middle of the display."""
    value = message_font.render(msg, True, 'black')
    dis.blit(value, (200,300))
def mainloop():
    """Run the snake game: handle input, move the snake one cell per tick,
    detect wall/self collisions, grow on food, and show a game-over screen
    offering restart (r) or quit (q).
    NOTE(review): restarting calls mainloop() recursively, so the call stack
    grows by one frame per restart.
    """
    # Food spawns on a random grid cell (coordinates are multiples of `box`).
    foodx = round(random.randrange(0,width-box)/box)*box
    foody = round(random.randrange(0,height-box) / box)*box
    done = False   # True => quit the game entirely.
    close = False  # True => player lost; show the game-over screen.
    x = width // 2
    y = height // 2
    dx = 0         # Current movement direction, one `box` step per tick.
    dy = 0
    snakes = []    # List of [x, y] body segments, oldest (tail) first.
    length_of_the_snake = 1
    while not done:
        # Game-over screen: loop until the player quits (q) or restarts (r).
        while close :
            dis.fill(red)
            message("you lost , do you want to play again ?")
            score(length_of_the_snake-1)
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_q:
                        done = True
                        close = False
                    if event.key == pygame.K_r:
                        mainloop()
        # Steering: arrow keys set the direction for subsequent ticks.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_UP:
                    dy = -box
                    dx = 0
                elif event.key == pygame.K_DOWN:
                    dy = box
                    dx = 0
                elif event.key == pygame.K_LEFT:
                    dx = -box
                    dy = 0
                elif event.key == pygame.K_RIGHT:
                    dx = box
                    dy = 0
        # Wall collision ends the round.
        if x >= width or x < 0 or y >= height or y < 0:
            close = True
        dis.fill(white)
        x += dx
        y += dy
        pygame.draw.rect(dis, green, (foodx, foody, box, box))
        # Advance: append the new head, drop the tail if we didn't grow.
        snake_head = []
        snake_head.append(x)
        snake_head.append(y)
        snakes.append(snake_head)
        if len(snakes) > length_of_the_snake:
            del snakes[0]
        # Self collision: the head landing on any body segment ends the round.
        for i in snakes[:-1]:
            if i == snake_head:
                close = True
        our_snake(snakes)
        score(length_of_the_snake - 1)
        pygame.display.update()
        # Eating food: respawn it and grow by one segment.
        if x == foodx and y == foody:
            foodx = round(random.randrange(0,width-box)/box)*box
            foody = round(random.randrange(0,height-box)/box)*box
            length_of_the_snake += 1
        # Cap the game speed at 10 frames per second.
        clock.tick(10)
mainloop()
| [
"naina.stnm@gmail.com"
] | naina.stnm@gmail.com |
1c8662f885da78193eaf3178a82cdf91656f2dbf | a57fee81833a90828253b6590eb61dca460c408a | /view.py | b89c85e0bcb64ddd9f7722e7dbc085c289f0cc53 | [] | no_license | firststef/RNBreakout | 6c70f13a3e6b816632555ebc0d2ce865f6d641cb | dab308243e389ecb87d7f0d18bc5aaa2b78e0d1c | refs/heads/master | 2023-02-11T22:18:37.743788 | 2021-01-05T17:34:07 | 2021-01-05T17:34:07 | 321,891,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,186 | py | from datetime import datetime
import gym
from time import sleep
import random
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Input
from tensorflow import keras
env = gym.make('Breakout-ram-v0')
num_of_actions = env.action_space.n
render = True
class BreakoutNeuralNet(tf.keras.Model):
    """Simple MLP Q-network for Breakout-ram: 128 -> 64 -> `outs` linear outputs,
    one score per action."""
    def __init__(self, outs):
        # `outs`: number of discrete actions (one output unit per action).
        super(BreakoutNeuralNet, self).__init__()
        self.dense1 = tf.keras.layers.Dense(128, activation="relu")
        self.dense2 = tf.keras.layers.Dense(64, activation="relu")
        self.dense3 = tf.keras.layers.Dense(outs, dtype=tf.float32) # No activation
    def call(self, x):
        # Forward pass: two ReLU layers followed by the linear output layer.
        x = self.dense1(x)
        x = self.dense2(x)
        return self.dense3(x)
main_model = BreakoutNeuralNet(num_of_actions)
decision_model = BreakoutNeuralNet(num_of_actions)
decision_model.compile(optimizer='adam', loss='mse')
decision_model.set_weights(main_model.get_weights())
mse = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.Adam(1e-4)
# Hyper parameters
alpha = 0.1
gamma = 0.99
epsilon = 1
# For plotting metrics
episode_reward_history = []
load = 'backu_breakout2020-12-31_06_38_34_631078.pickle'
# load = None
def actor_action(a_state):
    """Greedy policy: return the index of the highest-scoring action
    predicted by the global `main_model` for the given state batch."""
    scores = main_model(a_state)
    choice = np.argmax(scores)
    return choice
state = env.reset()
done = False
model = keras.models.load_model(load)
print([x.shape for x in model.get_weights()])
main_model(np.asarray([state]))
main_model.set_weights(model.get_weights())
decision_model(np.asarray([state]))
decision_model.set_weights(main_model.get_weights())
for i in range(1000):
state = env.reset()
done = False
counter = 0
while not done:
counter = counter + 1
# Make a decision
state = np.asarray([state])
action = actor_action(state)
# Execute the action and get the new state
next_state, reward, done, info = env.step(action)
if counter > 600:
break
if render:
sleep(0.01)
env.render()
state = next_state
| [
"noreply@github.com"
] | firststef.noreply@github.com |
7109aa376c0a9a18a06cb341979796f67349346b | 271cbadd98ad60e8c3609b1d5422c1b49b9a8347 | /turtlebot2_wss/turtlebot_simulation/build/vrep_simulation/catkin_generated/pkg.develspace.context.pc.py | 0592858c682d9ee83136524c7b345541f1f49b7e | [] | no_license | yxl-loading/ROCO506Z | 9a5a16ebb839b1424bf9b9be2c8358d5211d9ca4 | cd16e8a2428db3a3b8fea1534120d0affc2c262b | refs/heads/master | 2022-04-19T02:23:30.942440 | 2020-04-22T19:03:43 | 2020-04-22T19:03:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "vrep_simulation"
PROJECT_SPACE_DIR = "/home/youssef/ROCO506Z/turtlebot2_wss/turtlebot_simulation/devel/.private/vrep_simulation"
PROJECT_VERSION = "0.0.0"
| [
"youssefhindawi@hotmail.com"
] | youssefhindawi@hotmail.com |
808043585a57418013b8485f97a036e010cf856a | cbd9b30d155cb283a1cb9bf7c2410c5db1110e53 | /backup/drive/Client.py | 2fea6535072bcbf6ab743c679c8e657b1d77aa6d | [] | no_license | DominikHenkel/Self-Driving-Car | d3f4cd081187a01a2ca55fb0e75d44476423578a | cb0faff9a6abab79be1c1fffa8bea605152dca5b | refs/heads/master | 2020-03-22T08:18:19.633678 | 2018-11-16T22:55:08 | 2018-11-16T22:55:08 | 139,758,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | import time
import socket
# UDP client: send a handshake datagram to the server, then print every
# datagram received until 10 seconds pass without data.
client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_socket.settimeout(10.0)
message = b'CONNECTED'
addr = ("localhost", 8000)
start = time.time()
client_socket.sendto(message , addr)
try:
    while True:
        # 131072-byte buffer: large enough for any single UDP datagram here.
        data, server = client_socket.recvfrom(131072)
        end = time.time()
        # NOTE(review): `elapsed` is computed but never used.
        elapsed = end - start
        print(f'{data}')
except socket.timeout:
    print('REQUEST TIMED OUT')
| [
"sup4tdc@yahoo.de"
] | sup4tdc@yahoo.de |
c3d0c6798414ea088eb7b3efc5bd017d1d44eda3 | 55267c377da7a2a6676978d958e07c07bfc9d9b6 | /nbutil.py | 395b05b0e7c54b1f0b25ec174c5bb9c33908ef84 | [] | no_license | larsks/netbox-data-scripts | 54916afab045bed663c2a08ca90f102bf7efeeaa | 91aa6554aa815bdfc894a500037e942962c16705 | refs/heads/master | 2023-01-11T16:50:50.551000 | 2020-11-11T22:33:14 | 2020-11-11T22:33:14 | 309,502,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,594 | py | import click
import json
import logging
import pynetbox
import resources
import netbox
LOG = logging.getLogger(__name__)
logging.basicConfig(level='DEBUG')
@click.group(
    context_settings=dict(auto_envvar_prefix='NETBOX'))
@click.option('--url', '-u')
@click.option('--token', '-t')
@click.pass_context
def main(ctx, url, token):
    """CLI entry point: build the Netbox API client and share it with
    subcommands via ctx.obj. Options can also be supplied through the
    NETBOX_URL / NETBOX_TOKEN environment variables (auto_envvar_prefix)."""
    ctx.obj = netbox.Netbox(url, token=token)
@main.command()
@click.option('--site', '-s', required=True)
@click.option('--device-role', '-r')
@click.argument('factfiles', nargs=-1)
@click.pass_context
def load(ctx, site, device_role, factfiles):
    """Load physical hosts from Ansible fact files into Netbox.

    For each fact file, builds a device resource (skipping invalid files and
    virtual machines), creates any missing site / manufacturer / device type /
    device role, then creates the device and its interfaces.
    NOTE(review): the --device-role option is currently unused; the role comes
    from the device resource itself.
    """
    api = ctx.obj
    # Parse fact files into device resources.
    devices = []
    for factfile in factfiles:
        with open(factfile) as fd:
            facts = json.load(fd)
        if 'ansible_facts' not in facts:
            LOG.warning('invalid fact file: %s', factfile)
            continue
        if facts['ansible_facts'].get('ansible_virtualization_role') != 'host':
            LOG.warning('skipping virtual machine: %s', factfile)
            continue
        try:
            dev = resources.device.from_ansible_facts(facts['ansible_facts'])
        except KeyError as err:
            LOG.warning('failed loading device from %s: missing %s',
                        factfile, err)
        else:
            devices.append(dev)

    for dev in devices:
        try:
            _dev = api.dcim.devices.filter(name=dev.name)[0]
        except IndexError:
            # Device does not exist yet: create its dependencies first.
            LOG.info('adding %s', dev)

            try:
                _site = api.dcim.sites.filter(name=site)[0]
            except IndexError:
                _site = api.dcim.sites.create(name=site)

            try:
                manufacturer = api.dcim.manufacturers.filter(
                    name=dev.device_type.manufacturer)[0]
            except IndexError:
                obj = resources.manufacturer(name=dev.device_type.manufacturer)
                LOG.info('create new manufacturer %s', obj)
                manufacturer = api.dcim.manufacturers.create(**obj.to_dict())

            try:
                devtype = api.dcim.device_types.filter(
                    manufacturer_name=manufacturer.name,
                    model=dev.device_type.model)[0]
            except IndexError:
                obj = resources.device_type(
                    manufacturer=manufacturer.id,
                    model=dev.device_type.model)
                LOG.info('create new device type %s', obj)
                devtype = api.dcim.device_types.create(**obj.to_dict())

            try:
                devrole = api.dcim.device_roles.filter(
                    name=dev.device_role)[0]
            except IndexError:
                obj = resources.device_role(name=dev.device_role)
                LOG.info('create new device role %s', obj)
                devrole = api.dcim.device_roles.create(**obj.to_dict())

            dev.site = _site.id
            dev.device_type = devtype.id
            dev.device_role = devrole.id

            try:
                _dev = api.dcim.devices.create(**dev.to_dict())
            except pynetbox.core.query.RequestError as err:
                # Creation failed: log and skip this device. (The original
                # code dropped into breakpoint() here and then fell through
                # with `_dev` unbound, raising NameError in the loop below.)
                LOG.error('failed to create device %s: %s', dev, err)
                continue

        for interface in dev.interfaces.interfaces:
            try:
                _iface = api.dcim.interfaces.filter(
                    device_id=_dev.id, name=interface.name)[0]
            except IndexError:
                LOG.info('create new interface %s on %s', interface, dev)
                _iface = api.dcim.interfaces.create(
                    device=_dev.id, **interface.to_dict())
if __name__ == '__main__':
main()
| [
"lars@redhat.com"
] | lars@redhat.com |
3045020f1542290997f407f8f8b43425a5a780ce | 4f098c687b0f067e31c6998cae48c4d8007450e1 | /oldzhifu/oldzhifu/settings.py | 91d4ce81a700df95d9cd4dccdc0c3c9f5a27ed2a | [] | no_license | wkongxiaojie/Test | 83ea1a3c1a89e7553b8ade98a3182754af6da266 | f3cf98baf512516962e312152a849452011b4556 | refs/heads/master | 2022-01-26T18:57:08.525899 | 2019-05-15T06:34:57 | 2019-05-15T06:34:57 | 186,727,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,062 | py | """
Django settings for oldzhifu project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gb@nj7@nw5u!26^(d9izdjuz$p*fmmat1jz4a6k1*r)--s0tyu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'zhifu.apps.ZhifuConfig',
'OfficialWebsite.apps.OfficialwebsiteConfig',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'oldzhifu.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'oldzhifu.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases

DATABASES = {
    'default' : {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'zhufu4',
        'USER': 'root',
        # SECURITY NOTE(review): plaintext DB credentials committed to the
        # repo; move them to environment variables before deploying.
        'PASSWORD':'123456',
        # Empty HOST means "localhost" for the MySQL backend.
        'HOST':'',
        'PORT':'3306',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/

# BUG FIX: LANGUAGE_CODE was previously assigned twice ('en-us' first,
# then 'zh-Hans' a few lines later); only the last assignment ever took
# effect, so keep the single effective value.
LANGUAGE_CODE = 'zh-Hans'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=[
os.path.join(BASE_DIR,'static')
]
# Development-stage file upload directory (translated from Chinese).
# NOTE(review): MEDIA_ROOT points at the static folder and MEDIA_URL has no
# leading slash ('static/'), so uploads land among static assets — looks
# unintended; confirm before changing.
MEDIA_ROOT=os.path.join(BASE_DIR,'static')
# MEDIA_ROOT = 'media/'
MEDIA_URL = 'static/'
# Deployment-stage file upload directory (translated from Chinese).
# MEDIA_ROOT='/var/www/dailyfresh/static'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
| [
"713042132@qq.com"
] | 713042132@qq.com |
2157fa5f00a7ea2f2da78c201b0648401aa85d19 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_attending.py | 42c967da90aa064ad1ee81dd35207c570ee2ae1f | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
#calss header
class _ATTENDING():
def __init__(self,):
self.name = "ATTENDING"
self.definitions = attend
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['attend']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
e25cf0d305a146cf875a01066af4a033c3902e8f | 99d889a460e1fea2bbdd8ff15b8e45ed9dd2acf7 | /coreproject/coreproject/urls.py | fa5725b4abb890511bf6c3a398e30b3661ca3d1d | [] | no_license | BregenzerK/CustomizedERP | f7f42080140590c57e5a0d2004c0e64388e8cd31 | f048ed0a60255cf5538d1de0e1e2e6069c97446b | refs/heads/master | 2021-01-10T18:52:03.739254 | 2017-12-10T12:01:50 | 2017-12-10T12:01:50 | 28,302,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,445 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
url(r'^$', 'coreproject.views.home', name='home'),
url(r'^kunden/create', 'kunden.views.create_with_account' ),
url(r'^kunden/overview', 'kunden.views.get_overview_filter'),
url(r'^kunden/(?P<kunden_id>[-\w]+)/Details', 'kunden.views.details_with_account'),
url(r'^kunden/export', 'kunden.views.export'),
url(r'^kunden/merge', 'kunden.views.merge'),
url(r'^kunden/(?P<kunden_id>[-\w]+)/create/Angebot', 'kauf.views.create_offer'),
url(r'^kunden/(?P<kunden_id>[-\w]+)/create/Kauf/(?P<angebot_id>[-\w]+)', 'kauf.views.create_purchase_from_offer'),
url(r'^kunden/(?P<kunden_id>[-\w]+)/create/Kauf', 'kauf.views.create_purchase'),
url(r'^kunden/(?P<kunden_id>[-\w]+)/create/Kauf/scannen', 'kauf.views.scan_products'),
url(r'^kunden/(?P<kunden_id>[-\w]+)/create/Kauf/zahlung', 'kauf.views.check_payment'),
url(r'^kunden/(?P<kunden_id>[-\w]+)/kaufhistorie', 'kauf.views.show_orderhistory_with_offer'),
url(r'^kunden/(?P<kunden_id>[-\w]+)/Kauf/(?P<kauf_id>[-\w]+)/download', 'kauf.views.download_purchase'),
url(r'^kunden/(?P<kunden_id>[-\w]+)/Angebot/(?P<angebot_id>[-\w]+)/download', 'kauf.views.download_offer'),
url(r'^kunden/(?P<kunden_id>[-\w]+)/Kauf/(?P<kauf_id>[-\w]+)/(?P<mitteilung_id>[-\w]+)', 'kauf.views.show_kauf_details'),
url(r'^kunden/(?P<kunden_id>[-\w]+)/Kauf/(?P<kauf_id>[-\w]+)', 'kauf.views.show_kauf_details'),
url(r'^kunden/(?P<kunden_id>[-\w]+)/Angebot/(?P<angebot_id>[-\w]+)', 'kauf.views.show_angebot_details'),
url(r'^Preis', 'kauf.views.get_Preis'),
url(r'^kunden/(?P<kunden_id>[-\w]+)/check_credit', 'kauf.views.check_credit'),
url(r'^lager/create/Lautsprecher', 'lager.views.create_Lautsprecher'),
url(r'^lager/create/Faden', 'lager.views.create_Faden'),
url(r'^lager/overview', 'lager.views.get_overview_filter'),
url(r'^lager/Lieferant/(?P<lieferanten_id>[-\w]+)/Details', 'lager.views.show_details_lieferant_with_account'),
url(r'^lager/(?P<model>[-\w]+)/(?P<fabrikats_id>[-\w]+)/Details$', 'lager.views.details'),
#url(r'^lager/generateInventurliste', 'lager.views.generate_inventurliste'),
url(r'^lager/inventurliste', 'lager.views.show_inventurliste'),
url(r'^lager/inventurliste/(?P<product_id>.+)', 'lager.views.show_inventurliste'),
url(r'^lager/inventur/(?P<inventur_id>[-\w]+)/auswerten', 'lager.views.inventur_auswerten'),
url(r'^lager/inventur/(?P<inventur_id>[-\w]+)/drucken', 'lager.views.inventur_ausdrucken'),
url(r'^lager/inventur/overview', 'lager.views.inventur_overview'),
url(r'^lager/create/Lieferant', 'lager.views.create_Lieferant_with_account'),
url(r'^lager/Lieferant/overview', 'lager.views.show_overview_lieferant'),
url(r'^einkauf/Bestellanforderung/overview', 'einkauf.views.show_banf_overview'),
url(r'^einkauf/Bestellung/overview', 'einkauf.views.show_bestellung_overview'),
url(r'^einkauf/create/Bestellung/(?P<lieferanten_id>[-\w]+)', 'einkauf.views.create_bestellung'),
url(r'^einkauf/create/Bestellung', 'einkauf.views.create_bestellung_without_banf'),
url(r'^einkauf/create/Wareneingang', 'einkauf.views.create_wareneingang'),
url(r'^einkauf/Bestellanforderung/(?P<banf_id>[-\w]+)/(?P<mitteilung_id>[-\w]+)', 'einkauf.views.show_banf_details'),
url(r'^einkauf/Bestellanforderung/(?P<banf_id>[-\w]+)', 'einkauf.views.show_banf_details'),
url(r'^einkauf/Bestellung/(?P<bestell_id>[-\w]+)/upload', 'einkauf.views.upload_document'),
url(r'^einkauf/Bestellung/(?P<bestell_id>[-\w]+)/bezahlen', 'einkauf.views.pay_bestellung'),
url(r'^einkauf/Bestellung/(?P<bestell_id>[-\w]+)/close', 'einkauf.views.close_bestellung'),
url(r'^einkauf/Bestellung/(?P<bestell_id>[-\w]+)/download', 'einkauf.views.download_bestellung'),
url(r'^einkauf/Bestellung/(?P<bestell_id>[-\w]+)/scan', 'einkauf.views.scan_products'),
url(r'^einkauf/Bestellung/(?P<bestell_id>[-\w]+)/Details', 'einkauf.views.show_details_bestellung'),
url(r'^Produkte', 'einkauf.views.get_produkte_zu_lieferant'),
url(r'^Mitteilungsboard/(?P<mitteilung_id>[-\w]+)/gelesen', 'mitteilungen.views.message_read'),
url(r'^Mitteilungsboard', 'mitteilungen.views.show_messages'),
url(r'^Messages', 'mitteilungen.views.calc_messages'),
url(r'^admin/', include(admin.site.urls)),
)
| [
"katja.bregenzer@bridging-it.de"
] | katja.bregenzer@bridging-it.de |
810c9a802e3d4c278f1c6108f938d8437907212d | 85da1d98e78fd2307f7077fa44ca2a3f08ac1e0e | /ExampleWidget/views.py | 86304c3fa653348dc7f61df215a7a75315f49ea9 | [
"MIT"
] | permissive | mcgoddard/example-widget | 532bfdaac5efd4c9778b57e6e91c8c2b3254ebc0 | 86c038ffcaa749d1b57a70c48c0bed81a4ce62b4 | refs/heads/master | 2021-01-10T07:28:59.632901 | 2016-04-03T20:27:17 | 2016-04-03T20:27:17 | 55,367,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,008 | py | """
Routes and views for the flask application.
"""
from datetime import datetime
from flask import render_template
from ExampleWidget import app
def _color_page(color):
    """Render the standard single-color page for *color*.

    Uses template '<color>.html' with title '<color> page'; shared by all
    the color routes below to remove the 12x duplicated body.
    """
    return render_template(color + '.html', title=color + ' page')


@app.route('/')
@app.route('/red')
def red():
    """Renders the red page."""
    return _color_page('red')


@app.route('/green')
def green():
    """Renders the green page."""
    return _color_page('green')


@app.route('/blue')
def blue():
    """Renders the blue page."""
    return _color_page('blue')


@app.route('/yellow')
def yellow():
    """Renders the yellow page."""
    return _color_page('yellow')


@app.route('/orange')
def orange():
    """Renders the orange page."""
    return _color_page('orange')


@app.route('/brown')
def brown():
    """Renders the brown page."""
    return _color_page('brown')


@app.route('/pink')
def pink():
    """Renders the pink page."""
    return _color_page('pink')


@app.route('/purple')
def purple():
    """Renders the purple page."""
    return _color_page('purple')


@app.route('/turquoise')
def turquoise():
    """Renders the turquoise page."""
    return _color_page('turquoise')


@app.route('/white')
def white():
    """Renders the white page."""
    return _color_page('white')


@app.route('/black')
def black():
    """Renders the black page."""
    return _color_page('black')


@app.route('/grey')
def grey():
    """Renders the grey page."""
    return _color_page('grey')
"mikeygiom@gmail.com"
] | mikeygiom@gmail.com |
4126cae3139385d9130402aa99dcea5f4d49447a | eabc369a48d52a2f678794857738dbe859459261 | /app/utils/general.py | 29da0d161e92566733ebcd7da5c0c2b62cdc34c8 | [] | no_license | datawizzards-za/energyspazr | a6ea1ded17f2657474e906ad45194baa7c818b06 | cedfd99302b65a1067a78589a8f678e103e0d885 | refs/heads/master | 2021-12-15T13:56:11.503400 | 2017-08-21T01:06:19 | 2017-08-21T01:06:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | from enum import Enum
class SIRole(Enum):
    """Supplier/installer ("SI") role — presumably supply-only,
    install-only, or both; confirm against callers."""
    SUPPLY = 1
    INSTALL = 2
    BOTH = 3
| [
"adlaba@csir.co.za"
] | adlaba@csir.co.za |
99cb91a253228dbd5578a98ab5fdf1b02c43fe6e | 05eef27d0bb309b7596b405e429c2014c6a2d6de | /Random/sco.menu2.py | f95e6d4ca0a5be12dbdc6d4e4b049fda562669e2 | [] | no_license | vincenzo-scotto001/Python | 150981bc165f1308395206e0a33a9018400effc3 | d88f5b9a191689ab9fd8775bab18ddb30180e32a | refs/heads/master | 2020-04-04T06:54:35.252663 | 2019-09-11T14:10:03 | 2019-09-11T14:10:03 | 155,761,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | # Menu driven problem with two programs by Vincenzo Scotto Di Uccio
import math
import random
# Menu choices, kept as strings because input() returns str.
INTEGER = "1"
FALLING_DISTANCE = "2"
FALLING_DISTANCE_2 ="3"
STOP = "4"
def main():
    """Run the menu loop until the user selects STOP ("4").

    Dispatches choices "1"-"3" to the matching demo function and re-prompts
    on anything else. (Removed the unused `valid` flag and the int/str
    initial sentinel from the original.)
    """
    choice = ""
    while choice != STOP:
        display_menu()
        choice = input("Enter your number(1-4): ")
        if "1" <= choice <= "4":
            if choice == INTEGER:
                int_calc()
            elif choice == FALLING_DISTANCE:
                fall_dist()
            elif choice == FALLING_DISTANCE_2:
                fall_dist2()
            # choice == STOP falls through; the loop condition exits.
        else:
            print("Please enter a valid number(1,2,3,4): ")
def int_calc():
    """Pick two random integers in [1, 100], display them, and report
    which is bigger (or that they are equal).

    BUG FIX: the original reported "num2 is bigger" when the two values
    were equal; an explicit equal branch is added. Also fixes the
    user-facing "numebr" typo.
    """
    num1 = random.randint(1, 100)
    num2 = random.randint(1, 100)
    print(" The first number is: ", num1)
    print(" The second number is: ", num2)
    if num1 > num2:
        print(num1, "is bigger than", num2)
    elif num2 > num1:
        print(num2, "is bigger than", num1)
    else:
        print(num1, "and", num2, "are equal")
def fall_dist():
    """Prompt for a fall time in whole seconds and print the free-fall
    distance d = 0.5 * g * t^2 (g = 9.8 m/s^2)."""
    seconds = int(input(" How long has your object been falling for in seconds: "))
    distance = 0.5 * 9.8 * seconds ** 2
    print(" Your object fell,", distance, "m in", seconds, "s")
def fall_dist2():
    """Print the free-fall distance d = 0.5 * g * t^2 for each whole
    second from 1 through 10."""
    for seconds in range(1, 11):
        distance = 0.5 * 9.8 * seconds ** 2
        print("The falling distance is", format(distance, ",.2f"), "m", sep=" ")
def display_menu():
    """Print the numbered option menu, one entry per line."""
    for entry in (
        " MENU ",
        " 1) Random integer program",
        " 2) falling distance program",
        " 3) falling distance 1-10 program",
        " 4) STOP",
    ):
        print(entry)
| [
"noreply@github.com"
] | vincenzo-scotto001.noreply@github.com |
5e6113a805693ced86ee2ff949b681a5f7303780 | 9142be77c2453a6631335832e960aaafbac6abf5 | /sickle.py | f8fc8e9b00191d64b356fd4fedcfdb14e80aec4c | [] | no_license | 1sn0m4d/Sickle | f9a4ceb924bda24cddc86defbedeca616e003752 | 4d3691b5556cb9e1364f225aa239e3898bacf2e8 | refs/heads/master | 2020-05-25T02:05:48.243644 | 2019-05-17T05:18:37 | 2019-05-17T05:18:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | #!/usr/bin/env python3
from Sickle import __main__
__main__.entry_point()
| [
"mvalen1997@gmail.com"
] | mvalen1997@gmail.com |
057b708cf69ba173fa400d3278be66d00cb31bc5 | 1985e2e8d817b9f1a0019d76fa86cd5866b3c58e | /blink_mouth-nn.py | ff41dafb6992e6e0c6176d3a0495bced5331142f | [] | no_license | Surya-Narayan/ADAS | 32754917cf4455d7dad154426a14a8cf995adadf | c517b142acb7bbed63b1e1872c02bc46cfae6b9f | refs/heads/master | 2020-04-19T19:08:29.951280 | 2019-04-29T16:25:44 | 2019-04-29T16:25:44 | 168,380,555 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,442 | py | # USAGE
# python blink_mouth-nn.py --shape-predictor shape_predictor_68_face_landmarks.dat --video cam.mp4
# python blink_mouth-nn.py --shape-predictor shape_predictor_68_face_landmarks.dat
# Epoch 5000 : cost = 5030.34 W = 1.5330261 b = -9.608036
# import the necessary packages
from scipy.spatial import distance as dist
from imutils.video import FileVideoStream
from imutils.video import VideoStream
from imutils import face_utils
import numpy as np
import argparse
import imutils
import time
import cv2
import dlib
import time
import csv
import os
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import mean_squared_error,r2_score
def eye_aspect_ratio(eye):
    """Return the eye aspect ratio (EAR) of six eye landmarks.

    EAR = (|p1-p5| + |p2-p4|) / (2 * |p0-p3|): the two vertical eyelid
    distances over twice the horizontal eye width. Small values indicate
    a closed eye.
    """
    vertical_a = dist.euclidean(eye[1], eye[5])
    vertical_b = dist.euclidean(eye[2], eye[4])
    horizontal = dist.euclidean(eye[0], eye[3])
    return (vertical_a + vertical_b) / (2.0 * horizontal)
def mouth_ratio(mouth):
    """Return the vertical mouth opening: the Euclidean distance between
    the landmarks at indices 3 and 18 of the mouth slice (facial points
    51 and 66 — presumably upper/lower inner lip; confirm against the
    68-point model)."""
    return dist.euclidean(mouth[3], mouth[18])
pred=0
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
help="path to facial landmark predictor")
ap.add_argument("-v", "--video", type=str, default="",
help="path to input video file")
args = vars(ap.parse_args())
if(os.path.isfile("train.csv")):
fo = open("train.csv","a")
else:
fo = open("train.csv","w+")
ft = open("test.csv","w+")
# define two constants, one for the eye aspect ratio to indicate
# blink and then a second constant for the number of consecutive
# frames the eye must be below the threshold
EYE_AR_THRESH = 0.22
EYE_AR_CONSEC_FRAMES = 3
# initialize the frame counters and the total number of blinks
COUNTER = 0
TOTAL = 0
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
# grab the indexes of the facial landmarks for the left and
# right eye, respectively
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
(mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
# start the video stream thread
print("[INFO] starting video stream thread...")
# vs = FileVideoStream(args["video"]).start()
# fileStream = True
vs = VideoStream(src=0).start()
# vs = VideoStream(usePiCamera=True).start()
fileStream = False
time.sleep(1.0)
start_time = time.time()
s=0
temp=0
cmew=np.array([1,2])
# tes=open("005_sleepyCombination_eye.txt","r")
# tes=open("005_slowBlinkWithNodding_eye.txt","r")
# tes=open("022_noglasses_mixing_drowsiness.txt","r")
# tes=open("004_noglasses_mixing_drowsiness.txt","r")
# l=[]
# while True:
''' c=tes.read(1)
if not c:
break
l.append(int(c))
# print(l)
FNO=0
cor=0
s=0
'''
# loop over frames from the video stream
while True:
# if this is a file video stream, then we need to check if
# there any more frames left in the buffer to process
if fileStream and not vs.more():
break
# grab the frame from the threaded video file stream, resize
# it, and convert it to grayscale
# channels)
frame = vs.read()
# nframe=frame.copy()
# nframe=cv2.flip( frame, -1 )
# print(type(frame))
if type(frame)!=type(cmew):
break
frame = imutils.resize(frame, width=450)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# detect faces in the grayscale frame
rects = detector(gray, 0)
# loop over the face detections
for rect in rects:
# determine the facial landmarks for the face region, then
# convert the facial landmark (x, y)-coordinates to a NumPy
# array
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
# extract the left and right eye coordinates, then use the
# coordinates to compute the eye aspect ratio for both eyes
leftEye = shape[lStart:lEnd]
rightEye = shape[rStart:rEnd]
mouth = shape[mStart:mEnd]
leftEAR = eye_aspect_ratio(leftEye)
rightEAR = eye_aspect_ratio(rightEye)
mouthAR = mouth_ratio(mouth)
#Increment the frame no.
# FNO+=1
# average the eye aspect ratio together for both eyes
ear = (leftEAR + rightEAR) / 2.0
# compute the convex hull for the left and right eye, then
# visualize each of the eyes
leftEyeHull = cv2.convexHull(leftEye)
rightEyeHull = cv2.convexHull(rightEye)
mouthHull = cv2.convexHull(mouth)
cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
cv2.drawContours(frame, [mouthHull], -1, (0, 255, 0), 1)
# check to see if the eye aspect ratio is below the blink
# threshold, and if so, increment the blink frame counter
if ear < EYE_AR_THRESH:
COUNTER += 1
# otherwise, the eye aspect ratio is not below the blink
# threshold
else:
# if the eyes were closed for a sufficient number of
# then increment the total number of blinks
pre=0
if COUNTER >= EYE_AR_CONSEC_FRAMES:
TOTAL += 1
s+=1
curt=(time.time() - start_time)
# W = 2.655932
# b = -1.0237936 Surya
# W = 1.9900047
# b = 1.2658211
#chinese bigger eyes
W=3.84615
b=1.112096
y=W*TOTAL + b
if(abs(y-curt)<=17):
pre=0
if(abs(y-curt)>17):
# print("OUTSIDE")
pre=1
cv2.putText(frame,"OUTSIDE",(10,300),cv2.FONT_HERSHEY_SIMPLEX,0.7,(0,0,255),2)
# if(l[FNO]==pre):
# cor+=1
fo.write(str(curt)+"," + str(TOTAL)+"\n")
# reset the eye frame counter
COUNTER = 0
# draw the total number of blinks on the frame along with
# the computed eye aspect ratio for the frame
cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.putText(frame, "MouthAR: {}".format(mouthAR), (225, 320),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
# show the frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
cv2.destroyAllWindows()
# accuracy=pred/TOTAL*100
# print(accuracy)
fo.close()
fo = open("train.csv","a")
# W = 2.655932
# b = -1.0237936 Surya
# W = 1.9900047
# b = 1.2658211 Chinese small
#chinese bigger
W = 3.84615
b = 1.112096
column_names=['y','x']
dataset=pd.read_csv('train.csv',names=column_names)
# print(dataset)
column_names=['y','x']
x=dataset.x.tolist()
y=dataset.y.tolist()
plt.scatter(x,y)
x=np.array(x)
y=np.array(y)
y1=np.array(W*(x)+b)
yu=np.array(W*(x)+b+17)
yl=np.array(W*(x)+b-17)
plt.plot(x,y1)
plt.plot(x,yu)
plt.plot(x,yl)
# accuracy=cor/s*100
# rsq=r2_score(y,yp)
# print("R-Squared:",rsq)
# print("ACCURACY:",accuracy)
plt.show()
# do a bit of cleanup
vs.stop() | [
"noreply@github.com"
] | Surya-Narayan.noreply@github.com |
b14c2b98a07fad5acc877d946f624a0191ab7c48 | 3cfd5edbacb48d5197d709f52f77433194cedf2a | /app/middlewares/acl.py | 72dd97eb8c38bb3d704106b06790ff099a0bf2a5 | [] | no_license | pikoUsername/A-Search | 1ebb3062a930225cc3a7e5a515f77371aed862b6 | 59377c4e8cb6d0af09375aca1c03f35c371a212f | refs/heads/master | 2023-02-18T19:10:01.007817 | 2021-01-18T14:10:48 | 2021-01-18T14:10:48 | 325,986,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | from typing import Optional
from aiogram import types
from aiogram.dispatcher.middlewares import BaseMiddleware
from ..models import dbc, User, Chat
class AclMiddleware(BaseMiddleware):
    """Loads (creating on first sight) the DB ``User``/``Chat`` rows for
    each incoming update and exposes them to handlers via ``data``."""

    async def setup_chat(self, data: dict, tg_user: types.User, tg_chat: Optional[types.Chat] = None):
        """Resolve DB records for the Telegram user/chat and stash them in *data*.

        In private chats (no *tg_chat*) the chat id falls back to the user id.
        """
        user_id = tg_user.id
        chat_id = tg_chat.id if tg_chat else tg_user.id

        db_user = await User.get(user_id)
        if not db_user:
            db_user = await dbc.add_new_user(tg_user)

        db_chat = await Chat.get(chat_id)
        if not db_chat:
            db_chat = await dbc.add_new_chat(tg_chat)

        data["user"] = db_user
        data["chat"] = db_chat

    async def on_pre_process_message(self, message: types.Message, data: dict):
        """aiogram hook: runs before every message handler."""
        await self.setup_chat(data, message.from_user, message.chat)

    async def on_pre_process_callback_query(self, query: types.CallbackQuery, data: dict):
        """aiogram hook: runs before every callback-query handler."""
        tg_chat = query.message.chat if query.message else None
        await self.setup_chat(data, query.from_user, tg_chat)
| [
"galymzhan.amantaj@gmail.com"
] | galymzhan.amantaj@gmail.com |
63b8925658c1f05ca2b3c52b232b086acf5307c0 | f2b5889d73cc9fcfd58a2dc807253bd4796849b5 | /naginpy/pipeforward.py | a4893a4b1d3370e7b48d50c402601de681886f75 | [
"MIT"
] | permissive | dalejung/naginpy | e290cb2d26728c625d9b4199dbf1956fe1f6a0c9 | bbc2b380a278a129449ee170fb22efa7f687b6e8 | refs/heads/master | 2020-12-25T18:17:16.498018 | 2018-08-19T18:14:12 | 2018-08-19T18:14:12 | 23,586,699 | 4 | 1 | MIT | 2018-08-19T06:29:59 | 2014-09-02T16:40:21 | Python | UTF-8 | Python | false | false | 419 | py | """
df = value %>%
sum %>%
filter(is_na) %>%
summarize
df = value |>
sum |>
filter(is_na) |>
summarize
with PipeForward(value) as df:
_ = value
_ = sum(_)
_ = filter(_, is_na)
_ = summarize(_)
df = _
with PipeForward(value):
sum
filter(10)
summarize
with value.pipe():
"""
# NOTE(review): illustrative sketch only — `value` is never defined in this
# module, so importing it raises NameError before the `with` block runs.
with value.pipe():
    sum #>>
    filter(10) #>>
    summarize

value >> sum
"dale@dalejung.com"
] | dale@dalejung.com |
c3a9940907e5fba2b01888c2fe89ad17729ac313 | 4ec8a0e68dd5ff97bdd7c4f57e510d77fa106dbf | /blog/migrations/0002_auto_20190527_1300.py | 689eebcd43ef87c71af7949aa7f4fddbf3efbea1 | [] | no_license | mohammadkanon/travel_blog | b377a13da2ee9be783639e6405244a5be6403a2e | f94ada7a44073151e81d4173cb1cbc9ed78cdd5d | refs/heads/master | 2020-06-04T15:07:53.762761 | 2019-06-15T12:15:42 | 2019-06-15T12:15:42 | 192,074,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,451 | py | # Generated by Django 2.2.1 on 2019-05-27 07:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='commoninfo',
name='author_details',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='commoninfo',
name='author_status',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='commoninfo',
name='author_twitter_url',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='commoninfo',
name='author_youtube_url',
field=models.URLField(blank=True, null=True),
),
migrations.AlterField(
model_name='commoninfo',
name='author_email',
field=models.EmailField(blank=True, max_length=254, null=True),
),
migrations.AlterField(
model_name='commoninfo',
name='author_fb_url',
field=models.URLField(blank=True, null=True),
),
migrations.AlterField(
model_name='commoninfo',
name='author_pic',
field=models.ImageField(blank=True, null=True, upload_to=''),
),
]
| [
"gentlekanon@gmail.com"
] | gentlekanon@gmail.com |
891ba0a2d86e0059af8f125d7eb99dae7de96607 | 6045518db77c6104b4f081381f61c26e0d19d5db | /python_scripts/search_version_added_mod1.py | b004390e83fe6d2b0ab9848b4394cb3dd5111e20 | [] | no_license | edisutoyo/msr16_td_removal | 6e039da7fed166b81ede9b33dcc26ca49ba9259c | 41b07293c134496ba1072837e1411e05ed43eb75 | refs/heads/master | 2023-03-22T21:40:42.993910 | 2017-09-22T09:19:51 | 2017-09-22T09:19:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,558 | py | # import difflib
# import distance
import psycopg2
import sys
import os
connection = None
# connect to the database to retrieve the file name linked with the commit
connection = psycopg2.connect(host='localhost', port='5432', database='comment_classification', user='evermal', password='')
cursor = connection.cursor()
def parse_line_comment(comment):
    """Split a run of '//' line comments into a list of '//'-prefixed,
    whitespace-stripped comment strings; empty segments are dropped."""
    return [('//' + segment).strip()
            for segment in comment.split('//')
            if segment != '']
def parse_block_comment(comment):
    """Split a '/* ... */' block comment into stripped, non-empty lines.

    Quirk preserved from the original: when the comment occupies a single
    line, the '/*' and '*/' markers are NOT removed — only surrounding
    whitespace is stripped.
    """
    lines = comment.split('\n')
    single_line = len(lines) == 1
    cleaned = []
    for raw in lines:
        if single_line:
            text = raw.strip()
        else:
            text = raw.replace('/**', '').replace('*/', '').replace('/*', '').strip()
        if text != '':
            cleaned.append(text)
    return cleaned
# cursor.execute("select a.comment_type, a.comment_text, a.project_name, a.version_name, a.file_name, b.version_order, a.processed_comment_id from technical_debt_summary a, tags_information b where a.project_name = b.project_name and a.version_name = b.version_name and a.version_introduced_name = a.version_removed_name ")
cursor.execute("select a.comment_type, a.comment_text, a.project_name, a.version_name, a.file_name, b.version_order, a.processed_comment_id from technical_debt_summary a, tags_information b where a.project_name = b.project_name and a.version_name = b.version_name and a.processed_comment_id = 77649 ")
results = cursor.fetchall()
for result in results:
comment_type = result[0]
comment_text = "* FIXME: Should this be renamed to match its ruby name?"
project_name = result[2]
version_name = result[3]
file_name = result[4]
version_order = result[5]
processed_comment_id = result[6]
if 'MULTLINE' == comment_type or 'LINE' == comment_type:
comment = parse_line_comment(comment_text)
# print comment
else:
comment = parse_block_comment(comment_text)
# print comment
cursor.execute("select version_name, version_order, version_hash from tags_information where project_name = '"+project_name+"' and version_order <= "+str(version_order)+" order by 2 DESC")
older_versions = cursor.fetchall()
introduced_version_name = older_versions[0][0]
introduced_version_order = older_versions[0][1]
introduced_version_hash = older_versions[0][2]
for older_version in older_versions:
# print older_version
older_version_name = older_version[0]
older_version_order = older_version[1]
older_version_hash = older_version[2]
current_version_path = str(version_order) + '.' + version_name
older_version_path = str(older_version_order) + '.' + older_version_name
# print current_version_path
# print older_version_path
cursor.execute("select file_directory from file_directory_per_version where project_name = '"+project_name+"' and version_hash = '"+older_version_hash+"' and file_name = '"+file_name+"'")
older_version_path_results = cursor.fetchall()
for older_version_path_result in older_version_path_results:
older_file_directory = older_version_path_result[0]
# print older_file_directory
# older_file_directory = file_directory.replace(current_version_path, older_version_path)
print older_file_directory
found_in_version = False
try:
with open (older_file_directory,'r') as f:
comment_index = 0
# comment_distance_threshold = 0
# comment_total_distance = 0
java_file = []
for line in f:
if comment[comment_index] in line.strip():
# value = distance.levenshtein(comment[comment_index], line.strip())
# print str(value)+' - '+line
# if value < 10:
found_in_version = True
print line
print comment[comment_index]
# comment_total_distance = comment_total_distance + value
comment_index = comment_index + 1
if comment_index == len(comment):
break
if found_in_version:
introduced_version_name = older_version_name
introduced_version_order = older_version_order
introduced_version_hash = older_version_hash
version_introduced_file_directory = older_file_directory
print 'total comment distance = '+ str(comment_total_distance)
except Exception, e:
pass
print "introduced version = " + introduced_version_name + ' ' + str(introduced_version_order)
# print "udpate technical_debt_summary set version_introduced_name = '"+introduced_version_name+"', version_introduced_hash = '"+introduced_version_hash+"', version_introduced_file_directory = '"+version_introduced_file_directory+"' where processed_comment_id = '"+str(processed_comment_id)+"'"
cursor.execute("update technical_debt_summary set version_introduced_name = '"+introduced_version_name+"', version_introduced_hash = '"+introduced_version_hash+"' where processed_comment_id = '"+str(processed_comment_id)+"'")
connection.commit() | [
"everton.maldonado@gmail.com"
] | everton.maldonado@gmail.com |
8865201f90a6632cbc9f96134f21ab5e64e6c55a | 5a05bb7d51d2f8cadb83fec2e38c7a9cd83936e9 | /de1/python/host.py | dd9a963bebbe163401aaca84df7cd3d2806f715b | [] | no_license | GalinhaLX/quickshop | a59121e6ebdf01aba6f988cd85a444f8cf1110b5 | 2d7d24dc738629cc3061fcd9b015d4ae3d030490 | refs/heads/master | 2023-03-16T02:18:17.055789 | 2018-04-06T06:09:07 | 2018-04-06T06:09:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,465 | py | import serial
import sys
import os
import time
import requests
import json
def main(args):
    """Poll the hashing service for work, forward each job to a DE1 board
    over serial, and post the board's hash result back.

    Runs forever. `args` is accepted but unused.
    """
    # init serial connection
    ser = serial.Serial('/dev/tty.usbserial', 115200)
    ser.flush()
    ser.timeout = 15
    while True:
        try:
            # check for valid hash requests
            r = requests.get('http://store.saif.ms/needs_hashing').json();
        except json.decoder.JSONDecodeError as e:
            # otherwise skip request
            print("got json decode error")
            time.sleep(3.0)
            continue
        # check if there is a valid block instance to hash
        if "block" not in r:
            time.sleep(3.0)
        else:
            # formulate message for DE1 based on request info
            # wire format: '$' + block id + optional data + optional prev hash + '$'
            message = "$"
            message = message + str(r.get('block'))
            if 'data' in r and r.get('data') is not None:
                message = message + r.get('data')
            if 'prev_hash' in r and r.get('prev_hash') is not None:
                message = message + r.get('prev_hash')
            message = message + '$'
            # write to DE1 over serial
            # NOTE(review): one byte per write with a 1 ms pause — presumably
            # the DE1 UART drops characters on bursts; confirm before batching.
            for i in range(len(message)):
                ser.write(message[i].encode())
                time.sleep(0.001)
            ser.flush()
            # get data from DE1 over serial
            time.sleep(1./120)
            # [:-4] drops the last two bytes of each line (presumably CR/LF —
            # confirm against the DE1 firmware).
            hashData = str(ser.readline().hex())[:-4]
            time.sleep(1./120)
            nonce = str(ser.readline().hex())[:-4]
            # send response back to server
            p = requests.post('http://store.saif.ms/register_hash',
                data = {'curr_hash': hashData, 'nonce': int(nonce, 16), 'block': r.get('block')})
            print(p.json())
    # NOTE(review): unreachable — the `while True` above never breaks, so the
    # serial port is never closed explicitly.
    ser.close()
if __name__ == "__main__":
main(sys.argv[1:]) | [
"benjaminlang12@gmail.com"
] | benjaminlang12@gmail.com |
cc6aeb11c159d67d3188ad48a3943fd5c5bb5b57 | 34bf67017440fe47658559f91fe153c153a359f4 | /126.py | ab76eec45e690df7ee056355c5e29df63513c5d3 | [] | no_license | KevinWangTHU/LeetCode | 1be5f8f1ab587eea5365abb940785c9fe26f5214 | a7916e0818b0853ec75e24724bde94c49234c7dc | refs/heads/master | 2021-05-04T10:16:26.666260 | 2017-08-09T04:17:12 | 2017-08-09T04:18:49 | 53,427,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,917 | py | import collections, string
class Solution(object):
    # Word Ladder II: find all shortest transformation sequences from
    # beginWord to endWord using a bidirectional BFS.
    def findLadders(self, beginWord, endWord, wordlist):
        """
        :type beginWord: str
        :type endWord: str
        :type wordlist: Set[str]
        :rtype: List[List[str]]
        """
        def construct_paths(source, dest, tree):
            # Rebuild every source -> dest word sequence from the successor
            # edges recorded in `tree` during the BFS.
            if source == dest:
                return [[source]]
            return [[source] + path for succ in tree[source] # path can be [] - for failed trials.
                    for path in construct_paths(succ, dest, tree)]
        def add_path(tree, word, neigh, is_forw):
            # Always record the edge in forward (begin -> end) orientation,
            # whichever direction the current level is expanding in.
            if is_forw:
                tree[word] += neigh,
            else:
                tree[neigh] += word,
        def bfs_level(cur, other, tree, is_forw, wordlist):
            # Expand one BFS level from the smaller of the two frontiers;
            # returns True once the frontiers meet.
            if not cur:
                return False
            if len(cur) > len(other):
                return bfs_level(other, cur, tree, not is_forw, wordlist)
            # Remove frontier words so they are never revisited.
            for word in (cur | other):
                wordlist.discard(word)
            next, done = set(), False
            while cur:
                word = cur.pop()
                # All one-letter mutations of `word`.
                for neigh in [word[:idx] + c + word[idx+1:]
                              for c in string.ascii_lowercase
                              for idx in range(len(word))]:
                    if neigh in other:
                        done = True
                        add_path(tree, word, neigh, is_forw)
                    # Once the frontiers have met, stop growing the next level.
                    if not done and neigh in wordlist:
                        next.add(neigh)
                        add_path(tree, word, neigh, is_forw)
            return done or bfs_level(next, other, tree, is_forw, wordlist)
        tree, paths = collections.defaultdict(list), []
        is_found = bfs_level(set([beginWord]), set([endWord]), tree, True, wordlist)
        return construct_paths(beginWord, endWord, tree)
# NOTE(review): Python 2 `print` statement below - this file is a
# syntax error under Python 3.
s=Solution()
print s.findLadders("hit", "dog", {"hog", "hig", "hip"})
| [
"KevinWangTHU@gmail.com"
] | KevinWangTHU@gmail.com |
6f49313c3ac544ded1d289409656abd155683b62 | c7774cdff9de9b314949cddbc2b673c80e7b04a5 | /Data structures/seq.py | 0e255f961b6eba589a5d8fb9551092fb7b6e933a | [] | no_license | semkarim/My-Trainings | 62c5a39fe4be4fb0c28cd142f946757533a9bf67 | 6fbab76db97f16732f808fc96b173d1e19c21be2 | refs/heads/master | 2022-07-10T14:54:52.216763 | 2020-05-13T16:54:30 | 2020-05-13T16:54:30 | 263,679,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | shoplist = ['apples', 'mango', 'carrot', 'bananas']
name = 'ANAROG'
# List operations: indexing from the front (0-based) and the back (negative)
print('Element 0 -', shoplist[0])
print('Element 1 -', shoplist[1])
print('Element 2 -', shoplist[2])
print('Element 3 -', shoplist[3])
print('Element -1 -', shoplist[-1])
print('Element -2 -', shoplist[-2])
print('Element -3 -', shoplist[-3])
print('Element -4 -', shoplist[-4])
# String operations: indexing and slicing a word
print(name[0])
print(name[:2])
print(name[2:])
print(name[1:3])
print(name[1:-2])
print(name[:])
# Operations on a word inside the list (any of the commands above can be applied)
print(shoplist[0][2:4])
| [
"65163745+semkarim@users.noreply.github.com"
] | 65163745+semkarim@users.noreply.github.com |
97d7fca81a1b57c074e04ab9e757aae5b3fb4615 | 1c59ef00948129d8d48f4e73b335c1e419ef9b96 | /priv/demo.py | f2109fce094f4daec561fe9d5c1a716488c65609 | [] | no_license | xshrim/ethlab | a6903af93dcfcb080ca28ed0bfeb1ebfd6357215 | ad0781d3925f8dff1e8ecc4bae45bd936428b5d7 | refs/heads/master | 2022-12-15T02:55:09.217389 | 2019-07-11T00:44:32 | 2019-07-11T00:44:32 | 194,985,601 | 0 | 0 | null | 2022-12-08T23:51:52 | 2019-07-03T05:30:28 | JavaScript | UTF-8 | Python | false | false | 986 | py | import core
from web3.auto import w3
from web3 import Web3
from web3.middleware import geth_poa_middleware
# Use web3's automatic connection feature directly
w3.middleware_stack.inject(geth_poa_middleware, layer=0)
num = w3.eth.blockNumber
tnum = 0
btlist = []
# Count the number of transactions in every block
for i in range(0, num + 1):
    trans = w3.eth.getBlock(i).transactions
    btlist.append((i, len(trans)))
    for tran in trans:
        tnum += 1
        tranhash = Web3.toHex(tran)
        res = w3.eth.getTransactionReceipt(tranhash)
        print(str(i) + ':' + str(res))
        #info = w3.eth.getTransaction(trans)
        #pint(info)
# Call the contract's counter function to fetch the transaction count
contractInfo = core.getContract('./cinfo.json')
contract = w3.eth.contract(
    address=contractInfo['address'],
    abi=contractInfo['abi'],
)
print(contract.functions.getCount().call())
# Show the per-block transaction counts
for bt in btlist:
    print(bt)
# Show the total number of blocks and total transactions
print(num, tnum) | [
"xshrim@gmail.com"
] | xshrim@gmail.com |
a158aa15dd9c00ac920184022de0c9802124f639 | 4a7fccd99b8f34a136cdd5cd1bea558345881962 | /basics.py | 145a480f65588ad775e3382ffda14b3a9d3ec4b6 | [] | no_license | yc1838/Python-Mega-Course | 701e9cd022e09c6a56a40156b11495d240fd4035 | 6479510ceea25b993218c5be82b59b5dab3f7b72 | refs/heads/main | 2023-01-21T18:13:06.829176 | 2020-12-04T05:28:07 | 2020-12-04T05:28:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | with open("hey.txt", "a+") as file:
file.seek(0)
cont = file.read()
file.seek(0)
file.write(cont)
#have to ask teacher for this one.
# write a program that interacts below:
# Say something: Hello
# Say Something: how are you
# Hello. How are you?
'''
def sentences_modification(stringa):
caped = stringa.capitalize()
questions = ("How","What","Why")
if(caped.startswith(questions)):
caped = caped + "?"
else:
caped += "."
return caped
l = []
while True:
user_input = input("Say something:")
if user_input == "/end":
break
else:
l.append(sentences_modification(user_input))
print(" ".join(l))
'''
| [
"cubthemagiclion@gmail.com"
] | cubthemagiclion@gmail.com |
ec1ab374642f6e84c93b717daa1ed23f3dafd3be | c9873319a4ce48dada8a078e5fd9b723b920fedb | /impy/ex/ex9stat.py | 263d93bca52996f171543afc30a40a21834274a5 | [] | no_license | imsoyounq/backupall | 7383d7ce9252770942bc2a12e4c092baaa02df96 | d7a8c8e7342687dba99af2cd41d94f25a27ca02b | refs/heads/master | 2021-01-24T08:55:48.405619 | 2016-10-02T13:16:52 | 2016-10-02T13:16:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | # -*- encoding:utf-8 -*-
# NOTE(review): Python 2 syntax (print statements); will not run under Python 3.
a = []
while True:
    b = int(input("숫자 "))
    if b == 0:
        print "종료합니다."
        break
    else:
        a.append(b)
    # Running stats each iteration; sum/len is integer division under Python 2.
    print "mean:",sum(a)/len(a),", max:",max(a),", min:",min(a),
    print ", stdev:"
    continue
# not "mean: 10," but "mean: 10 ," - add a space before the comma
# capture: formatter does not work
| [
"soyounginseoul@gmail.com"
] | soyounginseoul@gmail.com |
7e3b468b506301ebe3967b1c976b4ab45f06632f | 47ec3048a021319c19228c37acd71fb487de1716 | /spectrum/value_spectrum.py | 5e4507d117cd390200f7ee02f8e907d7f4160ee6 | [
"MIT"
] | permissive | ternandsparrow/natcap-invest-docker-flask | 5485691ec39c35d9a440cb564badee2b3b8cd1ab | 1ef3e58f6af58d1783f6c0b8c80377e645923204 | refs/heads/master | 2023-04-27T14:28:41.561597 | 2021-05-06T08:32:44 | 2021-05-06T08:32:44 | 119,615,923 | 0 | 1 | MIT | 2021-05-08T16:54:26 | 2018-01-31T01:13:17 | Python | UTF-8 | Python | false | false | 5,277 | py | #!/usr/bin/python3
# Runs all permutations and logs the results. Good for getting a feel about how the inputs affect the output
# Run with:
# python3 value_spectrum.py > output.csv
import requests
import json
import sys
def permute_inputs():
    """POST every parameter permutation to the local pollination service and
    emit one CSV row on stdout per response; progress/errors go to stderr."""
    url = 'http://localhost:5000/pollination?years=0'
    # NOTE(review): `with sys.stdout ...` closes stdout/stderr when the block
    # exits - fine for a one-shot script, surprising anywhere else.
    with sys.stdout as out, sys.stderr as err:
        out.write('half_sat,p_managed,fr_spring,fr_summer,fr_autumn,fr_winter,n_cavity,n_stem,n_ground,p_dep,p_abund,pdep_y_w,y_tot,y_wild\n')
        def do_http_call(data, row_num):
            # POST one permutation; on any non-200 response dump the body to
            # /tmp for inspection and abort the whole run.
            resp = requests.post(url, headers={'Accept': 'application/json'}, json=data)
            if resp.status_code != 200:
                err.write('HTTP call failed with status code = %d\n' % resp.status_code)
                error_file = '/tmp/natcap-error.html'
                with open(error_file, 'w') as f:
                    f.write(resp.text)
                err.write('wrote error output to %s\n' % error_file)
                exit()
            resp_body = resp.json()
            # Only the first record of each response is reported.
            rec = resp_body['records'][0]
            err.write('processing row %d\n' % row_num)
            out.write('%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%f,%f,%f,%f\n' % (
                rec['half_sat'], rec['p_managed'], rec['fr_spring'], rec['fr_summer'], rec['fr_autumn'], rec['fr_winter'], rec['n_cavity'],
                rec['n_stem'], rec['n_ground'], rec['p_dep'], rec['p_abund'], rec['pdep_y_w'], rec['y_tot'], rec['y_wild']))
            out.flush()
        for_each_permutation(do_http_call)
def for_each_permutation(callback):
    """Invoke `callback(data, row_number)` for every combination of model
    inputs sampled on a coarse percentage grid (values 0.01 .. 1.0)."""
    row_counter = 1
    step = 33 # smaller values mean an explosion of permutations
    # The four seasonal flower ratios share one grid axis (curr_fr), and the
    # three nesting densities share another (curr_n), to keep the grid small.
    # `or 0.01` bumps an exact 0 up to 0.01 so no parameter is ever zero.
    for curr_half_sat in range(0, 101, step):
        for curr_p_managed in range(0, 101, step):
            for curr_fr in range(0, 101, step):
                for curr_n in range(0, 101, step):
                    for curr_p_dep in range(0, 101, step):
                        half_sat = curr_half_sat / 100 or 0.01
                        p_managed = curr_p_managed / 100 or 0.01
                        fr_spring = curr_fr / 100 or 0.01
                        fr_summer = curr_fr / 100 or 0.01
                        fr_autumn = curr_fr / 100 or 0.01
                        fr_winter = curr_fr / 100 or 0.01
                        n_cavity = curr_n / 100 or 0.01
                        n_stem = curr_n / 100 or 0.01
                        n_ground = curr_n / 100 or 0.01
                        p_dep = curr_p_dep / 100 or 0.01
                        # print('%.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f' % (
                        #     half_sat, p_managed, fr_spring, fr_summer, fr_autumn, fr_winter, n_cavity, n_stem, n_ground, p_dep))
                        data = get_data(half_sat, p_managed, fr_spring, fr_summer,
                                        fr_autumn, fr_winter, n_cavity, n_stem, n_ground, p_dep)
                        callback(data, row_counter)
                        row_counter += 1
def get_data(half_sat, p_managed, fr_spring, fr_summer, fr_autumn, fr_winter, n_cavity, n_stem, n_ground, p_dep):
    """Build the JSON request body for the pollination endpoint.

    The farm feature carries the supplied model parameters; both the farm
    and the revegetation polygons are fixed test geometries.
    """
    farm_properties = {
        "crop_type": "canola",
        "half_sat": half_sat,
        "p_managed": p_managed,
        "season": "summer",
        "fr_spring": fr_spring,
        "fr_summer": fr_summer,
        "fr_autumn": fr_autumn,
        "fr_winter": fr_winter,
        "n_cavity": n_cavity,
        "n_stem": n_stem,
        "n_ground": n_ground,
        "p_dep": p_dep,
    }
    # Closed ring of [lon, lat] pairs outlining the farm paddock.
    farm_ring = [
        [138.7976351232967, -34.91457258034658],
        [138.78809242247843, -34.9099008125332],
        [138.80525654750377, -34.90863402266019],
        [138.78674319454652, -34.90015396745101],
        [138.80292820051716, -34.885391212669326],
        [138.81336158117088, -34.904660062887984],
        [138.81360488755553, -34.91344927305799],
        [138.7976351232967, -34.91457258034658],
    ]
    # Closed ring outlining the revegetation area.
    reveg_ring = [
        [138.8381214829456, -34.884585119806616],
        [138.83812359941683, -34.86297820218762],
        [138.84311529146916, -34.862721911365924],
        [138.84396922110494, -34.88095005155782],
        [138.85344290205637, -34.880946703445765],
        [138.85358081628885, -34.88413143299829],
        [138.8381214829456, -34.884585119806616],
    ]
    farm_feature = {
        "type": "Feature",
        "properties": farm_properties,
        "geometry": {"type": "Polygon", "coordinates": [farm_ring]},
    }
    reveg_feature = {
        "type": "Feature",
        "properties": {},
        "geometry": {"type": "Polygon", "coordinates": [reveg_ring]},
    }
    return {
        "farm": {"type": "FeatureCollection", "features": [farm_feature]},
        "reveg": reveg_feature,
    }
if __name__ == "__main__":
permute_inputs() | [
"tom.saleeba@gmail.com"
] | tom.saleeba@gmail.com |
14a4b02855d5b08a9a4c3b2eb8ee8e69474fed12 | cf76b7ee525a60661402e053548ebdd279fd64e4 | /Day_3_Assignment/Day_3(Question2).py | 7d198588b044dec40f8fa74db2eab1a7380d3200 | [] | no_license | Atularyan/Letsupgrade-Assignment | 79ffff1b273c2f098676f17cd73e91463b8fe0c9 | 4eb90b172612b07c4102328980a4be34ef2a1376 | refs/heads/main | 2023-02-28T10:26:42.613035 | 2021-02-05T21:40:06 | 2021-02-05T21:40:06 | 330,799,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | """
Question 2
Define a function swap that should swap two values and print the swapped variables outside the
swap function.
"""
def swap(n):
    """Return ``n`` with its decimal digits reversed (e.g. 123 -> 321).

    Despite the name (the exercise asked for a value swap), this function
    reverses the digits of a positive integer.  Trailing zeros are lost
    (100 -> 1) and non-positive inputs are returned unchanged.
    """
    rev = 0
    while n > 0:
        n, rem = divmod(n, 10)  # peel off the lowest digit
        rev = rev * 10 + rem
    # After the loop n is 0 for positive inputs, so this is just `rev`;
    # for n <= 0 the loop never runs and n passes through unchanged.
    return rev + n
# Driver: read an integer and feed it to swap() (digit reversal).
n=int(input("Enter the number = "))
res=swap(n)
print("swapped value = ",res) | [
"noreply@github.com"
] | Atularyan.noreply@github.com |
85cac1f5bd1565fd1f4ee1b51dd351185d24ad6b | 0911eef4f271bbe014e3027457574fd9297c6865 | /clustering/attributer.py | 5fa075956952f316fc61dd0a0f9f263c015068e3 | [
"MIT"
] | permissive | bernhardtj/DetectorChar | a7d5dd3a25e88965f6afb8b387b5d4daddb62ed3 | be9fffc0a56c9c8848c67917a839d743a0380ce2 | refs/heads/master | 2020-12-15T03:29:55.456797 | 2020-01-19T23:12:09 | 2020-01-22T02:15:43 | 234,979,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,726 | py | """
attributer.py
get some cluster attributes.
"""
from gwpy.frequencyseries import FrequencySeries
from gwpy.plot import Plot
from gwpy.time import to_gps
from gwpy.timeseries import TimeSeriesDict, TimeSeries
from ligotimegps import LIGOTimeGPS
from numpy import stack, median, diff
from cluster import DEFAULT_FILENAME, colors
from util import get_logger, get_path, write_to_disk, better_aa_opts, data_exists, Progress, writing_opts, path2h5file
# initialize logging.
logger, log_path = get_logger(__name__, verbose=True)
print(f'Writing log to: {log_path}')
def threshold_table(start, stop, reading_channels, channels, bands, label='kmeans-labels', filename=DEFAULT_FILENAME,
                    prefix='.'):
    """
    Makes a html table of 'percent increase' from the largest cluster by band and channel.
    """
    data = TimeSeriesDict.read(filename, reading_channels + [label], start=to_gps(start), end=to_gps(stop))
    labels = data[label]
    # The most-populated cluster is the baseline; it is excluded from the
    # per-cluster output tables.
    clusters = list(range(max(labels.value) + 1))
    cluster_counts = list(len(labels.value[labels.value == c]) for c in clusters)
    largest_cluster = cluster_counts.index(max(cluster_counts))
    clusters.remove(largest_cluster)
    logger.info(
        f'Largest cluster found to be Nº{largest_cluster} ({100 * max(cluster_counts) // len(labels.value)}%). Doing {clusters}.')
    cluster_counts.remove(max(cluster_counts))
    def amplitude(channel, cluster):
        """return median amplitude for channel in cluster."""
        try:
            chan = data[channel]
        except KeyError:
            return 0.0
        return median([chan.value[i] for i, c in enumerate(labels.value) if c == cluster])
    def threshold(cluster, channel, band) -> str:
        # Percent change of this cluster's median BLRMS vs the baseline
        # cluster; falls back to the raw amplitude when the baseline is 0.
        f_channel = f'{channel}_BLRMS_{band}.mean'
        base = amplitude(f_channel, largest_cluster)
        if base != 0.0:
            return str(int(100 * (amplitude(f_channel, cluster) - base) / base)) + '%'
        else:
            return str(amplitude(f_channel, cluster))
    # Optional sensitive-range (SNSH) percent change for each cluster header.
    range_chan = 'L1:DMT-SNSH_EFFECTIVE_RANGE_MPC.mean'
    if range_chan in reading_channels:
        base_range = amplitude(range_chan, largest_cluster)
        if base_range != 0.0:
            snsh = lambda c: 'SNSH: ' + str(int(100 * (amplitude(range_chan, c) - base_range) / base_range)) + '%'
        else:
            snsh = lambda c: 'SNSH: 0.0'
    else:
        snsh = lambda c: ''
    # One CSV + HTML table per cluster: rows are channels, columns are bands.
    with Progress('taking thresholds', len(clusters)) as progress:
        for i, cluster in enumerate(clusters):
            buffer = [[''] + bands]
            for channel in channels:
                buffer.append([channel] + [progress(threshold, i, cluster, channel, band) for band in bands])
            html_table(f'cluster {cluster} ({colors[cluster]}) {snsh(cluster)}',
                       csv_writer(buffer, get_path(f'{cluster}', 'csv', prefix=prefix)),
                       get_path(f'{cluster}', 'html', prefix=prefix))
    # Index page linking to every per-cluster table.
    html_table('Index', csv_writer(
        [['clusters:']] + [[f'<a href="{cluster}.html">Nº{cluster} ({colors[cluster]})</a>'] for cluster in clusters],
        get_path('idx', 'csv', prefix=prefix)), get_path('index', 'html', prefix=prefix))
def representative_spectra(channels, start, stop, rate, label='kmeans-labels', filename=DEFAULT_FILENAME, prefix='.',
                           downloader=TimeSeriesDict.get, cluster_numbers=None, groups=None, **kwargs):
    """
    Make representative spectra for each cluster based on the median psd for minutes in that cluster.
    Downloads only the raw minutes in the cluster to save.
    """
    if groups is None:
        groups = channels
    # read the labels from the save file.
    labels = TimeSeries.read(filename, label, start=to_gps(start), end=to_gps(stop))
    logger.info(f'Read labels {start} to {stop} from (unknown)')
    if cluster_numbers is None:
        # Default: process every cluster except the most-populated one.
        clusters = list(range(max(labels.value) + 1))
        cluster_counts = list(len(labels.value[labels.value == c]) for c in clusters)
        largest_cluster = cluster_counts.index(max(cluster_counts))
        clusters.remove(largest_cluster)
        logger.info(
            f'Largest cluster found to be Nº{largest_cluster} ({100 * max(cluster_counts) // len(labels.value)}%). Doing {clusters}.')
        cluster_counts.remove(max(cluster_counts))
    else:
        clusters = cluster_numbers
        cluster_counts = list(len(labels.value[labels.value == c]) for c in clusters)
    # Collapse the label series into (start, end) time pairs of constant-label
    # stretches; `values` holds the cluster label of each stretch.
    t, v, d = labels.times, labels.value, diff(labels.value)
    pairs = list(zip([t[0]] + list(t[:-1][d != 0]), list(t[1:][d != 0]) + [t[-1]]))
    values = list(v[:-1][d != 0]) + [v[-1]]
    assert len(pairs) == len(values) # need to include start-| and |-end
    # l|r l|r l|r l|r
    # l,r l,r l,r l,r
    # l r,l r,l r,l r # zip(start + l[1:], r[:-1] + stop)
    print(pairs)
    for pair in pairs:
        print(int(pair[1].value) - int(pair[0].value))
    print(values)
    # use h5py to make a mutable object pointing to a file on disk.
    save_file, filename = path2h5file(get_path(f'spectra-cache {start}', 'hdf5', prefix=prefix))
    logger.debug(f'Initiated hdf5 stream to (unknown)')
    logger.info(f'Patching (unknown)...')
    # Download (and resample) only the stretches belonging to a wanted
    # cluster, skipping stretches already present in the cache file.
    for i, (dl_start, end) in enumerate(pairs):
        if values[i] in clusters:
            if not data_exists(channels, to_gps(end).seconds, save_file):
                logger.debug(f'Downloading Nº{values[i]} from {dl_start} to {end}...')
                try:
                    dl = downloader(channels, start=to_gps(dl_start) - LIGOTimeGPS(60),
                                    end=to_gps(end) + LIGOTimeGPS(seconds=1))
                    out = TimeSeriesDict()
                    for n in dl:
                        out[n] = dl[n].resample(**better_aa_opts(dl[n], rate))
                    write_to_disk(out, to_gps(dl_start).seconds, save_file)
                except RuntimeError: # Cannot find all relevant data on any known server
                    logger.warning(f"SKIPPING Nº{values[i]} from {dl_start} to {end} !!")
    logger.info('Reading data...')
    data = TimeSeriesDict.read(save_file, channels)
    logger.info('Starting PSD generation...')
    # Frequency axis taken from a one-minute PSD at the end of the data.
    f = data[channels[0]].crop(start=to_gps(data[channels[0]].times[-1]) - LIGOTimeGPS(60),
                               end=to_gps(data[channels[0]].times[-1])).psd().frequencies
    # Duration (seconds) of the labelled span; also sizes the progress bar.
    d = (to_gps(labels.times[-1]).seconds - to_gps(labels.times[1]).seconds)
    for i, cluster in enumerate(clusters):
        try:
            # Reuse previously computed PSDs from the cache when available.
            psds = {channel: FrequencySeries.read(filename, f'{cluster}-{channel}') for channel in channels}
            logger.info(f'Loaded Nº{cluster}.')
        except KeyError:
            logger.info(f'Doing Nº{cluster} ({100 * cluster_counts[i] / len(labels.value):.2f}% of data)...')
            # Median of one-minute PSDs over every minute labelled with this
            # cluster, computed per channel.
            with Progress(f'psd Nº{cluster} ({i + 1}/{len(clusters)})', len(channels) * d) as progress:
                psds = {channel: FrequencySeries(median(stack([progress(data[channel].crop,
                                                               pc * d + (to_gps(time).seconds - to_gps(
                                                                   labels.times[1]).seconds),
                                                               start=to_gps(time) - LIGOTimeGPS(60),
                                                               end=to_gps(time)).psd().value
                                                               for c, time in zip(labels.value, labels.times) if
                                                               c == cluster]),
                                                        axis=0), frequencies=f, name=f'{cluster}-{channel}')
                        for pc, channel in enumerate(channels)}
            for name in psds.keys():
                psds[name].write(filename, **writing_opts)
    # plotting is slow, so show a nice progress bar.
    logger.debug('Initiating plotting routine...')
    with Progress('plotting', len(groups)) as progress:
        for p, (group, lbls, title) in enumerate(groups):
            # plot the group in one figure.
            plt = Plot(*(psds[channel] for channel in group), separate=False, sharex=True, zorder=1, **kwargs)
            # plt.gca().set_xlim((30,60))
            # modify the figure as a whole.
            # plt.add_segments_bar(dq, label='')
            plt.gca().set_xscale('log')
            plt.gca().set_yscale('log')
            plt.suptitle(title)
            plt.legend(lbls)
            # save to png.
            progress(plt.save, p, get_path(f'{cluster}-{title}', 'png', prefix=f'{prefix}/{cluster}'))
def csv_writer(buffer, filename, delimiter=','):
    """Write each row of `buffer` (an iterable of string sequences) to
    `filename`, joining the fields with `delimiter`; returns `filename`
    so the call can be nested inside html_table()."""
    with open(filename, "w+") as out:
        for row in buffer:
            out.write(delimiter.join(row) + '\n')
    return filename
def html_table(title, in_filename, out_filename):
    """Render a comma-delimited text file as a minimal standalone HTML page.

    The first line of `in_filename` becomes the table header (<th>) and the
    remaining lines become data rows (<td>); the single-line HTML document
    is written to `out_filename`.  Cell text is inserted verbatim (no HTML
    escaping) - the cluster index table relies on this to embed anchors.
    Raises IndexError if the input file is empty.
    """
    # Context managers guarantee the handles are closed even if rendering
    # fails part-way (the original leaked both files on exception).
    with open(in_filename, "r") as filein:
        data = filein.readlines()
    table = f"<!doctype html><html lang='en'><head><title>{title}</title></head><body><h1>{title}</h1><table>"
    # Create the table's column headers
    header = data[0].split(",")
    table += " <tr>\n"
    for column in header:
        table += f" <th>{column.strip()}</th>\n"
    table += " </tr>\n"
    # Create the table's row data
    for line in data[1:]:
        row = line.split(",")
        table += " <tr>\n"
        for column in row:
            table += f" <td>{column.strip()}</td>\n"
        table += " </tr>\n"
    table += "</table></body></html>"
    with open(out_filename, "w+") as fileout:
        fileout.write(table)
| [
"31599460+bernhardtj@users.noreply.github.com"
] | 31599460+bernhardtj@users.noreply.github.com |
4e8773dfd7c43372b1e0e2487c9908b3ce02e2ec | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02695/s928241641.py | 8a3650d94b02032a7e04ac7856e18f47bbcccc2d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | import copy
def gen_mono_inc_seqs(N, M, cur_list, cur_len):
    """Recursively extend every sequence in `cur_list` (each non-decreasing,
    of length `cur_len`, with values in 1..M) until length N is reached;
    returns the list of all resulting non-decreasing sequences."""
    if cur_len == N:
        return cur_list
    # Append every value >= the current last element, keeping the sequences
    # non-decreasing; building `seq + [nxt]` copies each prefix.
    extended = [seq + [nxt]
                for seq in cur_list
                for nxt in range(seq[-1], M + 1)]
    return gen_mono_inc_seqs(N, M, extended, cur_len + 1)
def mono_inc_seqs(N, M):
    """Return every non-decreasing sequence of length N over the values 1..M."""
    seeds = [[start] for start in range(1, M + 1)]
    return gen_mono_inc_seqs(N, M, seeds, 1)
# Read N (sequence length), M (max value) and Q (number of constraints).
N, M, Q = map(int, input().split())
a, b, c, d = [0] * Q, [0] * Q, [0] * Q, [0] * Q
for i in range(Q):
    a_, b_, c_, d_ = map(int, input().split())
    a[i], b[i], c[i], d[i] = a_, b_, c_, d_
max_result = -1
# Brute force: score every non-decreasing sequence; constraint i pays d[i]
# when seq[b[i]] - seq[a[i]] equals c[i] (1-based indices in the input).
seqs = mono_inc_seqs(N, M)
for seq in seqs:
    tmp = 0
    for i in range(Q):
        if seq[b[i]-1] - seq[a[i]-1] == c[i]:
            tmp += d[i]
    max_result = max(max_result, tmp)
print(max_result)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d38c140e26cd68592ad73ae95fa562711473f5a4 | edd025ddd5ff9ff8830bd3dda598715a935d6447 | /openclnoise/addcolor.py | 00101a4dcefc11b4800a17981eaea2d14b305755 | [
"MIT"
] | permissive | antiface/OpenCLNoise | f142fb3fd5cc559803e3be8ecfa89ad63ef3c503 | 31b5ff6739340ce0a9a6a57f6012d71d6a27614f | refs/heads/master | 2020-12-26T04:48:11.260071 | 2013-02-05T21:36:25 | 2013-02-05T21:36:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | from basefilter import *
class AddColor(BaseFilter):
    """Single-input OpenCL filter parameterised by an RGBA color tuple
    (the exact per-pixel operation lives in the addcolor.cl kernel)."""
    # OpenCL kernel source file backing this filter.
    _filename = "addcolor.cl"
    def __init__(self, color=(0.25,0.25,0.25,0)):
        """Create the filter; `color` is an (r, g, b, a) tuple."""
        BaseFilter.__init__(self)
        # Goes through the `color` property below, which also calls
        # on_code_dirty().
        self.color = color
    def get_name(self):
        return "AddColor"
    def get_number_of_inputs(self):
        # This filter consumes exactly one upstream image.
        return 1
    @property
    def color(self):
        # The color is stored as the kernel's COLOR preprocessor define.
        return self._defines['COLOR']
    @color.setter
    def color(self,value):
        self._defines['COLOR'] = value
        self.on_code_dirty(self)  # signal that generated code must be rebuilt
    def __repr__(self):
        return "AddColor(color={0})".format(self.color)
| [
"eswanson@alloscomp.com"
] | eswanson@alloscomp.com |
cde0caf06127a2d54753d2be9a1c321b2d3d73cc | f3abfa06395375bf88245ca172bb0af64b19adf3 | /dcivil/migrations/0003_auto_20200202_1316.py | 9392179bb018eac6d08ab42b599ba6462915fb42 | [] | no_license | loyolabechara/intelicity_be | 5394e356132f2532ad5dbf295a49fc207d1d8f86 | 81dc055ba1e686b2838d7294c641561886473b2c | refs/heads/master | 2020-12-05T19:10:18.721371 | 2020-02-29T03:28:31 | 2020-02-29T03:28:31 | 232,219,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py | # Generated by Django 3.0 on 2020-02-02 16:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `dcivil` app: adds a
    `celular` CharField to Dirigente and Responsavel, and converts
    Ponto_Apoio's `dirigente`/`responsavel` fields to many-to-many
    relations by dropping and re-adding them."""
    dependencies = [
        ('dcivil', '0002_auto_20200202_1257'),
    ]
    operations = [
        # NOTE(review): default=1 is an int for a CharField (Responsavel
        # below uses '') - inconsistent, though Django will stringify it.
        migrations.AddField(
            model_name='dirigente',
            name='celular',
            field=models.CharField(default=1, max_length=11),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='responsavel',
            name='celular',
            field=models.CharField(default='', max_length=11),
            preserve_default=False,
        ),
        # Drop-and-recreate ponto_apoio.dirigente as ManyToMany.
        migrations.RemoveField(
            model_name='ponto_apoio',
            name='dirigente',
        ),
        migrations.AddField(
            model_name='ponto_apoio',
            name='dirigente',
            field=models.ManyToManyField(blank=True, to='dcivil.Dirigente'),
        ),
        # Same conversion for ponto_apoio.responsavel.
        migrations.RemoveField(
            model_name='ponto_apoio',
            name='responsavel',
        ),
        migrations.AddField(
            model_name='ponto_apoio',
            name='responsavel',
            field=models.ManyToManyField(blank=True, to='dcivil.Responsavel'),
        ),
    ]
| [
"loyola@jlb.net.br"
] | loyola@jlb.net.br |
efcacf5019e593a4bf64f6c3a04e37e1c9331b44 | c6588d0e7d361dba019743cacfde83f65fbf26b8 | /x12/5030/435005030.py | a57f914a95dac66f74356e3869e7f5bc1cf84657 | [] | no_license | djfurman/bots-grammars | 64d3b3a3cd3bd95d625a82204c3d89db6934947c | a88a02355aa4ca900a7b527b16a1b0f78fbc220c | refs/heads/master | 2021-01-12T06:59:53.488468 | 2016-12-19T18:37:57 | 2016-12-19T18:37:57 | 76,887,027 | 0 | 0 | null | 2016-12-19T18:30:43 | 2016-12-19T18:30:43 | null | UTF-8 | Python | false | false | 879 | py | from bots.botsconfig import *
from records005030 import recorddefs
# Bots (EDI translator) grammar data - per the module name, X12 transaction
# set 435, version 005030.
# Envelope parameters used when sending this message type.
syntax = {
    'version' : '00403', #version of ISA to send
    'functionalgroup' : 'RK',
}
# Segment hierarchy: each dict names a segment ID with its min/max repeat
# counts; LEVEL nests the child segments of a loop.
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
    {ID: 'SID', MIN: 1, MAX: 9999, LEVEL: [
        {ID: 'N9', MIN: 0, MAX: 30},
        {ID: 'DTM', MIN: 0, MAX: 10},
        {ID: 'LQ', MIN: 0, MAX: 100, LEVEL: [
            {ID: 'MSG', MIN: 0, MAX: 100},
        ]},
        {ID: 'LX', MIN: 0, MAX: 4, LEVEL: [
            {ID: 'N9', MIN: 0, MAX: 50},
            {ID: 'LH3', MIN: 0, MAX: 100},
            {ID: 'LH2', MIN: 0, MAX: 8},
            {ID: 'LFH', MIN: 0, MAX: 20},
            {ID: 'LEP', MIN: 0, MAX: 3},
            {ID: 'LH4', MIN: 0, MAX: 4},
            {ID: 'CRC', MIN: 0, MAX: 5},
        ]},
    ]},
    {ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
711e8f71814bfa10e937a9521b55bec2afaaa889 | adf001bee2e322d9583c60ae42c2d3d5e9cfdd3d | /04-clean-evaluation-set.py | f0d6b27108dc013149ee02965b5112692f83fbf5 | [] | no_license | katerega/entity-expansion | 92e42df687194641cfd9139e7d07f89120ecb8b0 | 56db32ea4baac3a7ea44e872a545e353b7e7246d | refs/heads/master | 2022-01-22T08:52:44.228988 | 2019-07-18T18:56:50 | 2019-07-18T18:56:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,103 | py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import os
import json
import argparse
# The single CLI argument selects which dataset's entity vocabulary to use.
parser = argparse.ArgumentParser()
parser.add_argument("dataset")
args = parser.parse_args()
dataset = args.dataset
cur_dir = os.path.join(os.getcwd())
# In[ ]:
# Gold-standard entity sets, keyed by set name.
gold_set = {}
with open('data/gold_set.json', 'r', encoding='utf-8') as fin:
    gold_set = json.load(fin)
# In[ ]:
# Entities known to this dataset (entity -> id); only these survive below.
entity2eid = {}
with open('data/{}/intermediate/final_entities2eid.json'.format(dataset), 'r') as fin:
    entity2eid = json.load(fin)
good_entities = list(entity2eid.keys())
# NOTE(review): good_entities is a list, so every `in good_entities` check
# below is O(n); converting it to a set would speed up all the filters.
# In[ ]:
# Normalize each gold set to lower_snake_case, drop entities missing from
# the vocabulary, and keep only sets with at least 7 surviving entities.
new_gold_set = {}
cnt = 0
for setname in gold_set:
    tmp = gold_set[setname]['relevant_entities_all'].keys()
    res = [x.lower().replace(" ", "_") for x in tmp]
    out = [x for x in res if x in good_entities]
    if len(out) >= 7:
        cnt += 1
        new_gold_set[setname] = out
print(len(new_gold_set))
# In[ ]:
# Restrict to the curated set names listed in 50sets.txt.
new_list = []
with open('data/50sets.txt','r') as fin:
    lines = fin.readlines()
    cnt = 0
    for line in lines:
        if line.strip('\n').strip() in new_gold_set:
            new_list.append(line.strip('\n').strip())
print("{}/{}".format(len(new_list),len(lines)))
# In[ ]:
# Write each surviving set to data/eval/filter_sets/<dataset>/<name>.set,
# one entity per line.
for setname in new_list:
    ents = new_gold_set[setname]
    if not os.path.exists('data/eval/filter_sets/{}'.format(dataset)):
        os.makedirs('data/eval/filter_sets/{}'.format(dataset))
    with open('data/eval/filter_sets/{}/{}.set'.format(dataset, setname), 'w+') as fout:
        for ent in ents:
            fout.write(ent+"\n")
# In[ ]:
# set names of eval sets that could complement existing sets
renewable_lists = ['C020', 'C023', 'C027', 'C058', 'C083', 'C089', 'C013', 'C062']
renewable_names = [gold_set[x]['title'] for x in renewable_lists]
corres_names = ['fruits', 'colors', 'elements', 'animals', 'body', 'instruments', 'states', 'transportation']
# In[ ]:
# Merge each renewable gold set with its hand-curated counterpart file and
# re-filter against the vocabulary before writing it out.
for idx in range(len(renewable_lists)):
    with open('data/eval/set/merge/{}.set'.format(corres_names[idx])) as fin:
        tmp = list(gold_set[renewable_lists[idx]]['relevant_entities_all'].keys())
        x = [i.lower() for i in tmp]
        lines = fin.readlines()
        y = []
        for line in lines:
            y.append(line.strip().lower().replace(" ", "_"))
        outs = [e for e in list(set(x).union(y)) if e in good_entities]
        if len(outs) >= 7:
            with open('data/eval/filter_sets/{}/{}.set'.format(dataset,renewable_lists[idx]), 'w+') as fout:
                for ent in outs:
                    fout.write(ent+"\n")
# In[ ]:
# Standalone curated sets get the same normalize / filter / write treatment.
for setname in ['grains', 'family_members', 'vegetables', 'days']:
    with open('data/eval/set/standalone/{}.set'.format(setname)) as fin:
        lines = fin.readlines()
        tmp = []
        for line in lines:
            tmp.append(line.strip().lower().replace(" ", "_"))
        outs = [e for e in tmp if e in good_entities]
        outss = list(set(outs))
        if len(outss) >= 7:
            with open('data/eval/filter_sets/{}/{}.set'.format(dataset,setname), 'w+') as fout:
                for ent in outss:
                    fout.write(ent+"\n")
| [
"pxyu@cs.umass.edu"
] | pxyu@cs.umass.edu |
dafdf6e9e4f6f88b4153c18807b15bc3e4ee67ce | e71ef588cd26c6342d7cd75a28f71cc3e74eae2f | /portal/dashboard/admin.py | 3330f09c9532d72d756bd5749c60872beff5cce8 | [] | no_license | muska-choudhary/newProject | dfb4724348be983d2834f650c3fd1cbdb7176d0f | 4dd6e2d43c6d8037ba9feb4b7cd9d73d216aa1cf | refs/heads/main | 2023-07-17T07:41:32.631742 | 2021-08-23T16:15:04 | 2021-08-23T16:15:04 | 398,965,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | from django.contrib import admin
from . models import *
# Register your models here.
admin.site.register(Notes) | [
"noreply@github.com"
] | muska-choudhary.noreply@github.com |
f9dfaccdf5e756e0032eeef3b25c1ae84e611a85 | b34e62032a142aca2f1180dd6a683c51f5f723e9 | /src/work_time.py | b378ac7588972d4afcfc07014d8fb13ba1ed6475 | [] | no_license | scoutiii/SoloScraper | 3de821fa8f9a11f5327f9389a4c21f69fd89e77a | b486ba674f9518a794f6108009bb8faaf68a594e | refs/heads/master | 2022-11-17T22:51:08.738112 | 2020-07-15T18:57:02 | 2020-07-15T18:57:02 | 261,906,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,229 | py | import csv
import sys
import re
from datetime import datetime
from tqdm import tqdm
# Class which has all characteristics of a message
class message_info:
    def __init__(self, message):
        """Parse one raw note.

        Expected shape: "<name> (<title>) - <MM/DD/YYYY HH:MM>" on the first
        line and the note body on the second; every field falls back to
        "NA" when the note does not match.
        """
        # Strip any character outside the allowed set (letters, digits,
        # common punctuation and newline) before matching.
        self.msg_full = re.sub('[^a-zA-Z0-9 \`\~\!\@\#\$\%\^\&\*\(\)\_\+\-\=\[\{\]\}\\\|\;\:\'\"\,\<\.\>\/\?\\n]*', "",
                               message)
        reg_res = re.match("(.*) \((.*)\) - (.*)\\n(.*)", self.msg_full)
        if reg_res is not None:
            self.name = reg_res.group(1)
            self.title = reg_res.group(2).lower()  # lower-cased for title lookups
            self.date = datetime.strptime(reg_res.group(3), "%m/%d/%Y %H:%M")
            self.msg = reg_res.group(4)
            # Classification fields; message_timings overwrites these later.
            self.type = "Other"
            self.sub_type = "Other"
            self.time = "Non"
        else:
            # Unparseable note: mark every field as not available.
            self.name = "NA"
            self.title = "NA"
            self.date = "NA"
            self.msg = "NA"
            self.type = "NA"
            self.sub_type = "NA"
            self.time = "NA"
# Class which classifies messages, and determins timings
class message_timings:
# Lists of notable titles
props_titles = ["proposalist", "junior proposalist", "senior proposalist"]
QA_titles = ["proposal qa", "super admin"]
# Takes a message and its prior to determine what type it is
def classify_message(self, previous, msg):
# Finds time type
if msg.msg.find("URGENT CHECKED") != -1:
msg.time = "Real"
elif previous.time == "NA":
msg.time = "Standard"
else:
msg.time = previous.time
# Checks for easy to catch note types
# END NOTES:
# TYPE: end, archive
if msg.msg.find("Customer Archived") != -1:
msg.type = "End"
msg.sub_type = "Archive"
msg.time = "Standard"
return
# TYPE: end, QA
if msg.msg.find("Proposal(s) Completed and needs QA") != -1:
msg.type = "End"
msg.sub_type = "Archive"
msg.time = "Standard"
return
# TYPE: end, sent
if msg.msg.find("New Solar Proposal") != -1:
msg.type = "End"
msg.sub_type = "Sent"
msg.time = "Standard"
return
# REJECTION NOTES:
# TYPE: rejection, rejection
if msg.msg.find("Proposal Rejected for") != -1:
msg.type = "Rejection"
msg.sub_type = "Rejected"
return
# RESPONSE NOTES:
# TYPE: response, prop Response
if msg.title in message_timings.props_titles:
msg.type = "Response"
msg.sub_type = "Prop Response"
return
# TYPE: response, QA Response
if msg.title in message_timings.QA_titles:
msg.type = "Response"
msg.sub_type = "QA Response"
return
# REQUEST NOTES:
# TYPE: request, create
if msg.msg.find("New customer created successfully") != -1:
msg.type = "Request"
msg.sub_type = "Create"
return
# TYPE (DEFAULT): request, other
msg.type = "Request"
msg.sub_type = "Other"
return
# Goes through the series of classified messages, and determines if there are any work time events
def get_entries(self):
entries = []
start = None
end = None
target = None
i = 0
while i < len(self.messages):
msg = self.messages[i]
if start is None:
if msg.type == "Request":
start = msg
target = "Response"
elif msg.type == "Response":
start = msg
target = "End"
elif msg.type == "Rejection":
start = msg
target = "End"
else:
if msg.type == target:
end = msg
entries.append(self.__create_entry__(start, end))
if start.type == "Response":
start = end = target = None
continue
start = end = target = None
i += 1
return (entries)
# Takes a start and end and creates a work time event
def __create_entry__(self, start, end):
    """Build one work-time entry dict from a start/end message pair."""
    # Map (start.type, end.type) to the kind of interval it represents;
    # any other combination is reported as "Other".
    kind_by_pair = {
        ("Request", "Response"): "Queue Time",
        ("Response", "End"): "Prop Work Time",
        ("Rejection", "End"): "Rejection Work Time",
    }
    entry_kind = kind_by_pair.get((start.type, end.type), "Other")
    # Whole minutes elapsed between the two messages.
    elapsed = end.date - start.date
    minutes_worked = divmod(elapsed.total_seconds(), 60)[0]
    return {
        "Type": entry_kind,
        "Name": end.name,
        "Title": end.title,
        "Work_Time": minutes_worked,
        "Time_Type": start.time,
        "Date": end.date,
        "Customer_Id": self.customer_id,
    }
# Takes a list series of messages and classifies them
def __init__(self, messages, customer_id):
    """Wrap each raw message string in a message_info object and classify
    it, handing the classifier the previous message for context (an empty
    message_info stands in as the predecessor of the first one)."""
    self.customer_id = customer_id
    self.messages = [message_info(raw) for raw in messages]
    previous = message_info("")
    for current in self.messages:
        self.classify_message(previous, current)
        previous = current
# Takes a customer id and returns a list of every message
def get_messages(driver, customer_id):
    """Open the customer's page in the (selenium) driver and return the
    text of every note element in the side-notes panel, in page order."""
    driver.get('https://phx.gosolo.io/customer/' + str(customer_id))
    note_elements = driver.find_elements_by_xpath('//*[@id="sideNotes"]/div')
    return [element.text for element in note_elements]
# Is called first, will return a list of entries to put into the csv
def create_entries(driver, customer_id):
    """Scrape the customer's notes and convert them into the list of
    work-time entry dicts that run() writes to the CSV."""
    notes = get_messages(driver, customer_id)
    return message_timings(notes, customer_id).get_entries()
# run functions goes through customers and creates a csv with time worked info
def run(driver, file_in, file_out):
    """Read customer ids from file_in (CSV with a 'customer_id' column),
    scrape each customer's notes with the selenium driver, and write the
    derived work-time entries to file_out as CSV."""
    print("\nStarting work_time routine\n")
    sys.stdout.flush()
    driver.minimize_window()
    # opens files and csv files
    # NOTE(review): csv module docs recommend opening with newline='' —
    # confirm output on Windows.
    f_in = open(file_in, "r", encoding="utf8")
    f_ut = open(file_out, "w")
    csv_in = csv.DictReader(f_in)
    csv_ut = csv.DictWriter(f_ut, ["Type", "Name", "Title", "Work_Time", "Time_Type", "Date", "Customer_Id"])
    csv_ut.writeheader()
    # Gets all ids first, so that we can have a progress bar
    customer_ids = set()
    for line in csv_in:
        customer_ids.add(line["customer_id"])
    # loops through all customer ids, and writes the csv entries
    customer_ids = list(customer_ids)
    customer_ids.sort()
    for customer_id in tqdm(customer_ids):
        entries = create_entries(driver, customer_id)
        csv_ut.writerows(entries)
    f_in.close()
    f_ut.close()
    print("\nRoutine Complete\n")
| [
"33967844+scoutiii@users.noreply.github.com"
] | 33967844+scoutiii@users.noreply.github.com |
cb472c96f064f2152d5691a9dcb04ca57b0bcebe | 990e3a0920f08727ddc521bfbf62cfc5547a013c | /remediation-functions/rds_instance/rdsinstance_performanceinsights.py | 7379513042347868ee77eee2eb6778e8a949ddc5 | [
"MIT"
] | permissive | xeaser/aws-auto-remediation | 73916390063faa7f6d81d8e33f6ae2632621a2ff | 6847e2756111f16bafb34529e07a1c383f99bebf | refs/heads/master | 2021-03-21T12:41:40.884595 | 2020-03-12T14:18:30 | 2020-03-12T14:18:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,538 | py | '''
RDS instance performance insights
'''
from botocore.exceptions import ClientError
def run_remediation(rds, RDSInstanceName):
    """Enable Performance Insights on an RDS instance when it is supported
    and not already turned on.

    :param rds: boto3 RDS client (or a compatible object)
    :param RDSInstanceName: DB instance identifier
    :return: (responseCode, output) tuple describing the outcome
    """
    print("Executing RDS Instance remediation")
    performance_insights = ''
    try:
        response = rds.describe_db_instances(DBInstanceIdentifier=RDSInstanceName)['DBInstances']
        DBInstanceClass = response[0]['DBInstanceClass']
        performance_insights = response[0]['PerformanceInsightsEnabled']
    except ClientError as e:
        responseCode = 400
        output = "Unexpected error: " + str(e)
        # BUG FIX: the original fell through with DBInstanceClass undefined
        # and crashed with NameError below; report the failure instead.
        print(str(responseCode)+'-'+output)
        return responseCode, output
    except Exception as e:
        responseCode = 400
        output = "Unexpected error: " + str(e)
        print(str(responseCode)+'-'+output)
        return responseCode, output
    # Performance Insights is not available on these small instance classes.
    if DBInstanceClass not in ['db.t2.micro', 'db.t2.small', 'db.t3.micro', 'db.t3.small']:
        if not performance_insights:
            # Poll until the instance reaches a state that accepts modifications.
            while response[0]['DBInstanceStatus'] not in ['available', 'stopped']:
                try:
                    response = rds.describe_db_instances(DBInstanceIdentifier=RDSInstanceName)['DBInstances']
                except ClientError as e:
                    responseCode = 400
                    output = "Unexpected error: " + str(e)
                except Exception as e:
                    responseCode = 400
                    output = "Unexpected error: " + str(e)
            try:
                result = rds.modify_db_instance(
                    DBInstanceIdentifier=RDSInstanceName,
                    ApplyImmediately=True,
                    EnablePerformanceInsights=True
                )
                responseCode = result['ResponseMetadata']['HTTPStatusCode']
                if responseCode >= 400:
                    output = "Unexpected error: %s \n" % str(result)
                else:
                    output = "Performance insights enabled for rds-instance : %s \n" % RDSInstanceName
            except ClientError as e:
                responseCode = 400
                output = "Unexpected error: " + str(e)
                print(output)
            except Exception as e:
                responseCode = 400
                output = "Unexpected error: " + str(e)
                print(output)
        else:
            responseCode = 200
            output = 'Performance insights already enabled for rds-instance : ' + RDSInstanceName
            print(output)
    else:
        responseCode = 200
        output = 'Performance insights is not supported for rds-instance : ' + RDSInstanceName
        print(output)
    print(str(responseCode)+'-'+output)
    return responseCode, output
"ankitrao7739@gmail.com"
] | ankitrao7739@gmail.com |
dc9a696d53a940224de5525365420e23e1c82e96 | 5077fc5d82caa3b3ed5ce0e062bfe75cd4037ebc | /forever_thinking/bilibili获取封面.py | 260f9926d90e3490e4b217ca8bb4cc9d9081eb75 | [] | no_license | asswecanfat/git_place | ee10e1057d8307d3c72f57291b5bcb6d0579017e | 244ff0de11ffbe1aa9f20308e43af39486507f6f | refs/heads/master | 2021-07-18T23:06:14.324164 | 2020-09-02T12:15:27 | 2020-09-02T12:15:27 | 210,833,462 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,107 | py | import requests
from bs4 import BeautifulSoup
from attr import attrib, attrs
import json
import re
import random
import os
@attrs
class BiliBili(object):
    """Finds a bilibili video through the site search and downloads its
    cover image to a local folder."""
    # attrs-declared fields with defaults.  NOTE(review): the headers dict
    # default appears to be shared between instances — confirm attrs copies it.
    file_path = attrib(default=r'C:\Users\10248\Desktop\1.txt')
    pic_path = attrib(default=r'C:\Users\10248\Desktop')
    source_wab_url = attrib(default='https://search.bilibili.com/all?keyword=')
    headers = attrib(default={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                                            'AppleWebKit/537.36 (KHTML, like Gecko) '
                                            'Chrome/75.0.3770.142 Safari/537.36'})
    def update_url(self, av_num):
        """Append the av number and search-source suffix to the search URL."""
        self.source_wab_url = '{}{}{}'.format(self.source_wab_url, av_num, '&from_source=banner_search')
    def get_url_data(self, url):
        """GET `url` with browser-like headers and return the response."""
        return requests.get(url, headers=self.headers)  # reponse
    def download_cover(self):
        """Fetch the search page, extract the cover URL, and save the image
        under pic_path using a random, collision-checked file name."""
        reponse = self.get_url_data(self.source_wab_url)
        # NOTE(review): str.__init__() is a no-op — this does NOT reset the
        # accumulated search URL for reuse as the author seems to intend.
        self.source_wab_url.__init__()
        # self.test_save_data(reponse)
        # NOTE(review): ' http:' carries a leading space — verify requests
        # accepts the resulting URL.
        pic_url = '{}{}'.format(' http:', self.deal_web_data(reponse))
        final_pic_path = r'{}\{}'.format(self.pic_path, str(random.randint(0, 1000)) + '.jpg')
        while os.path.exists(final_pic_path):
            final_pic_path = r'{}\{}'.format(self.pic_path, str(random.randint(0, 1000)) + '.jpg')
        with open(final_pic_path, 'wb') as f:
            f.write(self.get_url_data(pic_url).content)
        print('封面获取成功!')
    def deal_web_data(self, reponse):
        """Extract the first video's cover URL from the inline JSON found in
        the page's 7th <script> tag (presumably the bootstrap state — TODO
        confirm against the live page)."""
        soup = BeautifulSoup(reponse.text, 'lxml')
        point = soup.find_all('script')
        # print(point[6])
        real_data = re.split(r'=|;\(', point[6].text)[1]
        # print(real_data)
        now = json.loads(real_data)
        # print(now['allData']['video'][0]['pic'])
        return now['allData']['video'][0]['pic']
    def test_save_data(self, reponse):
        """Debug helper: dump the raw response body to file_path."""
        with open(self.file_path, 'wb') as f:
            f.write(reponse.content)
if __name__ == '__main__':
    bi = BiliBili()
    # Prompt (Chinese): "please enter the av number".
    av_n = input('请输入av号:')
    bi.update_url(av_n)
    bi.download_cover()
| [
"1024847824@qq.com"
] | 1024847824@qq.com |
b0fbdd67571f55ad9c0ff76b623e2117b8250698 | 0e477b7f21ff3e220b0fe21f5547ac3b4fb71cdd | /Run_GccCoverage_ForSourceFiles.py | 68554d4a8d619c87979fed5cc528e78eb1ed85d8 | [] | no_license | Fasten90/GccCoverageParser | 2c2cb8af41118ab59d960ae13446630565ba1da9 | e4cad4dbc012594e7dd5e9dc9cfa66cf9f6ab895 | refs/heads/master | 2022-03-21T18:07:22.205280 | 2022-02-22T00:02:33 | 2022-02-22T00:02:33 | 218,975,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,478 | py | import glob
import subprocess
import os
import time
from enum import Enum
import re
import argparse
# TODOs
# - UnitTest - system executing test
# Note: Similar command shall be called all gcno file
# gcov --all-blocks --function-summaries --branch-probabilities --branch-counts --unconditional-branches CMakeFiles/FastenHomeAut.dir/Src/main.c.gcno
# gcov executable name and the flags passed for every .gcno input file.
__COMMAND = "gcov"
__COMMAND_ARG = "--all-blocks --function-summaries --branch-probabilities --branch-counts --unconditional-branches"
# Discovered .c source paths (filled in by find_sources()).
source_list = []
# NOTE(review): cwd_is_changed/cwd appear unused in the visible module code.
cwd_is_changed = False
cwd = None
def set_workdir(dir):
    """Change the process working directory and log it.

    :param dir: target directory (relative or absolute)
    """
    # BUG FIX: the original format string had only one '{}' placeholder for
    # two arguments, so the absolute path was silently dropped from the log.
    print("Set working directory to: {}\n"
          "  Absolute path: {}".format(
              dir,
              os.path.abspath(os.path.normpath(dir))))
    os.chdir(dir)
# NOTE(review): re-initializes the module-level list defined above.
source_list = []
def find_sources():
    """Collect .c files up to three directory levels under Src/ into the
    module-level source_list and print them as an indented tree."""
    # Source list
    # TODO: Move
    global source_list
    # NOTE(review): without recursive=True, '**' in glob behaves like '*',
    # which is presumably why three explicit depth patterns are stacked.
    source_list += glob.glob("Src/*.c")
    source_list += glob.glob("Src/**/*.c")
    source_list += glob.glob("Src/**/**/*.c")
    #source_list += Path('Src').glob('**/*.c')
    # TODO: Shall we append another?
    print("Source list:")
    prev_dir = ""
    for src_item in source_list:
        # Normalize separators to Windows-style backslashes for display.
        src_item = src_item.replace("\\\\", "\\").replace("/", "\\")
        [dir, name] = src_item.rsplit("\\", 1)
        if dir != prev_dir:
            # Print each directory header once per group of files.
            prev_dir = dir
            str_indent = " " * src_item.count("\\")
            print(str_indent + "[" + dir + "]")
        str_indent = " " * (src_item.count("\\") + 1)
        print(str_indent + "- " + name)
def exec_gcov_on_source(files_root):
    """Run gcov over every discovered source file that has a matching .gcno
    note file under files_root.

    :param files_root: directory holding the compiler-generated .gcno files
    """
    for source in source_list:
        # Call for all source file
        source = source.replace("\\", "/")
        gcno_file_path = os.path.join(files_root, source + ".gcno")
        if os.path.exists(gcno_file_path):
            # BUG FIX: the original passed a list together with shell=True and
            # kept every flag in one string; on POSIX only the first list item
            # is executed and the flags become a single bogus argument.
            # Split the flags and invoke gcov directly (no shell).
            command = [__COMMAND] + __COMMAND_ARG.split() + [gcno_file_path]
            print("Command: {} {} {}".format(__COMMAND, __COMMAND_ARG, gcno_file_path))
            return_code = subprocess.call(command)
            # Debug code
            #print("  Return code: {}".format(return_code))
        else:
            # Do not call
            print("'{}' has no gcno file".format(source))
def wait():
    """Pause ~5 seconds (printing a dot per second) so gcov's output files
    have time to appear before they are parsed."""
    print("Wait...")
    for i in range(5):
        print(".")
        time.sleep(1)
# Filled by check_gcov_files() with the *.gcov paths found in the cwd.
gcov_file_list = None
# TODO: Handle another format of lines
# Example gcov output that is NOT handled yet:
"""
function CircularBuffer_IsFull called 302 returned 100% blocks executed 100%
call 0 returned 1
call 0 never executed
unconditional 0 taken 1
%%%%%: 144-block 0
branch 0 taken 0 (fallthrough)
 16: 128-block 0
"""
# Example
"""
 513: 871: if ((str == NULL) || (value == NULL))
 -: 872: {
 #####: 873: return false;
"""
class gcov_info(Enum):
    """Coverage classification of a single annotated gcov line."""
    ERROR = 0      # line could not be parsed
    UNKNOWN = 1    # '-' count: non-executable line
    UNCOVERED = 2  # '#####' count: executable but never run
    COVERED = 3    # numeric count: executed at least once
def get_line_data(line):
    """Split one gcov line of the form "count:lineno:source" into its
    coverage status.

    :param line: raw line from a .gcov file
    :return: (gcov_info, execution_count_or_None, line_number_or_None)
    """
    try:
        [line_info, line_number, line_content] = line.split(":", 2)
    except ValueError:
        # BUG FIX: unpacking fewer than three fields raises ValueError, not
        # IndexError, so the original handler never fired and the caller
        # crashed on malformed lines.
        print("[ERROR]: Cannot parsed line: '{}'".format(line))
        return (gcov_info.ERROR, None, None)
    line_info = line_info.strip()
    # NOTE(review): a non-numeric line-number field still raises ValueError
    # here, exactly as in the original code.
    line_number = int(line_number.strip())
    if line_info.isdigit():
        return (gcov_info.COVERED, int(line_info), line_number)
    elif "-" == line_info:
        return (gcov_info.UNKNOWN, None, line_number)
    elif "#####" == line_info:
        return (gcov_info.UNCOVERED, None, line_number)
    else:
        print("[ERROR]: gcov info could not recognize: '{}' at line {}.".format(line_info, line_number))
        return (gcov_info.ERROR, None, line_number)
# Function detection
# Limitations:
# more line declarated functions
# MACRO FUNCTION
# Example
# https://regex101.com/r/PgMQnh/2
"""
void function1(void)
void function2(void) {
void function3(int blabla)
void function4(int blabla) {
int function5(void)
int * function6(int bla1, int bla2)
INLINE_FUNCTION void function7(int blabla)
void function8 ( int * bla )
void function9 ( uint8_t * ehh, Type_ omg )
bool BUTTON_GetButtonState(ButtonType_t button)
"""
# Examples
"""
/* Noooooooooooooo */
/* Do not accept because the empty () */
void function()
IO_Output_SetStatus(IO_LED_Blue, IO_Output_Cmd_SetToggle);
UNUSED_ARGUMENT(source);
if (UnitTest_InvalidCnt)
state = (HAL_GPIO_ReadPin(BUTTON_USER_GPIO_PORT, BUTTON_USER_GPIO_PIN) == GPIO_PIN_SET) ? true : false;
else if (str[1] == 80)
else if (circBuff->readCnt > circBuff->writeCnt)
else if (!StrCmp("settime", argv[1]) && argc == 3)
else if (Logic_Display_ActualState < AppType_Count)
else if (TaskList[i].isRequestScheduling)
/* Commented line */
* Test data in end of buffer (overflow!)
"""
# Matches a C function declaration in the gcov source column and captures
# its name; deliberately rejects empty parentheses and control statements
# (see the examples in the comments above).
regex_function_detect = re.compile(r"^ *([\w]+[\w\* ]*) (?P<function_name>[^\(\=\? ]+) *\( *[^\)\=\>\<.]+ *\) *\{*$")
# Parsed coverage data: {file_name: {function_name: {...}}}.
gcov_info_list = {}
def parse_gcov_file(file_path):
    """Parse a single .gcov report into the module-level gcov_info_list.

    For every function declaration found in the annotated source column a
    record is created holding the declaration line, whether that line was
    executed, and (line_number, covered) tuples for the executable lines
    that follow it.

    :param file_path: path of the .gcov file to parse
    """
    global gcov_info_list
    with open(file_path, 'r') as file:
        print("Start gcov parseing: '{}'".format(file_path))
        file_name = file_path.split(".gcov")[0]
        gcov_info_list[file_name] = {}
        file_content = file.readlines()
        prev_func_exists = False
        prev_func_name = ""
        for i, line in enumerate(file_content):
            # A gcov line is "<count>:<line no>:<source>"; the source column
            # is what we scan for a function declaration.
            try:
                line_try_parse_for_function = line.split(":", 2)[2]
            except IndexError:
                print("[ERROR]: Cannot parsed line: '{}' at line {}".format(line, i))
                continue
            actual_line_is_function_decl = regex_function_detect.match(line_try_parse_for_function)
            if actual_line_is_function_decl:
                # New function declaration, break the previous!
                function_name = actual_line_is_function_decl.group("function_name")
                (line_info, line_data, line_number) = get_line_data(line)
                # BUG FIX: the original tested "line_info.UNCOVERED", which is
                # always truthy (enum member attribute access), so parse
                # errors were never skipped.  Test membership explicitly.
                if line_info not in (gcov_info.COVERED, gcov_info.UNKNOWN, gcov_info.UNCOVERED):
                    print("[ERROR]: Cannot parsed line: '{}' at line {}".format(line, i))
                    continue
                function_is_covered = line_info == gcov_info.COVERED
                gcov_info_list[file_name][function_name] = {
                    "covered_function": function_is_covered,
                    "function_decl_line": line_number,
                    "coverage": []
                }
                prev_func_exists = True
                print("Started new function declaration: '{}' at line '{}'".format(function_name, i+1))
                prev_func_name = function_name
            else:
                # Not a declaration: a line belonging to the function that is
                # currently being collected (if any).
                if prev_func_exists:
                    (line_info, line_data, line_number) = get_line_data(line)
                    if line_info in (gcov_info.COVERED, gcov_info.UNCOVERED):
                        # BUG FIX: the original condition
                        # "line_info == gcov_info.COVERED or gcov_info.UNCOVERED"
                        # was always true, so non-executable ('-') lines were
                        # recorded as uncovered branches.
                        branch_is_covered = line_info == gcov_info.COVERED
                        gcov_info_list[file_name][prev_func_name]['coverage'].append((line_number, branch_is_covered))
                    elif line_info == gcov_info.UNKNOWN:
                        # Non-executable line: nothing to record.
                        pass
                    else:
                        print("[ERROR]: Unknown status of line: '{}' at line {}".format(line, i))
def check_gcov_files():
    """Find every .gcov file in the current directory and parse each one
    into the module-level gcov_info_list."""
    print("----------------------------------------")
    print("Start gcov parseing...")
    print()
    global gcov_file_list
    gcov_file_list = glob.glob("*.gcov")
    # Check all gcovs
    for gcov_file in gcov_file_list:
        print(gcov_file)
        parse_gcov_file(gcov_file)
def print_gcov_results(export_file_path="GccCoverage.txt"):
    """Print the collected coverage data and mirror it into a report file.

    :param export_file_path: path of the text report to (over)write
    """
    export_file_path = os.path.abspath(export_file_path)
    print("Export file to '{}'".format(export_file_path))
    # IMPROVEMENT: 'with' guarantees the report file is flushed and closed
    # even if printing fails (the original used manual open/close).
    with open(export_file_path, "w+") as gcov_export_file:
        def gcov_print(str):
            # Echo to stdout and append the same line to the report.
            print(str)
            gcov_export_file.write(str + "\n")
        for gcov_file in gcov_info_list:
            # Functions
            gcov_print("File: {}".format(gcov_file))
            for function in gcov_info_list[gcov_file]:
                gcov_print("    Function: {} at line {}".format(
                    function,
                    gcov_info_list[gcov_file][function]["function_decl_line"]))
                # Could print all dictionary, but not necessary, if the function has not covered
                if gcov_info_list[gcov_file][function]["covered_function"]:
                    gcov_print("        " + "Tested")
                    for branch_item in gcov_info_list[gcov_file][function]["coverage"]:
                        gcov_print("            " + str(branch_item[0]) + ": " + str(branch_item[1]))
                else:
                    gcov_print("        " + "Not tested")
def run_gcov_task(source_root_dir=".",
                  gcno_files_root=".",
                  gcov_file_root=".",
                  export_file_path="GccCoverage.txt"):
    """End-to-end driver: discover sources, run gcov, parse the reports and
    export the results.

    :param source_root_dir: directory containing the Src/ tree
    :param gcno_files_root: directory with the compiler's .gcno files
    :param gcov_file_root: directory where gcov writes/reads .gcov files
    :param export_file_path: destination of the text report
    """
    # Calculate from actual dir
    source_root_dir = os.path.abspath(source_root_dir)
    gcno_files_root = os.path.abspath(gcno_files_root)
    gcov_file_root = os.path.abspath(gcov_file_root)
    export_file_path = os.path.abspath(export_file_path)
    set_workdir(source_root_dir)
    find_sources()
    set_workdir(gcov_file_root)
    exec_gcov_on_source(gcno_files_root)
    wait()
    set_workdir(gcov_file_root)
    check_gcov_files()
    print_gcov_results(export_file_path)
if __name__ == "__main__":
    # CLI flags mirror run_gcov_task's keyword parameters.
    parser = argparse.ArgumentParser()
    # E.g. 'CMakeFiles\FastenHomeAut.dir'
    parser.add_argument("--source-root-dir",
                        help="Directory of sources",
                        default=".")
    parser.add_argument("--gcno-files-root",
                        help="where will have been generated the gcno files\n" \
                             "E.g. Out\CMakeDir\CMakeFiles\Project.dir",
                        default=".")
    parser.add_argument("--gcov-files-root",
                        help="where will be generated the gcov files\n" \
                             "E.g. Out\CMakeDir",
                        default=".")
    parser.add_argument("--export-file-path",
                        help="Result export file path",
                        default="GccCoverage.txt")
    args = parser.parse_args()
    print("Run GccCoveragePraser")
    run_gcov_task(source_root_dir=args.source_root_dir,
                  gcno_files_root=args.gcno_files_root,
                  gcov_file_root=args.gcov_files_root,
                  export_file_path=args.export_file_path)
| [
"fasten90@gmail.com"
] | fasten90@gmail.com |
afca61d5d8ba52a219c2ad7064268eca41cd96c6 | 495ce92166457a6d5818d786a6a3303d3280fcd0 | /src/registration/urls.py | ac889db2d836112cd2cb69c66483cb85276e9187 | [] | no_license | patrickhusi/django-inspectional-registration | 616e7d44716c41b09b32c30415a1cf86d3b7324f | c0aee3ddc4f1a5e870643a605d8a9575b3a7520f | refs/heads/master | 2020-12-25T22:57:45.123082 | 2015-08-01T00:19:32 | 2015-08-01T00:19:32 | 39,487,644 | 0 | 0 | null | 2015-07-22T05:35:21 | 2015-07-22T05:35:21 | null | UTF-8 | Python | false | false | 2,964 | py | # coding=utf-8
"""
URLconf for django-inspectional-registration
"""
__author__ = 'Alisue <lambdalisue@hashnote.net>'
from registration.compat import url
from registration.compat import patterns
from registration.views import RegistrationView
from registration.views import RegistrationClosedView
from registration.views import RegistrationCompleteView
from registration.views import ActivationView
from registration.views import ActivationCompleteView
urlpatterns = patterns('',
url(r'^activate/complete/$', ActivationCompleteView.as_view(),
name='registration_activation_complete'),
url(r'^activate/(?P<activation_key>\w+)/$', ActivationView.as_view(),
name='registration_activate'),
url(r'^register/$', RegistrationView.as_view(),
name='registration_register'),
url(r'^register/closed/$', RegistrationClosedView.as_view(),
name='registration_disallowed'),
url(r'^register/complete/$', RegistrationCompleteView.as_view(),
name='registration_complete'),
)
# django.contrib.auth
from registration.conf import settings
from django.contrib.auth import views as auth_views
if settings.REGISTRATION_DJANGO_AUTH_URLS_ENABLE:
prefix = settings.REGISTRATION_DJANGO_AUTH_URL_NAMES_PREFIX
suffix = settings.REGISTRATION_DJANGO_AUTH_URL_NAMES_SUFFIX
import django
if django.VERSION >= (1, 6):
uidb = r"(?P<uidb64>[0-9A-Za-z_\-]+)"
token = r"(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})"
password_reset_confirm_rule = (
r"^password/reset/confirm/%s/%s/$" % (uidb, token)
)
else:
uidb = r"(?P<uidb36>[0-9A-Za-z]+)"
token = r"(?P<token>.+)"
password_reset_confirm_rule = (
r"^password/reset/confirm/%s-%s/$" % (uidb, token)
)
urlpatterns += patterns('',
url(r'^login/$', auth_views.login,
{'template_name': 'registration/login.html'},
name=prefix+'login'+suffix),
url(r'^logout/$', auth_views.logout,
{'template_name': 'registration/logout.html'},
name=prefix+'logout'+suffix),
url(r'^password/change/$', auth_views.password_change,
name=prefix+'password_change'+suffix),
url(r'^password/change/done/$', auth_views.password_change_done,
name=prefix+'password_change_done'+suffix),
url(r'^password/reset/$', auth_views.password_reset,
name=prefix+'password_reset'+suffix, kwargs=dict(
post_reset_redirect=prefix+'password_reset_done'+suffix)),
url(password_reset_confirm_rule,
auth_views.password_reset_confirm,
name=prefix+'password_reset_confirm'+suffix),
url(r'^password/reset/complete/$', auth_views.password_reset_complete,
name=prefix+'password_reset_complete'+suffix),
url(r'^password/reset/done/$', auth_views.password_reset_done,
name=prefix+'password_reset_done'+suffix),
)
| [
"lambdalisue@hashnote.net"
] | lambdalisue@hashnote.net |
ba6306487c83faeec431bad2bb06879a4608adfe | 84cd1e3493cf0020d39a38c90c5eb7e7b103cf54 | /card.py | 51012e2d75aa1f6030fccf472e6a64e272e98c51 | [] | no_license | jbrunsting/poker-player | cf7c8b12a56ce1d52d330bb1ceeb42814476ad90 | 0e81c9f6bacfbd17bda16b573e3b0641c14e72cc | refs/heads/master | 2022-04-21T13:16:48.133194 | 2020-04-20T16:52:11 | 2020-04-20T16:52:11 | 254,972,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | SUIT_INDICES = "shdc"
# Card values run 2..14 (11=J, 12=Q, 13=K, 14=A); suits index SUIT_INDICES.
MIN_CARD = 2
MAX_CARD = 14
NUM_SUITS = 4
class Card:
    """A playing card with a suit index (0-3, see SUIT_INDICES) and a value
    (2-14, where 11=J, 12=Q, 13=K, 14=A)."""

    # Rank -> offset from the Unicode "card back" code point.  Court cards
    # are not contiguous in the Unicode playing-card block (a "knight" card
    # sits between jack and queen, and the ace is the first code point),
    # hence the explicit map.
    _FACE_OFFSETS = {11: 11, 12: 13, 13: 14, 14: 1}

    def __init__(self, suit, val):
        self.suit = suit
        self.val = val

    def __str__(self):
        base = ord('🂠')
        if self.val <= 10:
            offset = self.val
        else:
            # Unknown high values fall back to offset 0 (card back),
            # matching the original behavior.
            offset = self._FACE_OFFSETS.get(self.val, 0)
        # Each suit occupies a block of 16 consecutive code points.
        return chr(base + offset + self.suit * 16)

    def __repr__(self):
        return "Card(suit={!r}, val={!r})".format(self.suit, self.val)

    def __eq__(self, other):
        # IMPROVEMENT: return NotImplemented for non-Card operands instead
        # of raising AttributeError.
        if not isinstance(other, Card):
            return NotImplemented
        return self.suit == other.suit and self.val == other.val

    def __hash__(self):
        # Defined alongside __eq__ so cards stay usable in sets/dict keys
        # (defining __eq__ alone makes the class unhashable).
        return hash((self.suit, self.val))

    def __lt__(self, other):
        # Order by value, breaking ties by suit.
        if self.val == other.val:
            return self.suit < other.suit
        return self.val < other.val
| [
"jbrunsting@uwaterloo.ca"
] | jbrunsting@uwaterloo.ca |
1be95d863dfee38a01b91acfc9abf1c957153b61 | 5e4bacde637e872ae9057dc1eeea6fa452e315c4 | /first_project/populate_test_app.py | 4dbe44ab661119f6f592964006d370453dc852fe | [] | no_license | get2nav/django_test_deploy | 9c738161c36df00b5693924b49534e65f6bbca95 | 8baab4644209b424bcda9cec2d3ee2ab83e2906b | refs/heads/master | 2021-07-13T00:21:55.530280 | 2017-10-18T05:14:20 | 2017-10-18T05:14:20 | 107,361,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 941 | py | import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE','first_project.settings')
import django
django.setup()
#fake pop script
import random
from test_app.models import AccessRecord, Topic, Webpage
from faker import Faker
fakegen = Faker()
topics = ['Search', 'Social', 'Marketplace', 'News', 'Games']
def add_topic():
    """Get or create a Topic with a randomly chosen name and return it."""
    chosen_name = random.choice(topics)
    topic, _created = Topic.objects.get_or_create(top_name=chosen_name)
    topic.save()
    return topic
def populate(N=5):
    """Create N fake Webpage records (each under a random Topic) together
    with one AccessRecord apiece, using Faker-generated data."""
    for _ in range(N):
        topic = add_topic()
        # Keep the Faker call order so generated sequences are unchanged.
        fake_url = fakegen.url()
        fake_date = fakegen.date()
        fake_name = fakegen.company()
        page, _created = Webpage.objects.get_or_create(
            topic=topic, name=fake_name, url=fake_url)
        page.save()
        record, _created = AccessRecord.objects.get_or_create(
            name=page, date=fake_date)
        record.save()
if __name__ == '__main__':
    # Seed the database with 20 fake webpages when run as a script.
    print("populate start")
    populate(20)
    print("populate end")
| [
"get2nav@gmail.com"
] | get2nav@gmail.com |
ab6eedac97421d8a06a3c9e76013d65e82b63991 | 405ed3b49248e033789bd17bd8dfd30e5a9ef7bc | /lesson_3_task_4.py | 4f6c2419c91de2ae3490938613fc9646c830f15a | [] | no_license | GorobetsYury/python_geekbrains | c50d0909645c479bbc90d104ee1e0ffa789da563 | 1639f596b1666c5f36742409597f8e9c7bde394e | refs/heads/main | 2023-01-20T13:34:30.795615 | 2020-12-02T17:29:31 | 2020-12-02T17:29:31 | 309,039,606 | 0 | 0 | null | 2020-12-02T17:29:32 | 2020-11-01T06:47:51 | Python | UTF-8 | Python | false | false | 785 | py | # Определить, какое число в массиве встречается чаще всего.
# Find which number occurs most often in the random array.
import random
from collections import Counter

SIZE = 10
MIN_ITEM = 0
MAX_ITEM = 100
array = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)]
# IMPROVEMENT: count every value in one O(n) pass instead of the original
# O(n^2) nested loops.
occurrences = Counter(array)
max_count = max(occurrences.values())
# Preserve the original tie-breaking: among equally frequent values the
# winner is the last element of the array whose count equals the maximum.
number = next(item for item in reversed(array) if occurrences[item] == max_count)
if max_count == 1:
    print('Нет повторяющихся чисел.')
else:
    print('Наиболее повторяющееся число в массиве - {}'.format(number))
print(array)
| [
"u_gorobec@rambler.ru"
] | u_gorobec@rambler.ru |
4c76cab52caacba887c04c32272b2911ae465066 | e601b0f85267f884f94f4b4dd4da31de38d2e57d | /utils.py | 06bf308890806d2550ac1fd552dae30d28d1f1cc | [] | no_license | yq605879396/CapVis | fc0295d46f90787c76c82aaa4b76d36b58b0108d | e517732731d64ca83bcbc595e79d5cdaff0945a0 | refs/heads/master | 2023-03-23T00:15:47.474006 | 2021-03-16T23:17:07 | 2021-03-16T23:17:07 | 259,845,398 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,425 | py | import os, io, time, json
import numpy as np
import nltk
# get current path in you computer
current_path = os.getcwd()
# define the path: these files are trained of line
# related codes are in usefule_code directory
# Pre-trained artifacts produced offline: per-image cluster labels, image
# feature vectors, k-means cluster centers, and the feature vector of the
# most recently uploaded test photo.
Labels = np.loadtxt('./pretrained/labels_100')
All_vec = np.loadtxt('./pretrained/All_vectors')
Centers = np.loadtxt('./pretrained/center_100')
test_vec = np.loadtxt('uploaded_image/test_photo_vec')
# NOTE(review): f_caption stays open for the module's lifetime.
f_caption = open('./pretrained/all_caption.csv', 'r')
caption_lines = f_caption.readlines()
# NOTE(review): the tempt_* variables below appear unused in this module.
tempt_seq=""
tempt_seq_clear =""
tempt_caption_list = []
tempt_caption_words=[]
tempt_total_words=[]
tempt_catg_num = 0
# Strip trailing newlines so captions compare and print cleanly.
for i in range(len(caption_lines)):
    caption_lines[i] = caption_lines[i].strip('\n')
def get_label(image_vector, centers=None):
    """Return the index of the cluster center closest to image_vector.

    Distance is squared Euclidean; ties resolve to the lowest index
    (same as the original loop's strict-less-than comparison).

    :param image_vector: 1-D feature vector (array-like)
    :param centers: optional array of center vectors; defaults to the
        module-level Centers loaded from ./pretrained/center_100
    :return: int index of the nearest center, or None if there are no centers
    """
    if centers is None:
        centers = Centers
    centers = np.asarray(centers)
    if len(centers) == 0:
        # Preserve the original behavior for an empty center set.
        return None
    # IMPROVEMENT: vectorized distance computation replaces the per-center
    # Python loop.
    distances = np.sum((centers - np.asarray(image_vector)) ** 2, axis=1)
    return int(np.argmin(distances))
def get_top_images(index, image_vector, k=5, labels=None, all_vec=None):
    """Return the 1-based ids of the k images in cluster `index` that are
    closest (squared Euclidean) to image_vector, nearest first.

    :param index: cluster label to search within
    :param image_vector: query feature vector
    :param k: number of ids to return; ROBUSTNESS: clamped to the cluster
        size (the original raised IndexError for clusters smaller than k)
    :param labels: optional per-image cluster labels (defaults to Labels)
    :param all_vec: optional per-image feature vectors (defaults to All_vec)
    :return: list of 1-based image ids
    """
    if labels is None:
        labels = Labels
    if all_vec is None:
        all_vec = All_vec
    # 1-based ids of the images belonging to the requested cluster.
    candidates = [i + 1 for i in range(len(labels)) if labels[i] == index]
    distances = np.array([np.sum((np.asarray(image_vector) - np.asarray(all_vec[c - 1])) ** 2)
                          for c in candidates])
    order = distances.argsort()
    return [candidates[j] for j in order[:min(k, len(candidates))]]
def add_caption(image_list, captions=None):
    """Look up the caption line for each image id in image_list.

    Note: ids index the caption table directly (the 1-based ids produced
    by get_top_images are used as-is, exactly as in the original code).

    :param image_list: iterable of indices into the caption table
    :param captions: optional caption table; defaults to the module-level
        caption_lines
    :return: list of caption strings in the same order as image_list
    """
    if captions is None:
        captions = caption_lines
    return [captions[i] for i in image_list]
def word_static(index):
    """Aggregate word and POS-tag frequencies over all captions in cluster
    `index`, dump the top-24 words and top-9 tags (each plus an 'others'
    bucket) as JSON for the website, and return summary statistics.

    :param index: cluster label whose captions are analyzed
    :return: (sorted (word, count) pairs, total word count,
        number of distinct words, number of distinct POS tags)
    """
    # 1-based ids of the images belonging to the requested cluster.
    result_index = []
    for i in range(len(Labels)):
        if Labels[i] == index:
            result_index.append(i+1)
    dic_num = {}
    dic_tag = {}
    #not_consider = ['a', 'in', 'of', 'the', 'on']
    #dic_ratio = {}
    # Tally word and POS-tag occurrences across every caption in the cluster.
    for i in range(len(result_index)):
        cap_temp = caption_lines[result_index[i]]
        words = cap_temp.split(' ')
        word_tag = nltk.pos_tag(words)
        for j in range(len(word_tag)):
            if word_tag[j][1] in dic_tag:
                dic_tag[word_tag[j][1]] += 1
            else:
                dic_tag[word_tag[j][1]] = 1
        for j in range(len(words)):
            if words[j] in dic_num:
                dic_num[words[j]] += 1
            else:
                dic_num[words[j]] = 1
    total_words = sum(dic_num.values())
    dic_num = sorted(dic_num.items(), key=lambda item:item[1], reverse=True)
    dic_array = np.array(dic_num)
    # Top 24 words plus an 'others' bucket for the remainder.
    # NOTE(review): raises IndexError if the cluster has < 24 distinct words.
    labels = []
    X = []
    dump_data_1 = []
    for i in range(24):
        labels.append(dic_array[i][0])
        X.append(int(dic_array[i][1]))
    labels.append('others')
    temp = sum(X)
    X.append(total_words-temp)
    for i in range(len(labels)):
        dic_temp = {}
        dic_temp["name"] = labels[i]
        dic_temp["value"] = X[i]
        dump_data_1.append(dic_temp)
    # NOTE(review): both JSON files below are opened but never closed.
    fw_1 = open('static/show_as_website/text/pic_frequency.json', 'w')
    json.dump(dump_data_1, fw_1)
    # Top 9 POS tags plus an 'others' bucket for the remainder.
    tag_name = []
    tag_num = []
    dump_data_2 = []
    result_temp = 0
    dic_tag = sorted(dic_tag.items(), key=lambda item:item[1], reverse=True)
    for i in range(len(dic_tag)):
        if i<9:
            tag_name.append(dic_tag[i][0])
            tag_num.append(dic_tag[i][1])
        else:
            result_temp += dic_tag[i][1]
    tag_name.append('others')
    tag_num.append(result_temp)
    for i in range(len(tag_name)):
        dic_temp = {}
        dic_temp["name"] = tag_name[i]
        dic_temp["value"] = tag_num[i]
        dump_data_2.append(dic_temp)
    fw_2 = open('static/show_as_website/text/pic_tag.json', 'w')
    json.dump(dump_data_2, fw_2)
    return dic_num, total_words, dic_array.shape[0], len(dic_tag)
# used for mask image
def generate_new_image(can_w, can_h, x, y, w, h, img):
    """Black out a rectangle of img that the user selected on a (possibly
    zoomed) web canvas.

    The rectangle (x, y, w, h) is given in canvas coordinates of size
    (can_w, can_h) and is rescaled onto the image's own pixel grid before
    masking.

    :param img: numpy image array of shape (height, width, channels);
        modified in place
    :return: the same img object with the selected region set to black
    """
    x = float(x)
    y = float(y)
    w = float(w)
    h = float(h)
    raw_w, raw_h = img.shape[1], img.shape[0]
    # Canvas -> image scale factors (the browser view may zoom the image).
    ratio_w, ratio_h = float(raw_w) / float(can_w), float(raw_h) / float(can_h)
    x = int(x * ratio_w)
    y = int(y * ratio_h)
    w = int(w * ratio_w)
    h = int(h * ratio_h)
    # IMPROVEMENT: zero the whole rectangle in one vectorized assignment;
    # covers exactly rows y..y+h-1 and columns x..x+w-1, the same pixels as
    # the original O(w*h) per-pixel Python loop.
    img[y:y + h, x:x + w] = 0
    return img
"yq605879396@gmail.com"
] | yq605879396@gmail.com |
d5c6bf8fc78a0d43c339a77d1a7de8e8d671ddf9 | 3448030a8da01a4c66ecbf52372bb71e929cd873 | /seed2xprv.py | 1dbe7932ab0ce4e2fdfb6c9e0ad1a2ef2a9a86cb | [
"MIT"
] | permissive | brunokrauss/crypto-key-derivation | 193662816a4a3f1e27a0144157fe1a056ec59c23 | 78285fca158aaf31a276406286d265318ee359ee | refs/heads/master | 2023-06-24T21:30:54.786702 | 2021-07-06T12:34:23 | 2021-07-06T12:34:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | #!./venv/bin/python
from lib import mbp32, utils
# Read a hex-encoded seed from stdin and print the derived BIP32 master
# extended private key (xprv) as an ASCII base58 string.
xprv = mbp32.XKey.from_seed(bytes.fromhex(utils.one_line_from_stdin()))
print(xprv.to_xkey().decode('ascii'))
| [
"errge@nilcons.com"
] | errge@nilcons.com |
fb2b8d39435d1abedcb800a15882e0bbdc634344 | 7fc6f40f442579fdc774061aa0c678264ff7873e | /venv/Scripts/easy_install-script.py | 219e5aadb997aca0465a804a5336bdd0bfc7ea88 | [] | no_license | devgunk/ShoppingWebsite | c142bee205b87fb00ff7cb50d437045f75fdd4a4 | a06e339b8409a0ba164109e7c59e4a09cc1920e4 | refs/heads/master | 2022-12-06T09:54:45.039961 | 2020-08-23T12:26:23 | 2020-08-23T12:26:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | #!C:\ShoppingWebsite\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
# Auto-generated setuptools console-script shim; not meant to be edited.
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip a "-script.py"/".exe" suffix from argv[0] so usage messages show
    # the plain command name, then dispatch to the entry point and exit with
    # its return value.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
    )
| [
"karandevgunsharma@gmail.com"
] | karandevgunsharma@gmail.com |
5d241edba0322488b4b7f84cee1a16c8cd0b1bd6 | cdd0fa35e6867932d9821b54f3e9897306139d1a | /myPracticeProblems/ordered_dict.py | ac21f387d95bb5f5a10a305313ea69109d20cc7d | [] | no_license | jisshub/python-development | cfd4246981999d5bc8cfe4cc15a57ebfada2691e | 392e7362bf8e83930d410984e985d73a0a2f40d1 | refs/heads/master | 2021-01-05T02:25:12.896814 | 2020-03-23T16:05:25 | 2020-03-23T16:05:25 | 240,844,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | from collections import OrderedDict
ordered_dict = OrderedDict()
# Repeated assignments to the same key overwrite the value, so only a single
# "jissmon" entry survives (its original insertion position is kept).
ordered_dict["jissmon"] = 33
ordered_dict["jissmon"] = 33
ordered_dict["jissmon"] = 33
ordered_dict["jissmon"] = 33
ordered_dict["jissmon"] = 33
print(ordered_dict)
# Plain dicts deduplicate keys the same way.
new_dict = dict()
new_dict["a"] = 44
new_dict["a"] = 44
new_dict["b"] = 44
print(new_dict)
| [
"jissmon476@gmail.com"
] | jissmon476@gmail.com |
68939c3d483e5097c1f1e33bf06e1a96c678806c | 3ff045a27d4cd828f1568ef4551077dd3732b97f | /GoodleLanceJanice.py | c0857272da38ec9c7da90a1a5e4ffb3dbedc0221 | [] | no_license | nightowl97/goodle | 4ae6792e8eb43fb453a43716968419b27f73550b | 6718c3151cae837bbe96ee3cdce42b545a5b27f4 | refs/heads/master | 2021-01-19T21:13:14.145428 | 2017-01-10T23:43:30 | 2017-01-10T23:43:30 | 88,626,557 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py |
# Sample message encoded with the mirror (Atbash-style) cipher decoded below.
test = "Yvzs! I xzm'g yvorvev Lzmxv olhg srh qly zg gsv xlolmb!!"


def answer(s):
    """Decode s by mirroring each lowercase letter within the alphabet
    (a<->z, b<->y, ...); every other character passes through unchanged.

    :param s: encoded message
    :return: decoded string
    """
    decoded = []
    for ch in s:
        if ord(ch) <= 96:
            # Uppercase letters, digits and punctuation are left untouched.
            decoded.append(ch)
        else:
            # Mirror within a-z: distance from 'a' becomes distance from 'z'.
            decoded.append(chr(122 - (ord(ch) - 97)))
    # IMPROVEMENT: join once instead of quadratic string concatenation;
    # print(...) is valid in both Python 2 and 3 (the original used the
    # Python-2-only print statement).
    return ''.join(decoded)


print(answer(test))
"feverier@hotmail.fr"
] | feverier@hotmail.fr |
b2cfeb0e568ca923f191916dc1ad85a9a2cad65e | 499b280cdaf8714aae193fd9b1aac95926def3b4 | /2017038076_조준희 2주차과제.py | 906467128e359f1fea673d747c8a1161ab7eb171 | [] | no_license | Joonehee-JO/python | 2972b52bb2472decffb12a4d13b556ac86adc75b | 857d15153f10a583bc4ad6a17ecf4f09fdcb68c5 | refs/heads/master | 2022-11-05T05:41:57.374225 | 2020-06-23T12:58:40 | 2020-06-23T12:58:40 | 264,381,305 | 1 | 0 | null | 2020-05-16T07:16:20 | 2020-05-16T07:16:19 | null | UTF-8 | Python | false | false | 218 | py | mem = int(input("참석자의 수를 입력하세요 : "))
# Per-attendee supplies: one chicken, two beers, four cakes
# (mem holds the attendee count read by the input() call above).
chicken = mem * 1
beer = mem * 2
cake = mem * 4
print("치킨의 수 : %d\n" %chicken)
print("맥주의 수 : %d\n" %beer)
print("케잌의 수 : %d\n" %cake)
| [
"noreply@github.com"
] | Joonehee-JO.noreply@github.com |
b0b42a8618f56c00d5b0d03cce3873bd96adb26e | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/scatter3d/line/_showscale.py | 534d53f00aee0a02ffb55e951c76e575cebf5dfe | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 425 | py | import _plotly_utils.basevalidators
class ShowscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the `scatter3d.line.showscale` attribute."""

    def __init__(
        self, plotly_name='showscale', parent_name='scatter3d.line', **kwargs
    ):
        # edit_type/role describe how plotly.js reacts when the attribute
        # changes; any extra keyword arguments are forwarded untouched.
        # Merging via dict() preserves the original TypeError if a caller
        # also supplies edit_type or role.
        merged = dict(edit_type='calc', role='info', **kwargs)
        super(ShowscaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **merged
        )
| [
"adam.kulidjian@gmail.com"
] | adam.kulidjian@gmail.com |
657691f0e148e8ba93dc360f4d8b4aad40fc643d | b249363c964248fbf8e567e14ecd7c1fc7fde5b2 | /server/newscraper.py | 376c3bd9cd2c28bb5f7a900018ff55e13babbd86 | [] | no_license | Justintlai/watersports | fec7e8d9c75ab5b6085f83273207bf851bab5afb | 0b443299884e4f558c6a7d802365b3c843a4f7de | refs/heads/master | 2021-07-11T21:28:39.253446 | 2017-10-09T00:12:23 | 2017-10-09T00:12:23 | 106,175,648 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,080 | py | import random
import time
from datetime import datetime
import requests
from bs4 import BeautifulSoup
from pws import Bing
from pws.google import strip_tags
# Vocabulary backing the fake-article generator (get_article_test).
# NOTE(review): loaded eagerly at import time — importing this module fails
# if "words.txt" is absent from the current working directory.
word_file = "words.txt"
WORDS = open(word_file).read().splitlines()
def main():
    """Smoke-test: run a Bing news search via `pws` and print the result."""
    # Everything that used to follow the `return` below was unreachable
    # dead code and referenced `cnn_paper`, a name that was never defined
    # (its assignment was commented out) — it has been removed.
    r = Bing.search_news(10, 0, True, 'h', query='github')
    print(r)
    return
def get_articles_test(topic, skipNum):
    """Return ten fabricated articles for *topic*, newest first."""
    fakes = [get_article_test(topic, skipNum) for _ in range(10)]
    fakes.sort(key=getKey, reverse=True)
    return fakes
def generate_news_url(query, num, start, recent, country_code):
    """Build a Google News search URL (date-sorted results).

    :param query: search terms (whitespace is collapsed to '+')
    :param num: results per page, as a string
    :param start: result offset, as a string
    :param recent: one of 'h','d','w','m','y' to restrict recency, else ignored
    :param country_code: optional two-letter country code for '&gl='
    """
    joined = '+'.join(query.split())
    pieces = [
        'https://www.google.com/search?q=', joined,
        '&num=', num, '&start=', start,
        '&tbm=nws#q=', joined, '&tbas=0&tbs=sbd:1&tbm=nws',
    ]
    if recent in ('h', 'd', 'w', 'm', 'y'):
        pieces.append('&tbs=qdr:' + recent)
    if country_code is not None:
        pieces.append('&gl=' + country_code)
    return ''.join(pieces)
def get_info(i):
    """Placeholder — not implemented; currently does nothing."""
    pass
def convert_to_epoch_time(param):
    """Translate a scraped timestamp string into epoch seconds.

    *param* is either a relative age ("3 hours ago", "2 weeks ago") or an
    absolute date such as "12 Mar 2017".  Relative ages are subtracted from
    the current time; a month is approximated as 30 days and a year as
    365.25 days (so the "year" branch returns a float).
    Raises IndexError when the string contains no integer token, and a
    generic Exception when neither form can be parsed.
    """
    # First integer token is the magnitude of the age.  (Absolute dates
    # like "12 Mar 2017" also yield one; it goes unused in that path.)
    magnitude = [int(tok) for tok in param.split(" ") if tok.isdigit()][0]
    # Ordered unit table: the first unit name found in the string wins.
    unit_seconds = (
        ("second", 1),
        ("minute", 60),
        ("hour", 60 * 60),
        ("day", 60 * 60 * 24),
        ("week", 60 * 60 * 24 * 7),
        ("month", 60 * 60 * 24 * 30),
        ("year", 60 * 60 * 24 * 365.25),
    )
    for unit, per_unit in unit_seconds:
        if unit in param:
            return int(time.time()) - per_unit * magnitude
    # No relative unit matched: try an absolute "%d %b %Y" date instead.
    try:
        return int(time.mktime(time.strptime(param, '%d %b %Y')))
    except Exception as e:
        print(e)
    raise Exception("Unexpected time duration! {}".format(str(param)))
def scrape_news_result(soup):
    """Extract article dicts from a parsed Google News result page.

    Each result div ('g') yields a dict with keys: link, link_text,
    link_info, additional_links, source, time (epoch seconds).
    """
    raw_results = soup.find_all('div', {'class': 'g'})
    results = []
    for result in raw_results:
        # [7:] presumably strips Google's "/url?q=" redirect prefix
        # (7 characters) — TODO confirm against a live result page.
        link = result.find('a').get('href')[7:]
        raw_link_text = result.find('a')
        link_text = strip_tags(str(raw_link_text))
        raw_link_info = result.find('div', attrs={'class': 'st'})
        link_info = strip_tags(str(raw_link_info))
        # The 'f' span reads "Source - <age>"; split into the two parts.
        raw_source = result.find('span', attrs={'class': 'f'})
        raw_source = strip_tags(str(raw_source)).split(' - ')
        source = raw_source[0]
        # NOTE: local `time` shadows the `time` module within this loop.
        time = convert_to_epoch_time(raw_source[1])
        additional_links = dict()
        # Crazy hack! Fix it. + Buggy!
        # Best-effort collection of the grouped sub-links under a result;
        # any parse failure is logged and the article is kept without them.
        try:
            raw_a_links = result.find_all('a')[1:]
            if raw_a_links:
                # Reuses `raw_source` for the sub-links' source spans.
                raw_source = list(map(strip_tags, list(map(str, result.find_all('span', attrs={'class': 'f'})[1:]))))
                for idx in range(len(raw_a_links) - 1):
                    additional_links[strip_tags(str(raw_a_links[idx]))] = (
                        raw_a_links[idx].get('href'), raw_source[idx])
        except Exception as e:
            print(e)
        temp = {'link': link,
                'link_text': link_text,
                'link_info': link_info,
                'additional_links': additional_links,
                'source': source,
                'time': time,
                }
        results.append(temp)
    return results
def scrape_news_result_bing(soup):
    """Extract article dicts from a parsed Bing News result page.

    Mirrors the dict shape produced by scrape_news_result(); Bing pages
    carry no grouped sub-links, so 'additional_links' is always empty.
    """
    articles = []
    for item in soup.find_all('div', attrs={'class': 'newsitem'}):
        anchor = item.find('a')
        snippet = item.find('span', attrs={'class': 'sn_snip'})
        cite = item.find('cite', attrs={'class': 'sn_src'})
        stamp = item.find('span', attrs={'class': 'sn_tm'})
        articles.append({
            'link': anchor.get('href'),
            'link_text': strip_tags(str(anchor)),
            'link_info': strip_tags(str(snippet)),
            'additional_links': dict(),  # kept only for schema consistency
            'source': strip_tags(str(cite)),
            'time': convert_to_epoch_time(strip_tags(str(stamp))),
        })
    return articles
def generate_news_url_bing(query, first, recent, country_code):
    """(str, str) -> str

    Build a Bing News search URL.

    :param query: search terms (whitespace is collapsed to '+')
    :param first: result offset, as a string
    :param recent: one of 'h','d','w','m','y' enables date sorting
                   (kept as a membership test for consistency with google)
    :param country_code: optional country code for '&cc='
    """
    query = '+'.join(query.split())
    # BUG FIX: the offset parameter was emitted as '&first<value>' with no
    # '=', producing a malformed query string Bing would ignore.
    url = 'http://www.bing.com/news/search?q=' + query + '&first=' + first
    if recent in ['h', 'd', 'w', 'm',
                  'y']:  # A True/False would be enough. This is just to maintain consistancy with google.
        url = url + '&qft=sortbydate%3d%221%22'
    if country_code is not None:
        url += '&cc=' + country_code
    return url
def search_news(query, num=10, start=0, recent=None, country_code=None):
    """Search Google News and scrape the first result page.

    :param query: search terms
    :param num: number of results requested
    :param start: result offset (for paging)
    :param recent: one of 'h','d','w','m','y' to restrict recency, or None
    :param country_code: optional two-letter country code, or None
    :return: dict with the scraped 'results' plus query metadata
    """
    # BUG FIX: `recent` and `country_code` were previously passed to
    # generate_news_url() in swapped positions (its signature is
    # (query, num, start, recent, country_code)), so the recency filter
    # ended up in the '&gl=' country parameter and vice versa.
    url = generate_news_url(query, str(num), str(start), recent, country_code)
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    if "Our systems have detected unusual traffic from your computer network." in str(soup):
        # Google is rate-limiting this client; the page carries no results.
        pass
    results = scrape_news_result(soup)
    # 'total_results' is a placeholder; the result-count parsing was never
    # implemented for this page layout.
    temp = {'results': results,
            'url': url,
            'num': num,
            'start': start,
            'search_engine': 'google',
            'total_results': 0,
            'country_code': country_code,
            }
    return temp
def getKey(item):
    """Sort key for article dicts: the article's epoch timestamp."""
    return item["time"]
def get_articles(topic, skipNum):
    """Fetch live search results for *topic* and return them newest first."""
    response = search_news(str(topic), 10, skipNum)
    return sorted(response["results"], key=getKey, reverse=True)
def get_random_date():
    """Return a random date in 2001-2016 as epoch seconds, stringified.

    Days are capped at 28 so every month produces a valid date.
    """
    # random.choice calls happen in the same order as before:
    # year, then month, then day.
    moment = datetime(
        random.choice(range(2001, 2017)),
        random.choice(range(1, 13)),
        random.choice(range(1, 29)),
    )
    return str((moment - datetime(1970, 1, 1)).total_seconds())
def get_article_test(topic, skipNum):
    """Build one fake article dict (random title, random timestamp).

    Matches the key schema of the live scraper so UI code can be tested
    without hitting the network.
    """
    # Ten random words, each followed by a space (same string shape as the
    # original concatenation loop).
    title = " ".join(random.choice(WORDS) for _ in range(10)) + " "
    return {
        'link': str(skipNum),
        'link_text': title,
        'link_info': "link_info",
        'additional_links': "additional_links",
        'source': str(topic),
        'time': get_random_date(),
    }
if __name__ == "__main__":
    # Manual smoke test: fetch live results for one query and print the
    # article timestamps (sorted newest first by get_articles).
    start = "1232131"  # unused placeholder
    end = "1232131"    # unused placeholder
    r = get_articles("slack", 0)
    for i in r:
        print(i["time"])
        pass
| [
"alexanderfanthome@googlemail.com"
] | alexanderfanthome@googlemail.com |
b1a1e15b3a0558a5a77872235e3522ea33bab5cc | 43ab33b2f50e47f5dbe322daa03c86a99e5ee77c | /rcc/models/jaxb_element.py | 49e4e3b8f1e30a23cafa6a6b5a8c3fbc12ef4791 | [] | no_license | Sage-Bionetworks/rcc-client | c770432de2d2950e00f7c7bd2bac22f3a81c2061 | 57c4a621aecd3a2f3f9faaa94f53b2727992a01a | refs/heads/main | 2023-02-23T05:55:39.279352 | 2021-01-21T02:06:08 | 2021-01-21T02:06:08 | 331,486,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,874 | py | # coding: utf-8
"""
nPhase REST Resource
REDCap REST API v.2 # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from rcc.configuration import Configuration
class JAXBElement(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type name (used by to_dict()).
    openapi_types = {
        'name': 'QName',
        'value': 'object',
        'nil': 'bool',
        'global_scope': 'bool',
        'type_substituted': 'bool'
    }

    # Python attribute name -> JSON key in the wire format.
    attribute_map = {
        'name': 'name',
        'value': 'value',
        'nil': 'nil',
        'global_scope': 'globalScope',
        'type_substituted': 'typeSubstituted'
    }

    def __init__(self, name=None, value=None, nil=None, global_scope=None, type_substituted=None, local_vars_configuration=None):  # noqa: E501
        """JAXBElement - a model defined in OpenAPI"""  # noqa: E501
        # Fall back to a default client Configuration when none is given.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._name = None
        self._value = None
        self._nil = None
        self._global_scope = None
        self._type_substituted = None
        self.discriminator = None

        # Only route non-None constructor values through the property
        # setters, so unset fields stay None.
        if name is not None:
            self.name = name
        if value is not None:
            self.value = value
        if nil is not None:
            self.nil = nil
        if global_scope is not None:
            self.global_scope = global_scope
        if type_substituted is not None:
            self.type_substituted = type_substituted

    @property
    def name(self):
        """Gets the name of this JAXBElement.  # noqa: E501

        :return: The name of this JAXBElement.  # noqa: E501
        :rtype: QName
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this JAXBElement.

        :param name: The name of this JAXBElement.  # noqa: E501
        :type: QName
        """

        self._name = name

    @property
    def value(self):
        """Gets the value of this JAXBElement.  # noqa: E501

        :return: The value of this JAXBElement.  # noqa: E501
        :rtype: object
        """
        return self._value

    @value.setter
    def value(self, value):
        """Sets the value of this JAXBElement.

        :param value: The value of this JAXBElement.  # noqa: E501
        :type: object
        """

        self._value = value

    @property
    def nil(self):
        """Gets the nil of this JAXBElement.  # noqa: E501

        :return: The nil of this JAXBElement.  # noqa: E501
        :rtype: bool
        """
        return self._nil

    @nil.setter
    def nil(self, nil):
        """Sets the nil of this JAXBElement.

        :param nil: The nil of this JAXBElement.  # noqa: E501
        :type: bool
        """

        self._nil = nil

    @property
    def global_scope(self):
        """Gets the global_scope of this JAXBElement.  # noqa: E501

        :return: The global_scope of this JAXBElement.  # noqa: E501
        :rtype: bool
        """
        return self._global_scope

    @global_scope.setter
    def global_scope(self, global_scope):
        """Sets the global_scope of this JAXBElement.

        :param global_scope: The global_scope of this JAXBElement.  # noqa: E501
        :type: bool
        """

        self._global_scope = global_scope

    @property
    def type_substituted(self):
        """Gets the type_substituted of this JAXBElement.  # noqa: E501

        :return: The type_substituted of this JAXBElement.  # noqa: E501
        :rtype: bool
        """
        return self._type_substituted

    @type_substituted.setter
    def type_substituted(self, type_substituted):
        """Sets the type_substituted of this JAXBElement.

        :param type_substituted: The type_substituted of this JAXBElement.  # noqa: E501
        :type: bool
        """

        self._type_substituted = type_substituted

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialise each declared attribute: lists and dicts
        # of models are converted element-by-element via their to_dict().
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, JAXBElement):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, JAXBElement):
            return True

        return self.to_dict() != other.to_dict()
| [
"thomas.yu@sagebase.org"
] | thomas.yu@sagebase.org |
7b96378092ff574b8986da643f93a35f77f65f61 | ab43c27eeac4866aef9b9282d912f24b8238803e | /app/views/__init__.py | 8db42f13b7048140839fd65a2b555069cc2f1a09 | [] | no_license | RubenVanEldik/solar | 31c3018322841f271fa70883eb78759141345dd9 | 71d48fea12791a72558ddc0d512a1f6b7e8f4574 | refs/heads/master | 2021-10-18T22:33:05.663817 | 2019-02-14T19:59:07 | 2019-02-14T19:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | from .charts import *
from .index import *
from .json import *
from .settings import *
from .user_management import *
| [
"74729@protonmail.com"
] | 74729@protonmail.com |
ce72e0f8bc1244eb6aab5eb1508279184c6fec5c | 1e5bf133026ff7afa9659652cc3ec1023f98dc1c | /326 - Power of Three/PythonSolution2.py | 14af1e155eee448238449ceaea5fa410eb42ddab | [] | no_license | DishantK1807/Leetcode-Practice | 51ca7d38889e1e2351968b8802185117ab629e78 | 094b33b38f81ce2b5572cdfecc1f5fb5bbd55816 | refs/heads/master | 2022-04-08T14:01:24.534693 | 2020-03-11T22:13:54 | 2020-03-11T22:13:54 | 232,390,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | class Solution:
def isPowerOfThree(self, n: int) -> bool:
return n > 0 and 1853020188851841 % n == 0
| [
"dishant.khanna1807@gmail.com"
] | dishant.khanna1807@gmail.com |
a772928bcbefc46afefe57c8cef08919a0e31326 | 8942d1537271a4f6707db6e81a786ca13451e469 | /_examples/example_pipelines/skullstrip_afni_ants_example/porcupine_generated_pipeline.py | 6ead358190e3d19af7f227b9bb95aa7b885fddfc | [
"MIT"
] | permissive | GiraffeTools/Documentation | ee78b565ba489e20172e23b88e1cbf6424e2f467 | 40fe4a141a444aa5f016a5d5073ddffba77187d0 | refs/heads/master | 2022-11-07T16:18:36.793918 | 2020-08-16T20:44:37 | 2020-08-16T20:44:37 | 174,414,952 | 1 | 1 | MIT | 2022-10-19T22:26:58 | 2019-03-07T20:26:15 | Python | UTF-8 | Python | false | false | 3,347 | py | #This is a Nipype generator. Warning, here be dragons.
import sys
import nipype
import nipype.pipeline as pe
import nipype.interfaces.io as io
import nipype.interfaces.ants as ants
import nipype.interfaces.afni as afni
import nipype.interfaces.fsl as fsl
# Skull-stripping pipeline (AFNI + ANTs + FSL) auto-generated by Porcupine.
# NOTE(review): WorkingDirectory appears unused below — the DataSink writes
# to /tmp instead; confirm before relying on it.
WorkingDirectory = "~/Porcupipelines/ThisStudy"

#Generic datagrabber module that wraps around glob in an
# S3 grabber: pulls one anatomical T1w image from the OpenNeuro bucket.
NodeHash_30bb950 = pe.Node(io.S3DataGrabber(outfields=['outfiles']), name = 'NodeName_30bb950')
NodeHash_30bb950.inputs.anon = True
NodeHash_30bb950.inputs.bucket = 'openneuro'
NodeHash_30bb950.inputs.bucket_path = 'ds000101/ds000101_R2.0.0/uncompressed/'
NodeHash_30bb950.inputs.local_directory = '/tmp'
NodeHash_30bb950.inputs.sort_filelist = True
NodeHash_30bb950.inputs.template = 'sub-01/anat/sub-01_T1w.nii.gz'

#Wraps command **N4BiasFieldCorrection**
NodeHash_1ea4b50 = pe.Node(interface = ants.N4BiasFieldCorrection(), name = 'NodeName_1ea4b50')
NodeHash_1ea4b50.inputs.copy_header = False
NodeHash_1ea4b50.inputs.dimension = 3
NodeHash_1ea4b50.inputs.num_threads = 4
NodeHash_1ea4b50.inputs.save_bias = True

#Wraps command **3dUnifize**
NodeHash_291d6d0 = pe.Node(interface = afni.Unifize(), name = 'NodeName_291d6d0')
NodeHash_291d6d0.inputs.outputtype = 'NIFTI_GZ'

#Wraps command **3dSkullStrip**
NodeHash_1ddfa30 = pe.Node(interface = afni.SkullStrip(), name = 'NodeName_1ddfa30')
NodeHash_1ddfa30.inputs.outputtype = 'NIFTI_GZ'

#Wraps command **3dcalc**
# Masks the unifized image with the skull-strip result (a*step(b)).
NodeHash_3bd6370 = pe.Node(interface = afni.Calc(), name = 'NodeName_3bd6370')
NodeHash_3bd6370.inputs.expr = 'a*step(b)'
NodeHash_3bd6370.inputs.outputtype = 'NIFTI_GZ'

#Wraps command **fslmaths**
# Binarises the masked image into a brain mask.
NodeHash_49ddb10 = pe.Node(interface = fsl.Threshold(), name = 'NodeName_49ddb10')
NodeHash_49ddb10.inputs.args = '-bin'
NodeHash_49ddb10.inputs.thresh = 1.e-3

#Wraps command **3dUnifize**
NodeHash_229c200 = pe.Node(interface = afni.Unifize(), name = 'NodeName_229c200')
NodeHash_229c200.inputs.gm = True
NodeHash_229c200.inputs.outputtype = 'NIFTI_GZ'

#Generic datasink module to store structured outputs
NodeHash_3207070 = pe.Node(interface = io.DataSink(), name = 'NodeName_3207070')
NodeHash_3207070.inputs.base_directory = '/tmp'

#Create a workflow to connect all those nodes
analysisflow = nipype.Workflow('MyWorkflow')
analysisflow.connect(NodeHash_30bb950, 'outfiles', NodeHash_1ea4b50, 'input_image')
analysisflow.connect(NodeHash_1ea4b50, 'output_image', NodeHash_291d6d0, 'in_file')
analysisflow.connect(NodeHash_291d6d0, 'out_file', NodeHash_1ddfa30, 'in_file')
analysisflow.connect(NodeHash_1ea4b50, 'bias_image', NodeHash_3207070, 'bias_image')
analysisflow.connect(NodeHash_291d6d0, 'out_file', NodeHash_3bd6370, 'in_file_a')
analysisflow.connect(NodeHash_1ddfa30, 'out_file', NodeHash_3bd6370, 'in_file_b')
analysisflow.connect(NodeHash_3bd6370, 'out_file', NodeHash_49ddb10, 'in_file')
analysisflow.connect(NodeHash_3bd6370, 'out_file', NodeHash_229c200, 'in_file')
analysisflow.connect(NodeHash_49ddb10, 'out_file', NodeHash_3207070, 'out_mask')
analysisflow.connect(NodeHash_229c200, 'out_file', NodeHash_3207070, 'out_file')

#Run the workflow
plugin = 'MultiProc' #adjust your desired plugin here
plugin_args = {'n_procs': 1} #adjust to your number of cores
analysisflow.write_graph(graph2use='flat', format='png', simple_form=False)
analysisflow.run(plugin=plugin, plugin_args=plugin_args)
| [
"timvanmourik@gmail.com"
] | timvanmourik@gmail.com |
1ab8aa09439b7feb4e64e7287cafca2b7cd40a94 | cc2fd2bd9947d97cbd351553dad75044f6978cf9 | /driver2.py | e63c6f4cb68c2d2442c635a3df87c770a4d7fbfe | [] | no_license | jsitaraman/MDOF | 7dc1178254e14dabb86c4670f8f619546fe75b5f | 96162e408c887ccf8954e117b5bff016bd359110 | refs/heads/main | 2022-12-24T13:44:51.690443 | 2020-10-08T00:36:51 | 2020-10-08T00:36:51 | 301,932,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,107 | py | #
# driver code for sizing a simple
# rotor.
#
# Design Variables are
# [ Cl -> mean Lift Coefficient,
# R -> Radius of the rotor,
# Omega -> Rotor frequency
# sigma -> Rotor solidity]
#
# Objective:
# Maximize pay-load with a constraint of maximum power available
#
import numpy as np
from AeroSolver import AeroModel
from WeightSolver import WeightModel
from MDOF import MDOFinputs
from MDOF import FunctionsAndConstraints
from MDOF import optimizer
#
# Model constants: SI units, sea-level air density, and vehicle weights.
# NOTE(review): weight units are presumably Newtons given 'SI' — confirm
# against the WeightModel implementation.
modelParams={'units': 'SI',
             'rho':1.2256,
             'bladeDensity':600,
             'emptyWeight':49000,
             'payLoad':0,
             'grossWeight':82000}
# Design variables: mean lift coefficient, rotor radius, rotor frequency,
# and rotor solidity (see the header comment of this script).
designvar=['Cl','R','Omega','sigma']
#
inputObject=MDOFinputs(designvar)
inputs=inputObject.inputVar()
aero=AeroModel('momentumTheory',modelParams)
weight=WeightModel('simpleWeight',modelParams)
#
# create the input to output mapping
#
# input--->----- Aero Model
#    |             |
#    ----- Weight Model ---> output
#
x1=aero.getModel(inputs)
outputs=weight.getModel(x1)
#
# create the objective function and
# constraints from vehicle response
#
fc=FunctionsAndConstraints(inputObject,inputs,outputs)
# fsign=-1: maximize payload by minimizing its negative.
objective=fc.get('function','PayLoad',fsign=-1)
gradient=fc.get('gradient','PayLoad',fsign=-1)
ineqconstraint=[]
ineqconstraintgrad=[]
eqconstraint=[]
eqconstraintgrad=[]
# Constrain power to the available limit (1e6, presumably Watts).
eqconstraint.append(fc.get('constraint','Power',constraintValue=1e6))
eqconstraintgrad.append(fc.get('constraintgrad','Power',constraintValue=1e6))
#
# intialize optimizer object
# provide it the objectives, gradients and constraints
#
opt2=optimizer(objective,gradient,eqconstraint,eqconstraintgrad,ineqconstraint,ineqconstraintgrad)
#
# starting values and
# bounds
#
x0=np.array([0.6,7.5,25.0,0.08],'d')
lb=[0.1,6.0,10.0,0.06]
ub=[1.0,9.0,30.0,0.12]
#
# perform actual optimization
# functions and gradients are only
# evaluated here
#
x=opt2.optimize(x0,lb,ub,1.0,method='SLSQP')
#
print('designNames :',designvar)
print('values :',x)
#
resp=inputObject.getResponse()
print('stateNames :',resp['varNames'])
print('values :',resp['values'])
#
| [
"jaina@onyx.erdc.hpc.mil"
] | jaina@onyx.erdc.hpc.mil |
88844da5196efc28ee33108e41edf931aadaebd1 | cf54ddb10342bfac8f868eddbfcba11f9729200f | /regex.py | 6c2685af92327db7e3c69460aa3e061f6e3419ab | [] | no_license | winters23/projects-python | 589a88a7d2828685807685646c1b58e1ce32dfac | d920cc62cd1ccc79d43a6869916713045509fd9e | refs/heads/master | 2020-04-23T06:38:55.883746 | 2019-03-22T08:06:29 | 2019-03-22T08:06:29 | 170,980,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | import re
# Demo of re.search with anchors: match only if the string starts with
# "The" and ends with "Spain" (".*" spans everything in between).
txt="The rain is in Spain"
x=re.search("^The.*Spain$",txt)
# Prints the Match object on success, or None when there is no match.
print(x)
"jain.diksha2398@gmail.com"
] | jain.diksha2398@gmail.com |
8dbddfb4997cc91e26fe303a38174baf39a53b42 | 53eaf9bb812f6f9317a143a08f60bb5515e7de8c | /setup.py | c401f0283d85762481b013d5265207f65a2113b7 | [] | no_license | JeffMv/jmm-util-libs | 103d77972ee7d3f196593396b4a78a6ce7db6617 | 137696ec9126ea35512dcb29500d3d7c6ffdde56 | refs/heads/master | 2023-07-11T20:04:18.782120 | 2023-07-01T17:44:45 | 2023-07-01T17:44:45 | 209,101,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,046 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
# import sys
# from shutil import rmtree
from setuptools import find_packages, setup, Command
# from jmm import version
# Package meta-data.
NAME = 'jmm'
DESCRIPTION = 'A collection of personal utility functions.'
URL = 'https://github.com/JeffMv/jmm-util-libs'
EMAIL = 'jeffrey.mvutu@gmail.com'
AUTHOR = 'Jeffrey Mvutu Mabilama'
REQUIRES_PYTHON = '>=3.0.0'
# A non-empty VERSION short-circuits the __version__.py lookup further down.
VERSION = "0.1.3.2.0"
# What packages are required for this module to be executed?
REQUIRED = [
    # 'requests', 'maya', 'records',
]

# What packages are optional?
# Each key is an extra installable via `pip install jmm[<key>]`.
EXTRAS = {
    # 'fancy feature': ['django'],
    'conversion': ['pandas'],
    'selenium': ['selenium', 'requests'],
    'parsing': ['bs4', 'lxml'],
    # BUG FIX: the PyPI distribution of PIL is 'Pillow' (the maintained
    # fork); requiring 'PIL' makes `pip install jmm[advanced]` fail.
    # The import name in code remains `PIL`.
    'advanced': ['Pillow'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))

# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
    with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as fh:
        long_description = '\n' + fh.read()
except FileNotFoundError:
    # Building without a README (e.g. a bare sdist): fall back to the
    # one-line description.
    long_description = DESCRIPTION

# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
    project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
    # exec-ing our own __version__.py is the conventional setup.py trick
    # to read the version without importing the (possibly uninstallable)
    # package itself.
    with open(os.path.join(here, project_slug, '__version__.py')) as fh:
        exec(fh.read(), about)
else:
    about['__version__'] = VERSION
# class UploadCommand(Command):
# """Support setup.py upload."""
# description = 'Build and publish the package.'
# user_options = []
# @staticmethod
# def status(s):
# """Prints things in bold."""
# print('\033[1m{0}\033[0m'.format(s))
# def initialize_options(self):
# pass
# def finalize_options(self):
# pass
# def run(self):
# try:
# self.status('Removing previous builds…')
# rmtree(os.path.join(here, 'dist'))
# except OSError:
# pass
# self.status('Building Source and Wheel (universal) distribution…')
# os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
# self.status('Uploading the package to PyPI via Twine…')
# os.system('twine upload dist/*')
# self.status('Pushing git tags…')
# os.system('git tag v{0}'.format(about['__version__']))
# os.system('git push --tags')
# sys.exit()
# Where the magic happens:
setup(
    name=NAME,
    version=about['__version__'],
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    # Ship every package except the test tree.
    packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),

    # If your package is a single module, use this instead of 'packages':
    # py_modules=['mypackage'],

    # entry_points={
    #     'console_scripts': ['mycli=mymodule:cli'],
    # },
    install_requires=REQUIRED,
    extras_require=EXTRAS,
    # include_package_data=True,
    # license='MIT',
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        # 'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        # 'Programming Language :: Python :: Implementation :: CPython',
        # 'Programming Language :: Python :: Implementation :: PyPy'
    ],
    # # $ setup.py publish support.
    # cmdclass={
    #     'upload': UploadCommand,
    # },
)
| [
"jeffrey.mvutu@gmail.com"
] | jeffrey.mvutu@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.