blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
34aaf1cbe469aa18b95a2eb1bd4386a5239eb618 | 3996bd434ba9b21349e7538f934a1c9959306119 | /src/gluonts/model/tft/_layers.py | f6f45b7a150642cf97a78e2fbeba16725dd40926 | [
"Apache-2.0"
] | permissive | dibgerge/gluon-ts | 674af7863027e731d37c4da19e494118c11b91f2 | 77029660c6ec4b6e80d6450d6dd640d6652f5b06 | refs/heads/master | 2021-06-19T20:23:57.481626 | 2021-05-22T10:50:16 | 2021-05-22T10:50:16 | 214,877,421 | 1 | 0 | Apache-2.0 | 2019-11-08T00:06:16 | 2019-10-13T19:05:36 | Python | UTF-8 | Python | false | false | 16,588 | py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import math
from typing import List, Optional, Tuple
import mxnet as mx
import numpy as np
from mxnet import gluon, init
from mxnet.gluon import HybridBlock, nn, rnn
from gluonts.core.component import validated
from gluonts.mx import Tensor
from gluonts.mx.block.feature import FeatureEmbedder
class GatedLinearUnit(HybridBlock):
    """Gated Linear Unit: halves the input along ``axis`` into a value part
    and a gate part and returns ``sigmoid(gate) * value``; the value part is
    optionally passed through tanh first."""

    @validated()
    def __init__(self, axis: int = -1, nonlinear: bool = True, **kwargs):
        super().__init__(**kwargs)
        # Axis along which the input is split into (value, gate) halves.
        self.axis = axis
        # If True, squash the value half through tanh before gating.
        self.nonlinear = nonlinear

    def hybrid_forward(self, F, x: Tensor) -> Tensor:
        value, gate = F.split(x, axis=self.axis, num_outputs=2)
        if self.nonlinear:
            value = F.tanh(value)
        return F.broadcast_mul(F.sigmoid(gate), value)
class GatedResidualNetwork(HybridBlock):
    """Gated Residual Network (GRN) from the Temporal Fusion Transformer.

    Computes ``LayerNorm(skip + GLU(Dense(Dropout(Dense(ELU(Dense([x, c])))))))``
    where ``c`` is an optional static conditioning input and ``skip`` is the
    (possibly projected) input ``x``.
    """

    @validated()
    def __init__(
        self,
        d_hidden: int,
        d_input: Optional[int] = None,
        d_output: Optional[int] = None,
        d_static: Optional[int] = None,
        dropout: float = 0.0,
        **kwargs,
    ):
        super(GatedResidualNetwork, self).__init__(**kwargs)
        self.d_hidden = d_hidden
        self.d_input = d_input or d_hidden
        # d_static == 0 means "no static conditioning".
        self.d_static = d_static or 0
        if d_output is None:
            # Output keeps the input width, so the identity skip works.
            self.d_output = self.d_input
            self.add_skip = False
        else:
            self.d_output = d_output
            if d_output != self.d_input:
                # Widths differ: project the skip connection to d_output.
                self.add_skip = True
                with self.name_scope():
                    self.skip_proj = nn.Dense(
                        units=self.d_output,
                        in_units=self.d_input,
                        flatten=False,
                        weight_initializer=init.Xavier(),
                    )
            else:
                self.add_skip = False
        with self.name_scope():
            self.mlp = nn.HybridSequential(prefix="mlp_")
            self.mlp.add(
                nn.Dense(
                    units=self.d_hidden,
                    in_units=self.d_input + self.d_static,
                    flatten=False,
                    weight_initializer=init.Xavier(),
                )
            )
            self.mlp.add(nn.ELU())
            self.mlp.add(
                nn.Dense(
                    units=self.d_hidden,
                    in_units=self.d_hidden,
                    flatten=False,
                    weight_initializer=init.Xavier(),
                )
            )
            # FIX: the original line ended with a stray trailing comma,
            # turning the statement into a useless one-element tuple.
            self.mlp.add(nn.Dropout(dropout))
            self.mlp.add(
                nn.Dense(
                    # Double width: the GLU below consumes (value, gate) halves.
                    units=self.d_output * 2,
                    in_units=self.d_hidden,
                    flatten=False,
                    weight_initializer=init.Xavier(),
                )
            )
            self.mlp.add(
                GatedLinearUnit(
                    axis=-1,
                    nonlinear=False,
                )
            )
            self.lnorm = nn.LayerNorm(axis=-1, in_channels=self.d_output)

    def hybrid_forward(
        self,
        F,
        x: Tensor,
        c: Optional[Tensor] = None,
    ) -> Tensor:
        """Apply the GRN to ``x``, optionally conditioned on static input ``c``.

        Raises
        ------
        ValueError
            If ``c`` is missing while ``d_static > 0``, or provided while
            ``d_static == 0``.
        """
        if self.add_skip:
            skip = self.skip_proj(x)
        else:
            skip = x
        if self.d_static > 0 and c is None:
            raise ValueError("static variable is expected.")
        if self.d_static == 0 and c is not None:
            # FIX: typo "accpeted" -> "accepted" in the error message.
            raise ValueError("static variable is not accepted.")
        if c is not None:
            x = F.concat(x, c, dim=-1)
        x = self.mlp(x)
        x = self.lnorm(F.broadcast_add(x, skip))
        return x
class VariableSelectionNetwork(HybridBlock):
    """Soft variable selection from the Temporal Fusion Transformer.

    Each of the ``n_vars`` input embeddings passes through its own GRN, while
    a shared GRN (optionally conditioned on a static embedding) produces
    softmax weights over the variables; the output is the weighted sum.
    """

    @validated()
    def __init__(
        self,
        d_hidden: int,
        n_vars: int,
        dropout: float = 0.0,
        add_static: bool = False,
        **kwargs,
    ) -> None:
        super(VariableSelectionNetwork, self).__init__(**kwargs)
        self.d_hidden = d_hidden
        self.n_vars = n_vars
        self.add_static = add_static
        with self.name_scope():
            # Produces one (unnormalized) selection weight per variable.
            self.weight_network = GatedResidualNetwork(
                d_hidden=self.d_hidden,
                d_input=self.d_hidden * self.n_vars,
                d_output=self.n_vars,
                d_static=self.d_hidden if add_static else None,
                dropout=dropout,
            )
            # Per-variable GRNs live in a plain Python list, so each child
            # must be registered with the parent block explicitly.
            self.variable_network = []
            for n in range(self.n_vars):
                var_net = GatedResidualNetwork(
                    d_hidden=self.d_hidden,
                    dropout=dropout,
                )
                self.register_child(var_net, name=f"var_{n+1}")
                self.variable_network.append(var_net)

    def hybrid_forward(
        self,
        F,
        variables: List[Tensor],
        static: Optional[Tensor] = None,
    ) -> Tuple[Tensor, Tensor]:
        """Return the weighted combination of ``variables`` and the weights.

        Raises
        ------
        ValueError
            If the number of variables differs from ``n_vars``, or ``static``
            is inconsistent with ``add_static``.
        """
        if len(variables) != self.n_vars:
            raise ValueError(
                f"expect {self.n_vars} variables, {len(variables)} given."
            )
        if self.add_static and static is None:
            raise ValueError("static variable is expected.")
        if not self.add_static and static is not None:
            # FIX: typo "accpeted" -> "accepted" in the error message.
            raise ValueError("static variable is not accepted.")
        flatten = F.concat(*variables, dim=-1)
        if static is not None:
            static = F.broadcast_like(static, variables[0])
        weight = self.weight_network(flatten, static)
        # Insert a singleton axis so the weights broadcast over d_hidden.
        weight = F.expand_dims(weight, axis=-2)
        weight = F.softmax(weight, axis=-1)
        var_encodings = []
        for var, net in zip(variables, self.variable_network):
            var_encodings.append(net(var))
        var_encodings = F.stack(*var_encodings, axis=-1)
        var_encodings = F.sum(F.broadcast_mul(var_encodings, weight), axis=-1)
        return var_encodings, weight
class SelfAttention(HybridBlock):
    """Causal multi-head self-attention block used by the TFT decoder.

    Queries are formed only from the last ``prediction_length`` steps of the
    input, while keys and values span the full sequence (context + horizon).
    When ``share_values`` is True, a single value head is shared by all heads.
    """

    @validated()
    def __init__(
        self,
        context_length: int,
        prediction_length: int,
        d_hidden: int,
        n_head: int = 1,
        bias: bool = True,
        share_values: bool = False,
        dropout: float = 0.0,
        temperature: float = 1.0,
        **kwargs,
    ):
        super(SelfAttention, self).__init__(**kwargs)
        if d_hidden % n_head != 0:
            raise ValueError(
                f"hidden dim {d_hidden} cannot be split into {n_head} heads."
            )
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.d_hidden = d_hidden
        self.n_head = n_head
        # Per-head width.
        self.d_head = d_hidden // n_head
        self.bias = bias
        self.share_values = share_values
        # Extra softmax temperature applied on top of the sqrt(d_head) scaling.
        self.temperature = temperature
        with self.name_scope():
            self.dropout = nn.Dropout(dropout)
            self.q_proj = nn.Dense(
                units=self.d_hidden,
                in_units=self.d_hidden,
                use_bias=self.bias,
                flatten=False,
                weight_initializer=init.Xavier(),
                prefix="q_proj_",
            )
            self.k_proj = nn.Dense(
                units=self.d_hidden,
                in_units=self.d_hidden,
                use_bias=self.bias,
                flatten=False,
                weight_initializer=init.Xavier(),
                prefix="k_proj_",
            )
            self.v_proj = nn.Dense(
                # With shared values a single head-sized projection suffices.
                units=self.d_head if self.share_values else self.d_hidden,
                in_units=self.d_hidden,
                use_bias=self.bias,
                flatten=False,
                weight_initializer=init.Xavier(),
                prefix="v_proj_",
            )
            self.out_proj = nn.Dense(
                units=self.d_hidden,
                in_units=self.d_hidden,
                use_bias=self.bias,
                flatten=False,
                weight_initializer=init.Xavier(),
                prefix="out_proj_",
            )

    def _split_head(self, F, x: Tensor) -> Tensor:
        # (batch, seq, d_hidden) -> (batch, n_head, seq, d_head).
        x = F.reshape(data=x, shape=(0, 0, -4, self.n_head, self.d_head))
        x = F.swapaxes(data=x, dim1=1, dim2=2)
        return x

    def _merge_head(self, F, x: Tensor) -> Tensor:
        # Inverse of _split_head: (batch, n_head, seq, d_head) -> (batch, seq, d_hidden).
        x = F.swapaxes(data=x, dim1=1, dim2=2)
        x = F.reshape(data=x, shape=(0, 0, self.d_hidden))
        return x

    def _compute_qkv(self, F, x: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
        # Queries only for the prediction horizon (tail of the sequence).
        cx = F.slice_axis(x, axis=1, begin=-self.prediction_length, end=None)
        q = self.q_proj(cx)
        k = self.k_proj(x)
        q = self._split_head(F, q)
        k = self._split_head(F, k)
        v = self.v_proj(x)
        if self.share_values:
            # Broadcast the single value head across all heads.
            v = F.broadcast_like(v.expand_dims(axis=1), k)
        else:
            v = self._split_head(F, v)
        return q, k, v

    def _apply_mask(
        self, F, score: Tensor, key_mask: Optional[Tensor]
    ) -> Tensor:
        # Causal mask: query indices are offset by context_length so that a
        # query at absolute position q attends only to keys with index <= q.
        k_idx = F.contrib.arange_like(score, axis=-1)
        k_idx = (
            k_idx.expand_dims(axis=0).expand_dims(axis=0).expand_dims(axis=0)
        )
        q_idx = F.contrib.arange_like(score, axis=-2) + self.context_length
        q_idx = (
            q_idx.expand_dims(axis=-1).expand_dims(axis=0).expand_dims(axis=0)
        )
        unidir_mask = F.broadcast_lesser_equal(k_idx, q_idx)
        unidir_mask = F.broadcast_like(unidir_mask, score)
        # Disallowed positions get a large negative score (~0 after softmax).
        score = F.where(unidir_mask, score, F.ones_like(score) * -1e9)
        if key_mask is not None:
            key_mask = key_mask.expand_dims(axis=1)  # head axis
            key_mask = key_mask.expand_dims(axis=2)  # query axis
            key_mask = F.broadcast_like(key_mask, score)
            score = F.where(key_mask, score, F.ones_like(score) * -1e9)
        return score

    def _compute_attn_score(
        self,
        F,
        q: Tensor,
        k: Tensor,
        mask: Optional[Tensor],
    ) -> Tensor:
        # Scaled dot-product scores with masking, softmax and dropout.
        # Note: masking happens before the scaling; the -1e9 fill stays
        # overwhelmingly negative either way.
        score = F.batch_dot(lhs=q, rhs=k, transpose_b=True)
        score = self._apply_mask(F, score, mask)
        score = score / (math.sqrt(self.d_head) * self.temperature)
        score = F.softmax(score, axis=-1)
        score = self.dropout(score)
        return score

    def _compute_attn_output(self, F, score: Tensor, v: Tensor) -> Tensor:
        # Weighted sum of values, merged back and projected to d_hidden.
        v = F.batch_dot(score, v)
        v = self._merge_head(F, v)
        v = self.out_proj(v)
        return v

    def hybrid_forward(self, F, x: Tensor, mask: Optional[Tensor]) -> Tensor:
        """Attend over ``x`` (full sequence) and return the horizon encodings.

        ``mask`` marks valid key positions; ``None`` applies only the causal
        mask.
        """
        q, k, v = self._compute_qkv(F, x)
        score = self._compute_attn_score(F, q, k, mask)
        v = self._compute_attn_output(F, score, v)
        return v
class TemporalFusionEncoder(HybridBlock):
    """LSTM encoder/decoder pair with a gated skip connection (TFT).

    The context window is encoded by one LSTM; its final state seeds a second
    LSTM over the prediction window.  The concatenated outputs are gated and
    added to a (possibly projected) skip of the raw inputs, then
    layer-normalized.
    """

    @validated()
    def __init__(
        self,
        context_length: int,
        prediction_length: int,
        d_input: int,
        d_hidden: int,
        **kwargs,
    ) -> None:
        super(TemporalFusionEncoder, self).__init__(**kwargs)
        self.context_length = context_length
        self.prediction_length = prediction_length
        with self.name_scope():
            self.encoder_lstm = rnn.HybridSequentialRNNCell(prefix="encoder_")
            self.encoder_lstm.add(
                rnn.LSTMCell(
                    hidden_size=d_hidden,
                    input_size=d_input,
                )
            )
            self.decoder_lstm = rnn.HybridSequentialRNNCell(prefix="decoder_")
            self.decoder_lstm.add(
                rnn.LSTMCell(
                    hidden_size=d_hidden,
                    input_size=d_input,
                )
            )
            # Dense -> GLU gate applied to the LSTM outputs.
            self.gate = nn.HybridSequential()
            self.gate.add(
                nn.Dense(units=d_hidden * 2, in_units=d_hidden, flatten=False)
            )
            self.gate.add(GatedLinearUnit(axis=-1, nonlinear=False))
            if d_input != d_hidden:
                # Project the raw inputs so the residual widths match.
                self.skip_proj = nn.Dense(
                    units=d_hidden, in_units=d_input, flatten=False
                )
                self.add_skip = True
            else:
                self.add_skip = False
            self.lnorm = nn.LayerNorm(axis=-1, in_channels=d_hidden)

    def hybrid_forward(
        self,
        F,
        ctx_input: Tensor,
        tgt_input: Tensor,
        states: List[Tensor],
    ) -> Tensor:
        """Encode context and horizon inputs into a gated residual sequence.

        ``states`` is the initial LSTM state for the context encoder; the
        encoder's final state seeds the horizon decoder.
        """
        ctx_encodings, states = self.encoder_lstm.unroll(
            length=self.context_length,
            inputs=ctx_input,
            begin_state=states,
            merge_outputs=True,
        )
        # Decoder starts from the encoder's final state; its own final state
        # is discarded.
        tgt_encodings, _ = self.decoder_lstm.unroll(
            length=self.prediction_length,
            inputs=tgt_input,
            begin_state=states,
            merge_outputs=True,
        )
        # Re-join context and horizon along the time axis.
        encodings = F.concat(ctx_encodings, tgt_encodings, dim=1)
        skip = F.concat(ctx_input, tgt_input, dim=1)
        if self.add_skip:
            skip = self.skip_proj(skip)
        encodings = self.gate(encodings)
        encodings = self.lnorm(F.broadcast_add(skip, encodings))
        return encodings
class TemporalFusionDecoder(HybridBlock):
    """Static enrichment + causal self-attention + feed-forward (TFT decoder).

    Enriches the temporal features with the static embedding, applies masked
    multi-head attention over the full sequence, then a gated position-wise
    feed-forward block; each sub-block has a gated residual connection with
    layer normalization.  Only the prediction-horizon slice is returned.
    """

    @validated()
    def __init__(
        self,
        context_length: int,
        prediction_length: int,
        d_hidden: int,
        d_var: int,
        n_head: int,
        dropout: float = 0.0,
        **kwargs,
    ):
        super(TemporalFusionDecoder, self).__init__(**kwargs)
        self.context_length = context_length
        self.prediction_length = prediction_length
        with self.name_scope():
            # GRN that conditions the temporal features on the static input.
            self.enrich = GatedResidualNetwork(
                d_hidden=d_hidden,
                d_static=d_var,
                dropout=dropout,
            )
            self.attention = SelfAttention(
                context_length=context_length,
                prediction_length=prediction_length,
                d_hidden=d_hidden,
                n_head=n_head,
                share_values=True,
                dropout=dropout,
            )
            # Post-attention gating: dropout -> dense (double width) -> GLU.
            self.att_net = nn.HybridSequential(prefix="attention_")
            self.att_net.add(nn.Dropout(dropout))
            self.att_net.add(
                nn.Dense(
                    units=d_hidden * 2,
                    in_units=d_hidden,
                    flatten=False,
                    weight_initializer=init.Xavier(),
                )
            )
            self.att_net.add(
                GatedLinearUnit(
                    axis=-1,
                    nonlinear=False,
                )
            )
            self.att_lnorm = nn.LayerNorm(
                axis=-1,
                in_channels=d_hidden,
            )
            # Position-wise feed-forward block: GRN -> dense -> GLU.
            self.ff_net = nn.HybridSequential()
            self.ff_net.add(
                GatedResidualNetwork(
                    d_hidden,
                    dropout=dropout,
                )
            )
            self.ff_net.add(
                nn.Dense(
                    units=d_hidden * 2,
                    in_units=d_hidden,
                    flatten=False,
                    weight_initializer=init.Xavier(),
                )
            )
            self.ff_net.add(
                GatedLinearUnit(
                    axis=-1,
                    nonlinear=False,
                )
            )
            self.ff_lnorm = nn.LayerNorm(axis=-1, in_channels=d_hidden)

    def hybrid_forward(
        self, F, x: Tensor, static: Tensor, mask: Tensor
    ) -> Tensor:
        """Decode the enriched sequence and return horizon-only encodings.

        ``mask`` covers the context part; the prediction horizon is appended
        as all-ones (always attendable).
        """
        # Repeat the static embedding along the whole time axis.
        static = F.tile(
            static, reps=(1, self.context_length + self.prediction_length, 1)
        )
        # Residual source: the horizon slice of the raw input.
        skip = F.slice_axis(x, axis=1, begin=self.context_length, end=None)
        x = self.enrich(x, static)
        # Extend the key mask with ones for the prediction horizon.
        mask_pad = F.slice_axis(F.ones_like(mask), axis=1, begin=0, end=1)
        mask_pad = F.tile(mask_pad, reps=(1, self.prediction_length))
        mask = F.concat(mask, mask_pad, dim=1)
        att = self.attention(x, mask)
        att = self.att_net(att)
        # Keep only the horizon part for the residual connections below.
        x = F.slice_axis(x, axis=1, begin=self.context_length, end=None)
        x = self.att_lnorm(F.broadcast_add(x, att))
        x = self.ff_net(x)
        x = self.ff_lnorm(F.broadcast_add(x, skip))
        return x
| [
"noreply@github.com"
] | dibgerge.noreply@github.com |
440b78a37a96a8dae061a6196c7d596ca42a380b | 4afeb654edac3e995a319ea7a380332be0225563 | /Recursion/fibonacci.py | e3745d2038a7c3c9790614fd0c021a3bee69d583 | [] | no_license | KUMAWAT55/Data-Structure | 345d7abf70c2c84a03575fc0f9565c9265e29136 | f9755a161b91822c7227f0d682398f7d6e95ac53 | refs/heads/master | 2022-09-08T08:21:26.345903 | 2020-05-28T06:38:42 | 2020-05-28T06:38:42 | 260,788,567 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | def fibR(n):
if n==1 or n==2:
return 1
return fibR(n-1)+fibR(n-2)
print (fibR(5)) | [
"RHTKUMAWAT55@GMAIL.COM"
] | RHTKUMAWAT55@GMAIL.COM |
11554a99127ec3db0ed7491c4cd89a7d44588d9b | 11e4bd1b29a66b97df9b3b32b2827eac88a24fd8 | /pysrc/128.py | 6bbdf078a543056afc353ab0e21f3fc963fb8512 | [] | no_license | linkinpark213/leetcode-practice | 4db17462b67e7a1a34184aada041cb3854f78385 | 13379e6fdd9299c606889fefa0a38426ef4fa5e7 | refs/heads/master | 2021-07-08T16:16:28.428003 | 2020-09-16T14:30:51 | 2020-09-16T14:30:51 | 185,179,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | from typing import List
class Solution:
    def longestConsecutive(self, nums: List[int]) -> int:
        """Length of the longest run of consecutive integers in nums, O(n)."""
        values = set(nums)
        best = 0
        for start in values:
            # Only expand from the left edge of each run, so every run is
            # walked exactly once.
            if start - 1 in values:
                continue
            end = start
            while end + 1 in values:
                end += 1
            best = max(best, end - start + 1)
        return best
if __name__ == '__main__':
    # Quick manual smoke test; expected output: 4 (run 1,2,3,4).
    solution = Solution()
    print(solution.longestConsecutive([100, 4, 200, 1, 3, 2]))
| [
"linkinpark213@outlook.com"
] | linkinpark213@outlook.com |
753e7c8e535bbb3c45f626ed661a5971f336b2bd | e896e5c884f4e813709fdcba1dffe9dcab0897b2 | /blog/migrations/0002_auto_20200630_1441.py | 13312ed96e5261d83beeb622e387e1c1e9f12e99 | [] | no_license | iamrraj/Django_Restful_API | 66a0a923200db8c0d9e7cf7387c6fb8b8ce92893 | 224e980ebd5f627bd2e6ac60fcfae906bcdf14da | refs/heads/main | 2023-01-13T11:10:31.315985 | 2020-11-08T10:08:36 | 2020-11-08T10:08:36 | 311,024,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | # Generated by Django 2.2.8 on 2020-06-30 12:41
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the Blog model to Blog1 and updates its verbose names.

    dependencies = [
        # Depends on whichever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('taggit', '0003_taggeditem_add_unique_index'),
        ('blog', '0001_initial'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='Blog',
            new_name='Blog1',
        ),
        migrations.AlterModelOptions(
            name='blog1',
            options={'verbose_name': 'blog1', 'verbose_name_plural': 'blogs1'},
        ),
    ]
| [
"rajr97333@gmail.com"
] | rajr97333@gmail.com |
01b8a2bfc4987b27f6b00f5271730eb49b19b702 | 87fd4beaf5c55211898f3a6805f0ed1a1aba44d5 | /Blender Python Scripts/src/LoadObjects.py | b0f2772d13d1b429626481158256041c323e1382 | [] | no_license | YogiOnBioinformatics/ProteinVR | 97c7b4efd86e4724ea1db624ab554608a2c607b5 | 7ce60498c82819f1522eb0bb02215a772b89c82d | refs/heads/master | 2021-07-07T10:50:27.237357 | 2020-07-30T15:45:22 | 2020-07-30T15:45:22 | 163,353,055 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | import os
import bpy
# get list of all files in directory
os.chdir('/home/yor5/Desktop/4.3i_LifeScienceDB_JP') #CHANGE THIS TO BE YOUR DIRECTORY WHERE YOUR TXT LIST OF ALL OBJECTS THAT YOU WANT TO IMPORT IS LOCATED
file_list = [line.strip() for line in open("objs.txt")] #CHANGE THIS TO BE THE TEXT FILE WITHIN THE CHOSEN DIRECTORY THAT CONTAINS THE TXT LIST OF ALL OBJECTS YOU WANT TO IMPORT
# get a list of files ending in 'obj'
obj_list = [item for item in file_list if "bone" in item.lower()] #IF YOU WANT TO FILTER THROUGH AND IMPORT FILES THAT CONTAIN A KEY WORD, THIS IS THE LINE THAT TAKES CARE OF IT! CHANGE "bone" TO WHATEVER KEYWORD YOU WANT!
# loop through the strings in obj_list and add the files to the scene
for path_to_file in obj_list: #IF YOU COMMENTED OUT THE PREVIOUS LINE "OBJ_LIST =...", SINCE YOU DIDN'T WANT TO FILTER BY KEYWORD, THEN REPLACE "obj_list" WITH "file_list"
bpy.ops.import_scene.obj(filepath = path_to_file) | [
"yraghav97@gmail.com"
] | yraghav97@gmail.com |
90d7a3b87c8ff1e45d159609712f2a68b758362a | dedaef4189a237edccba045e02fd6c9b6ab8d4bc | /mininet_wifi/scripts/uno.py | 86776319d635514ba625f8a0a7c38f9b40442aca | [] | no_license | miraitowa/Mechanism-to-optimize-the-Access-Point-selection | 3530a4a9fe85e999f6f1931549f76810e7ad3451 | 7f9ffd1ca21bd11ddd291b1e7b2eb73b432081a1 | refs/heads/master | 2022-12-26T00:46:39.807663 | 2020-10-05T14:21:09 | 2020-10-05T14:21:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,058 | py | #!/usr/bin/python
"""AP1------AP2------AP3"""
from mininet.net import Mininet
from mininet.node import Controller
from mininet.link import TCLink
from mn_wifi.node import OVSKernelAP
from mininet.cli import CLI
from mn_wifi.cli import CLI_wifi
from mininet.log import setLogLevel
from mn_wifi.net import Mininet_wifi
def topology():
    "Create a network."
    # Builds a mininet-wifi network: three chained APs on channels 1/6/11 and
    # seven mobile stations.  (Python 2 syntax: bare print statements.)
    #net = Mininet(controller=Controller, link=TCLink, accessPoint=OVSKernelAP)
    net = Mininet_wifi(controller=Controller, link=TCLink, accessPoint=OVSKernelAP)
    print "*** Creating nodes"
    sta1 = net.addStation('sta1', mac='00:00:00:00:00:02', ip='10.0.0.2/8')
    sta2 = net.addStation('sta2', mac='00:00:00:00:00:03', ip='10.0.0.3/8')
    sta3 = net.addStation('sta3', mac='00:00:00:00:00:04', ip='10.0.0.4/8')
    sta4 = net.addStation('sta4', mac='00:00:00:00:00:05', ip='10.0.0.5/8')
    sta5 = net.addStation('sta5', mac='00:00:00:00:00:06', ip='10.0.0.6/8')
    sta6 = net.addStation('sta6', mac='00:00:00:00:00:07', ip='10.0.0.7/8')
    sta7 = net.addStation('sta7', mac='00:00:00:00:00:08', ip='10.0.0.8/8')
    """sta8 = net.addStation('sta8', mac='00:00:00:00:00:09', ip='10.0.0.9/8')
    sta9 = net.addStation('sta9', mac='00:00:00:00:00:10', ip='10.0.0.10/8')
    sta10 = net.addStation('sta10', mac='00:00:00:00:00:11', ip='10.0.0.11/8')"""
    # Three APs in a row (x = 30, 60, 90), each with a 20 m range.
    ap1 = net.addAccessPoint('ap1', ssid='ssid-ap1', mode='g', channel='1', position='30,50,0', range=20)
    ap2 = net.addAccessPoint('ap2', ssid='ssid-ap2', mode='g', channel='6', position='60,50,0', range=20) # range: set the AP range
    ap3 = net.addAccessPoint('ap3', ssid='ssid-ap3', mode='g', channel='11', position='90,50,0', range=20)
    c1 = net.addController('c1', controller=Controller)
    print "*** Configuring wifi nodes"
    net.configureWifiNodes()
    print "*** Associating and Creating links"
    net.addLink(ap1, ap2)
    net.addLink(ap2, ap3)
    """plotting graph"""
    net.plotGraph(max_x=120, max_y=130)
    """association control"""
    #net.associationControl('ssf')
    """Seed"""
    #net.seed(1)
    """ *** Available models:
    RandomWalk, TruncatedLevyWalk, RandomDirection, RandomWayPoint, GaussMarkov
    *** Association Control (AC) - mechanism that optimizes the use of the APs:
    llf (Least-Loaded-First)
    ssf (Strongest-Signal-First)"""
    """net.startMobility( time=0 )
    net.mobility( sta1, 'start', time=1, position='10,50,0' )
    net.mobility( sta1, 'stop', time=29, position='120,50,0' )
    net.stopMobility( time=30 )"""
    #net.startMobility(time=0, model='RandomWayPoint', max_x=120, max_y=100, min_v=0.8, max_v=2)
    # Random-waypoint mobility with strongest-signal-first association control.
    net.startMobility(time=0, model='RandomWayPoint', max_x=120, max_y=100, min_v=0.8, max_v=2, seed=1, associationControl='ssf')
    print "*** Starting network"
    net.build()
    c1.start()
    ap1.start([c1])
    ap2.start([c1])
    ap3.start([c1])
    print "*** Running CLI"
    CLI(net)
    print "*** Stopping network"
    net.stop()
if __name__ == '__main__':
    # Enable info-level logging, then build and run the topology.
    setLogLevel('info')
    topology()
| [
"ajcastillo@unicauca.edu.co"
] | ajcastillo@unicauca.edu.co |
bdcd07770f438fe47532383868fd4b8fa6fdd69c | 500e7f1b3873d564bc9309e3ccf5c274a313ae49 | /python_validate_password.py | 930f4e8835143516cacf4b10df672ce6b874b745 | [] | no_license | MulderPu/legendary-octo-guacamole | 5b25040e9e928cd862871f184a199a6413f24716 | 0a358c626f1670e22e448e05798f0edfd5e93832 | refs/heads/master | 2022-06-04T21:18:40.062522 | 2022-05-20T12:34:33 | 2022-05-20T12:34:33 | 102,110,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | '''
Write a Python program to check the validity of a password (input from users).
Validation :
At least 1 letter between [a-z] and 1 letter between [A-Z].
At least 1 number between [0-9].
At least 1 character from [$#@].
Minimum length 6 characters.
Maximum length 16 characters.
'''
import re
def _password_issue(password):
    """Return the message for the first rule *password* violates, or None.

    Rules (checked in order): 6-16 characters, at least one digit, one
    uppercase letter, one lowercase letter, and one character from [@#$].
    """
    if len(password) < 6:
        return "Make sure your password is at least 6 letters."
    if len(password) > 16:
        return "Make sure your password is not more than 16 letters."
    if re.search('[0-9]', password) is None:
        return "Make sure your password has a number in it."
    if re.search('[A-Z]', password) is None:
        return "Make sure your password has a capital letter in it."
    if re.search('[a-z]', password) is None:
        return "Make sure your password has a letter in it."
    if re.search('[@#$]', password) is None:
        return "Make sure your password has a special character [@#$]"
    return None


def validate_password():
    """Prompt repeatedly until the user enters a password passing all rules."""
    # FIX: removed the unused local `special_char`; the pure rule checking is
    # factored into _password_issue so the interactive loop stays trivial.
    while True:
        password = input("Enter a password: ")
        issue = _password_issue(password)
        if issue is None:
            print("Your password seems fine")
            break
        print(issue)
# Module-level call: importing/running this file starts the interactive prompt.
validate_password()
| [
"mulderpu@gmail.com"
] | mulderpu@gmail.com |
dc2c99236d270737d2c0e0566f38c48bfc6e7b57 | bfd616a4af438a207a87337e342fd9f782909243 | /analyses/202011_hilc_cal_err/calibration_errors.py | ba2d70cafec0aed306a991a997f4f1ebcb6f1dcd | [] | no_license | simonsobs/fg2_awg | 273705e865a48228b06354ac2e72b9b2e25c053a | 99133e0360440e6b91f220ac76e30d3ce18dfce4 | refs/heads/master | 2021-07-25T06:14:17.786711 | 2021-07-13T13:39:16 | 2021-07-13T13:39:16 | 215,957,879 | 0 | 0 | null | 2021-07-13T13:29:20 | 2019-10-18T06:41:41 | Jupyter Notebook | UTF-8 | Python | false | false | 5,005 | py | import numpy as np
import fgbuster as fgb
import healpy as hp
import v3_calc as v3
import logging
ALMS_NERSC = '../../releases/202006_hilc_on_planck_so/workspace/cache/alms_{comp}.h5'
NOISE_COV_MATRIX_LOCAL = 'fg_noise.npz'
CMB_SPECTRA_LOCAL = 'cmb.npz'
LMAX = 4000
BIN_WIDTH = 20
FIELDS = 'TT EE BB'.split()
FSKY = 0.3922505853468785 # mean(w^2) / mean(w^4)
def get_bias(delta, field, freqs=v3.Simons_Observatory_V3_LA_bands()):
    """ Post-HILC CMB bias

    Parameters
    ----------
    delta: array
        Array of shape (..., ell, freq) -- or broadcastable to it. The SED of
        the CMB is modeled as flat and equal to 1 (K_CMB units). However, we
        assume that the actual response to the CMB in the data is (1 + delta).
        The array delta can specify these corrections for each frequency
        and/or scale. Examples of shapes,

        * (6,) or (1, 6) -> scale independent, frequency-specific correction
        * (200, 1) -> frequency-independent, scale-specific correction
        * (1000, 200, 6)
          Stack of 1000 correction factors that are scale- and
          frequency-specific

        Note: the size of the ell dimension is hardcoded and equal to 200. Run
        get_bias(0, 'TT') to get the reference ell for each index
    field: str
        Either TT, EE or BB

    Result
    ------
    (ells, numerator, denominator): tuple of arrays
        Binned reference multipoles, plus the numerator and denominator
        factors entering the bias; the latter two broadcast against delta's
        leading dimensions.
    """
    # NOTE(review): the `freqs` default is evaluated once at import time
    # (standard Python default-argument behavior); presumably the SO LA band
    # list is constant, so this is intended -- confirm.
    invN = get_invN(freqs, field)  # (ell, freq, freq)
    # Frequency contractions below use invN as the metric:
    # cmb_dot_cmb = 1 . invN . 1 since the CMB SED is modeled as all-ones.
    cmb_dot_cmb = invN.sum((-1, -2))
    cmb_dot_delta = np.einsum('...lf,...lfn->...l', delta, invN)
    delta_dot_delta = np.einsum('...lf,...lfn,...ln->...l',
                                delta, invN, delta)
    # Binned CMB power spectra cached by _create_cached_cmb_spectrum().
    data = np.load(CMB_SPECTRA_LOCAL)
    cmb_ps = data[field]
    ells = data['ells']
    numerator = 1 + cmb_dot_delta / cmb_dot_cmb
    denominator = 1 + cmb_ps * (delta_dot_delta - cmb_dot_delta**2 / cmb_dot_cmb)
    return ells, numerator, denominator
def _import_get_alm():
    """Import and return ``get_alms`` from the 202006 release directory.

    Temporarily changes the working directory so that module's relative paths
    resolve during import.
    """
    import os
    dir_path = os.path.dirname(os.path.realpath(__file__))
    os.chdir('../../releases/202006_hilc_on_planck_so/')
    from hilc import get_alms
    # NOTE(review): this restores the cwd to *this file's* directory, not the
    # caller's original cwd -- confirm that is the intended behavior.
    os.chdir(dir_path)
    return get_alms
def _create_cached_noise_matrix():
    """Compute and cache the binned empirical non-CMB covariance matrices.

    For each field (TT, EE, BB): load the foreground+noise alms, form the
    empirical harmonic covariance across frequencies, bin it in ell (bins of
    BIN_WIDTH up to LMAX), divide by FSKY, and save everything to
    ``NOISE_COV_MATRIX_LOCAL`` together with the bin centers and frequencies.
    """
    logging.getLogger().setLevel(logging.INFO)
    get_alms = _import_get_alm()
    binned_covs = []
    freqs = []
    for field in range(3):
        alms, freq = get_alms(
            field, ALMS_NERSC,
            'tsz ksz cib synchrotron freefree ame dust noise'.split(),
            'so planck'.split(),
            lmax=LMAX
        )
        cov = fgb.separation_recipes._empirical_harmonic_covariance(alms)
        # Bin edges: [1, 1+BIN_WIDTH, ...], with the last edge clamped so the
        # final bin reaches exactly LMAX.
        lbins = np.arange(1, LMAX+BIN_WIDTH, BIN_WIDTH)
        lbins[-1] = LMAX+1
        binned_cov = np.empty(cov.shape[:-1] + lbins[:-1].shape)
        logging.info(f'{FIELDS[field]} cov')
        for i, (lmin, lmax) in enumerate(zip(lbins[:-1], lbins[1:])):
            # Average the covariances in the bin, weighting each ell by its
            # 2*ell + 1 degrees of freedom.
            lmax = min(lmax, cov.shape[-1])
            dof = 2 * np.arange(lmin, lmax) + 1
            binned_cov[..., i] = (dof / dof.sum() * cov[..., lmin:lmax]).sum(-1)
        freqs.append(freq)
        # FSKY correction (see the FSKY constant defined above).
        binned_covs.append(binned_cov/FSKY)
    logging.info('Saving')
    np.savez(NOISE_COV_MATRIX_LOCAL,
             TT=binned_covs[0],
             EE=binned_covs[1],
             BB=binned_covs[2],
             ells=(lbins[:-1] + lbins[1:]) / 2.,
             freq_TT=freqs[0],
             freq_EE=freqs[1],
             freq_BB=freqs[2],
             )
    logging.info('Saved')
def _create_cached_cmb_spectrum():
    """Compute and cache the binned CMB power spectra for TT, EE and BB.

    Loads the SO CMB alms per field, computes the angular power spectrum,
    bins it in ell exactly like ``_create_cached_noise_matrix`` (2*ell + 1
    weighting, FSKY correction) and saves the result to ``CMB_SPECTRA_LOCAL``.
    """
    get_alms = _import_get_alm()
    binned_cls = []
    for field in range(3):
        alms = get_alms(field, ALMS_NERSC, ['cmb'], ['so'], lmax=LMAX)[0][-1]
        cl = hp.alm2cl(alms)
        lbins = np.arange(1, LMAX+BIN_WIDTH, BIN_WIDTH)
        lbins[-1] = LMAX+1
        binned_cl = np.empty(cl.shape[:-1] + lbins[:-1].shape)
        for i, (lmin, lmax) in enumerate(zip(lbins[:-1], lbins[1:])):
            # Average the spectra in the bin, weighted by 2*ell + 1.
            lmax = min(lmax, cl.shape[-1])
            dof = 2 * np.arange(lmin, lmax) + 1
            binned_cl[..., i] = (dof / dof.sum() * cl[..., lmin:lmax]).sum(-1)
        binned_cls.append(binned_cl/FSKY)
    np.savez(CMB_SPECTRA_LOCAL,
             TT=binned_cls[0],
             EE=binned_cls[1],
             BB=binned_cls[2],
             ells=(lbins[:-1] + lbins[1:]) / 2.,
             )
def get_invN(freqs, field):
    """Return the regularized inverse noise covariance for ``freqs``/``field``.

    Memoized on function attributes: ``get_invN.invN`` holds one inverse per
    field and the whole cache is rebuilt whenever a different frequency list
    is requested (tracked via ``get_invN.freqs``).
    """
    try:
        # Cache hit only if the same frequencies were used and this field has
        # already been inverted.  AttributeError covers the first-ever call.
        assert np.all(get_invN.freqs == freqs)
        assert field in get_invN.invN
    except (AttributeError, AssertionError):
        get_invN.freqs = freqs
        # Load the cached binned covariance and restrict it to the requested
        # frequencies (rows and columns).
        data = np.load(NOISE_COV_MATRIX_LOCAL)
        tot_freqs = list(data[f'freq_{field}'].astype(int))
        freq_idx = np.array([tot_freqs.index(f) for f in freqs])
        N = data[field][freq_idx][:, freq_idx]
        if not hasattr(get_invN, 'invN'):
            get_invN.invN = {}
        get_invN.invN[field] = fgb.separation_recipes._regularized_inverse(N.T)
    return get_invN.invN[field]
| [
"noreply@github.com"
] | simonsobs.noreply@github.com |
e909e27376b545fa2284c2278f7a0a4c6b582076 | 3041762000ea7c669f6eb38a7d81f0a415368bb3 | /RctMod2d_Mesh.py | 2746a59bc8df91961ead21d0a0d7b4f0a30e9f05 | [] | no_license | buckees/Reactor-Model-2D | be70159729811e4e5e33ed1ea9432635b42c404e | 663f3d2f3632f8167659ddb3cd9d956271054583 | refs/heads/main | 2023-01-21T18:02:06.690697 | 2020-12-03T02:19:49 | 2020-12-03T02:19:49 | 309,267,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,204 | py | """
Mesh Module.
Create standalone mesh or
Create mesh for input geometry.
"""
import numpy as np
from copy import deepcopy
import matplotlib.pyplot as plt
class Mesh2d():
    """Define 2d Mesh.

    Holds a regular rectangular grid (nodes at self.x/self.z), a per-node
    material map and boundary flags.  The mesh can be generated from an
    imported geometry or created standalone.
    """
    def __init__(self, name='Mesh2d'):
        """
        Init the Mesh2d.
        name: str, var, name of the Mesh2d.
        """
        self.name = name
    def import_geom(self, geom):
        """Import geometry (an object exposing .domain, .bl and .get_label)."""
        self.geom = geom
    def generate_mesh(self, ngrid=(11, 11)):
        """Generate mesh according to the imported geometry.

        ngrid: (nx, nz) number of nodes in each direction.
        """
        self.width, self.height = self.geom.domain
        self.ngrid = np.asarray(ngrid)
        self.nx, self.nz = self.ngrid
        # Node spacing: domain size over the number of intervals.
        self.res = np.divide(self.geom.domain, self.ngrid - 1)
        self.delx, self.delz = self.res
        # Node coordinates start at the geometry's bottom-left corner.
        tempx = np.linspace(self.geom.bl[0], self.geom.bl[0] + self.width,
                            self.nx)
        tempz = np.linspace(self.geom.bl[1], self.geom.bl[1] + self.height,
                            self.nz)
        self.x, self.z = np.meshgrid(tempx, tempz)
        self.mat = np.zeros_like(self.x)
        self._find_bndy()
        self._assign_mat()
        self._calc_plasma_area()
    def create_mesh(self, bl=(0.0, 0.0), domain=(1.0, 1.0), ngrid=(11, 11)):
        """Create standalone mesh (no geometry, no materials).

        bl: bottom-left corner; domain: (width, height); ngrid: (nx, nz).
        """
        self.bl = np.asarray(bl)
        self.domain = np.asarray(domain)
        self.ngrid = np.asarray(ngrid)
        self.res = np.divide(self.domain, self.ngrid - 1)
        self.width, self.height = self.domain
        self.nx, self.nz = self.ngrid
        self.delx, self.delz = self.res
        # NOTE(review): unlike generate_mesh, coordinates start at (0, 0)
        # rather than at self.bl -- confirm that is intended.
        tempx = np.linspace(0.0, self.width, self.nx)
        tempz = np.linspace(0.0, self.height, self.nz)
        self.x, self.z = np.meshgrid(tempx, tempz)
        self._find_bndy()
    def _find_bndy(self):
        """Add boundaries.

        Walks the rectangle perimeter clockwise starting at the bottom-left
        node, collecting (row, col) indices, then flags them in self.bndy.
        """
        self.bndy = np.zeros_like(self.x)
        self.bndy_list = list()
        for i in range(self.nx-1):
            self.bndy_list.append((0, i))
        for j in range(self.nz-1):
            self.bndy_list.append((j, self.nx-1))
        for i in reversed(range(1, self.nx)):
            self.bndy_list.append((self.nz-1, i))
        for j in reversed(range(1, self.nz)):
            self.bndy_list.append((j, 0))
        # sign value at bndy as 1
        for idx in self.bndy_list:
            self.bndy[idx] = 1
    def _assign_mat(self):
        """Assign materials to nodes by querying the geometry at each node."""
        for _idx, _x in np.ndenumerate(self.x):
            _z = self.z[_idx]
            _posn = np.array([_x, _z])
            _label, self.mat[_idx] = self.geom.get_label(_posn)
    def _calc_plasma_area(self):
        """Calc the total area of plasma region.

        Sums one cell area (delx * delz) per node whose material code is 0.
        """
        self.area = 0
        for _idx, _mat in np.ndenumerate(self.mat):
            if not _mat:
                self.area += self.delx * self.delz
    def plot(self, figsize=(8, 8), dpi=600, fname='Mesh.png', ihoriz=1):
        """Plot mesh.

        Scatter-plots the material map and the boundary flags side by side
        (horizontally if ihoriz is truthy, vertically otherwise) and saves
        the figure to fname.
        """
        colMap = plt.get_cmap('Set1')
        if ihoriz:
            fig, axes = plt.subplots(1, 2, figsize=figsize, dpi=dpi,
                                     constrained_layout=True)
        else:
            fig, axes = plt.subplots(2, 1, figsize=figsize, dpi=dpi,
                                     constrained_layout=True)
        ax = axes[0]
        ax.scatter(self.x, self.z, c=self.mat, s=10, cmap=colMap)
        ax = axes[1]
        ax.scatter(self.x, self.z, c=self.bndy, s=10, cmap=colMap)
        fig.savefig(fname, dpi=dpi)
        plt.close()
    def cnt_diff(self, f):
        """
        Calculate dy/dx using central differencing.
        input: y
        dy/dx = (y[i+1] - y[i-1])/(2.0*dx)
        dy[0] = dy[1]; dy[-1] = dy[-2]
        output: dy
        """
        dfx = np.zeros_like(self.x)
        dfz = np.zeros_like(self.z)
        # Although dy[0] and dy[-1] are signed here,
        # they are eventually specified in boundary conditions
        # dy[0] = dy[1]; dy[-1] = dy[-2]
        for i in range(1, self.nx-1):
            dfx[:, i] = (f[:, i+1] - f[:, i-1])/self.delx/2.0
        for j in range(1, self.nz-1):
            dfz[j, :] = (f[j+1, :] - f[j-1, :])/self.delz/2.0
        # Edge columns/rows copy their interior neighbor.
        dfx[:, 0], dfx[:, -1] = deepcopy(dfx[:, 1]), deepcopy(dfx[:, -2])
        dfz[0, :], dfz[-1, :] = deepcopy(dfz[1, :]), deepcopy(dfz[-2, :])
        return dfx, dfz
    def cnt_diff_2nd(self, f):
        """
        Calculate d2y/dx2 using 2nd order central differencing.
        input: y
        d2y/dx2 = (y[i+1] - 2 * y[i] + y[i-1])/dx^2
        d2y[0] = d2y[1]; d2y[-1] = d2y[-2]
        output: d2y/dx2 (sum of the x- and z-direction second derivatives)
        """
        d2fx = np.zeros_like(self.x)
        d2fz = np.zeros_like(self.z)
        # Although dy[0] and dy[-1] are signed here,
        # they are eventually specified in boundary conditions
        # d2y[0] = d2y[1]; d2y[-1] = d2y[-2]
        for i in range(1, self.nx-1):
            d2fx[:, i] = (f[:, i+1] - 2 * f[:, i] + f[:, i-1])/self.delx**2
        for j in range(1, self.nz-1):
            d2fz[j, :] = (f[j+1, :] - 2 * f[j, :] + f[j-1, :])/self.delz**2
        d2fx[:, 0], d2fx[:, -1] = deepcopy(d2fx[:, 1]), deepcopy(d2fx[:, -2])
        d2fz[0, :], d2fz[-1, :] = deepcopy(d2fz[1, :]), deepcopy(d2fz[-2, :])
        # Laplacian-style combination of both directions.
        d2f = d2fx + d2fz
        return d2f
if __name__ == '__main__':
    """Test Mesh."""
    from RctMod2d_Geom import Geom2d, Domain, Rectangle
    # build the geometry: a 2x4 domain with metal walls and a quartz window
    geom2d = Geom2d(name='Geom2D_Test', is_cyl=False)
    domain = Domain((-1.0, 0.0), (2.0, 4.0))
    geom2d.add_domain(domain)
    top = Rectangle('Metal', (-1.0, 3.5), (1.0, 4.0))
    geom2d.add_shape(top)
    bott = Rectangle('Metal', (-0.8, 0.0), (0.8, 0.2))
    geom2d.add_shape(bott)
    left = Rectangle('Metal', (-1.0, 0.0), (-0.9, 4.0))
    geom2d.add_shape(left)
    right = Rectangle('Metal', (0.9, 0.0), (1.0, 4.0))
    geom2d.add_shape(right)
    quartz = Rectangle('Quartz', (-0.9, 3.3), (0.9, 3.5))
    geom2d.add_shape(quartz)
    geom2d.plot(fname='geom2d.png')
    print(geom2d)
    # generate mesh to imported geometry and plot it (saved as Mesh.png)
    mesh2d = Mesh2d('Mesh2D_Test')
    mesh2d.import_geom(geom2d)
    mesh2d.generate_mesh(ngrid=(21, 41))
    mesh2d.plot()
| [
"67809187+buckees@users.noreply.github.com"
] | 67809187+buckees@users.noreply.github.com |
3d5736b49e20e5231ed3c0e8ac5532b0b42c216c | d00edd6bc9d0b3e4f7a8629f353d00e51e0bffbf | /Driver/LCD1602AI2C.py | d3c7becde23923006301b3903131cd00d0276524 | [
"MIT"
] | permissive | aresfe/RPiMonitor | 8aaef360ea03057bf1878c1dd57269f59c4ed2e2 | 796b11ac90ed231c64db3cbfdf660bdc03bc6d08 | refs/heads/master | 2020-03-30T00:27:38.241813 | 2018-09-30T16:29:19 | 2018-09-30T16:29:19 | 150,524,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,697 | py | import time
import threading
import enum
import Adafruit_CharLCD as LCD
from ILCDDriver import *
class FlashFreq(enum.Enum):
    # Blink toggle intervals in seconds, used by LED.flashEx().
    FAST = 0.05
    NORM = 0.5
    SLOW = 1
class LED:
    """Colour/blink helper that drives an RGB LED through a driver object.

    The driver must expose ``set_led_color(r, g, b)`` and ``get_led_color()``.
    Colours are (r, g, b) tuples of 0/1 channel flags; ``_color`` remembers
    the last selected colour so ``on()``/``switch()`` can restore it.
    """

    def __init__(self, driver):
        self._driver = driver
        self._color = (0, 0, 0)

    def red(self):
        self._color = (1, 0, 0)
        self._driver.set_led_color(*self._color)

    def blue(self):
        self._color = (0, 0, 1)
        self._driver.set_led_color(*self._color)

    def green(self):
        self._color = (0, 1, 0)
        self._driver.set_led_color(*self._color)

    def yellow(self):
        self._color = (1, 1, 0)
        self._driver.set_led_color(*self._color)

    def purple(self):
        # Bug fix: purple/magenta is red + blue.  The original set (0, 1, 1),
        # which is cyan.
        self._color = (1, 0, 1)
        self._driver.set_led_color(*self._color)

    def cyan(self):
        # Bug fix: cyan is green + blue.  The original set (1, 0, 1),
        # which is magenta/purple.
        self._color = (0, 1, 1)
        self._driver.set_led_color(*self._color)

    def white(self):
        self._color = (1, 1, 1)
        self._driver.set_led_color(*self._color)

    def on(self):
        """Re-apply the last selected colour."""
        self._driver.set_led_color(*self._color)

    def off(self):
        """Turn the LED off without forgetting the last selected colour."""
        self._driver.set_led_color(0, 0, 0)

    def color(self):
        """Return the last selected (r, g, b) colour tuple."""
        return self._color

    def switch(self):
        """Toggle between off and the last selected colour."""
        if self._driver.get_led_color() == (0, 0, 0):
            self.on()
        else:
            self.off()

    @staticmethod
    def _doflash(driver, interval, count):
        # Blink in a single loop.  The original implementation recursed via
        # driver.flash(), spawning one new thread per toggle; this keeps the
        # whole blink sequence on the one worker thread started by flash().
        # ``count`` may be fractional (see flashEx), hence the countdown loop
        # instead of range().
        while count > 0:
            driver.switch()
            time.sleep(interval)
            count -= 1
        driver.off()

    def flash(self, interval=0.1, count=20):
        """Blink asynchronously: toggle every ``interval`` seconds, ``count`` times."""
        t = threading.Thread(target=LED._doflash, args=(self, interval, count))
        t.start()

    def flashEx(self, freq, timelength):
        """Blink at ``freq`` (a FlashFreq member) for roughly ``timelength`` seconds."""
        self.flash(freq.value, timelength / freq.value)
class LCD1602A(LCDDriver):
    """LCDDriver implementation for the Adafruit 16x2 character LCD plate."""

    def __init__(self):
        # The LED helper is usable immediately, but the LCD itself is only
        # created in init() -- call init() before any other LCD method.
        self.led = LED(self)
        self._color = (1, 1, 1)

    def init(self):
        self._lcd = LCD.Adafruit_CharLCDPlate()

    def switch_backlight(self, on):
        # Bug fix: the original passed ``on if 1 else 0``, which always
        # evaluates to ``on``.  The intended expression is ``1 if on else 0``.
        self._lcd.set_backlight(1 if on else 0)

    def switch_cursor(self, on):
        self._lcd.show_cursor(on)

    def switch_display(self, on):
        self._lcd.enable_display(on)

    def set_led_off(self):
        self._color = (0, 0, 0)
        self._lcd.set_color(0, 0, 0)

    def set_led_color(self, red, green, blue):
        self._color = (red, green, blue)
        self._lcd.set_color(red, green, blue)

    def get_led_color(self):
        return self._color

    def set_cfg_autoscroll(self, on):
        self._lcd.autoscroll(on)

    def set_cfg_blink(self, on):
        self._lcd.blink(on)

    def set_cursor_pos(self, row, col):
        # Note: this wrapper takes (row, col) but the Adafruit API takes (col, row).
        self._lcd.set_cursor(col, row)

    def set_message(self, message):
        self._lcd.message(message)

    def msg_move_left(self, count=1):
        while count:
            self._lcd.move_left()
            count -= 1

    def msg_move_right(self, count=1):
        while count:
            self._lcd.move_right()
            count -= 1

    def clear(self):
        self._lcd.clear()
if __name__ == '__main__':
    # Manual smoke test: light the LED white for 5 seconds, then shut down.
    print("start test")
    dr = LCD1602A()
    dr.init()
    dr.switch_display(True)
    dr.switch_backlight(True)
    dr.set_led_off()
    dr.led.white()
    """
    dr.led.flashEx(FLASHFREQ.FLASH_NORM, 5)
    dr.set_cfg_autoscroll(True)
    dr.set_message("Hello World! Here is Raspberry Pi~~~~")
    time.sleep(5)
    dr.set_cfg_autoscroll(True)
    dr.set_message("Hello World! Here is Raspberry Pi~~~~")
    dr.switch_cursor(True)
    time.sleep(5)
    dr.set_cfg_blink(True)
    dr.msg_move_left(3)
    """
    # NOTE(review): the disabled string-block above references
    # FLASHFREQ.FLASH_NORM, which does not match FlashFreq.NORM as defined in
    # this module -- fix before re-enabling.
    time.sleep(5)
    dr.switch_backlight(False)
    dr.set_led_off()
    dr.switch_display(False)
| [
"shiyaoli@ixuanqu.com"
] | shiyaoli@ixuanqu.com |
ef4c20ff087aa7e324f75b1fad2574e638e373a5 | 764583cac4157e69fe9ac58a459c942947d9b54e | /puns/pun_generator.py | 6bb929acbe244f452956368b8c8fe3bef6e2bd1a | [] | no_license | randypiper/potential-puns | 99ea0cb07aba8c46afa6021ef7c0bc457331415e | f5e962b19ce9eb0e99cc37515666ffe224fafee3 | refs/heads/master | 2021-04-29T14:53:31.053315 | 2018-03-03T03:50:34 | 2018-03-03T03:50:34 | 121,784,526 | 14 | 1 | null | 2018-03-03T03:50:35 | 2018-02-16T18:17:52 | Python | UTF-8 | Python | false | false | 1,597 | py | import collections
import itertools
class PunGenerator:
    """Generate pun phrases that sound like a target phrase.

    Works over a phoneme dictionary exposing ``get_phonemes(word)`` (list of
    phoneme strings per word) and ``get_words(phoneme_string)`` (set of words
    pronounced that way, or None).  Results are memoised per phoneme string
    in ``computed_puns`` across calls.
    """

    def __init__(self, phoneme_dict):
        self.phoneme_dict = phoneme_dict
        self.computed_puns = collections.defaultdict(set)

    def generate_puns(self, pun_target):
        """Return every word sequence whose phonemes spell *pun_target*."""
        candidates = self._convert_phrase_to_possible_phonemes(pun_target)
        for candidate in candidates:
            self._iterate_puns(candidate)
        return self._get_all_puns(candidates)

    def _iterate_puns(self, target_phoneme):
        # Memoised recursion: fill computed_puns[target_phoneme] with every
        # word sequence matching the phoneme string.
        if target_phoneme in self.computed_puns:
            return self.computed_puns[target_phoneme]
        parts = target_phoneme.split(" ")
        if not parts:
            return
        # Whole-string match, if any word is pronounced exactly this way.
        direct = self.phoneme_dict.get_words(target_phoneme)
        self._add_pun(target_phoneme, direct if direct is not None else set())
        # Otherwise split at every position and combine sub-solutions.
        for split_at in range(1, len(parts)):
            prefix = " ".join(parts[:split_at])
            suffix = " ".join(parts[split_at:])
            self._iterate_puns(prefix)
            self._iterate_puns(suffix)
            combined = {
                left + " " + right
                for left in self.computed_puns[prefix]
                for right in self.computed_puns[suffix]
            }
            self._add_pun(target_phoneme, combined)

    def _add_pun(self, phoneme, words):
        self.computed_puns[phoneme].update(words)

    def _convert_phrase_to_possible_phonemes(self, phrase):
        # Cartesian product of each word's possible pronunciations.
        per_word = [self.phoneme_dict.get_phonemes(word) for word in phrase.split(" ")]
        return [" ".join(combo) for combo in itertools.product(*per_word)]

    def _get_all_puns(self, target_phonemes):
        collected = set()
        for phoneme in target_phonemes:
            collected |= self.computed_puns[phoneme]
        return collected
| [
"randyart@umich.edu"
] | randyart@umich.edu |
4d49dd2084dfbcb19ae43eb20bdff022c03d3d1c | 1207e317fa2837fa4cdb49150b9b2ca99dada2f3 | /SRMS/srms/wsgi.py | 444add657ea221544e461d6a7021a3c1b48ba373 | [
"MIT"
] | permissive | ericniyon/all_in_one_repo | d14cb715776f5c23851d23930145fcb707aaca1d | 9080315fbe9e8226a21bf35c49ff7662b4b095b4 | refs/heads/master | 2022-12-16T17:04:48.602534 | 2020-01-12T00:40:54 | 2020-01-12T00:40:54 | 233,317,032 | 0 | 0 | null | 2022-12-08T01:50:51 | 2020-01-12T00:30:03 | Python | UTF-8 | Python | false | false | 481 | py | """
WSGI config for srms project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# from whitenoise.django import DjangoWhiteNoise
# application = DjangoWhiteNoise(application)

# Point Django at the project settings before building the WSGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'srms.settings')

# WSGI entry point used by application servers (gunicorn / uwsgi / mod_wsgi).
application = get_wsgi_application()
| [
"niyoeri6@gmail.com"
] | niyoeri6@gmail.com |
e9938fad11621df8a414b3e5c323e99c1409857a | a44975ad96e51418e62891c70bc6376a51c061e1 | /mnist/sphere_loss_v32.py | b0a86e9a7f8075581a2f8221577a634471ea0758 | [] | no_license | Apich238/Face-group-loss | 48827b29a0277bee38473f358443bd06f02987ce | 5dfd830feb6d59d5a7178172ec42ab3a120eac00 | refs/heads/master | 2022-02-05T17:07:19.405856 | 2022-01-30T17:37:59 | 2022-01-30T17:37:59 | 146,727,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,946 | py | import tensorflow as tf
def angle(a, b):
    # Angle between unit vectors: arccos of the dot product, clipped to
    # [-1, 1] to guard against floating-point drift outside acos's domain.
    # NOTE(review): reduces over axis=0, unlike angles() below (axis=-1) --
    # presumably intended for single vectors; confirm with callers.
    return tf.acos(tf.clip_by_value(tf.reduce_sum(tf.multiply(a, b), axis=0), -1., 1.))
def angles(a, b):
    # Batched variant of angle(): reduces the dot product over the last axis.
    return tf.acos(tf.clip_by_value(tf.reduce_sum(tf.multiply(a, b), axis=-1), -1., 1.))  # axis=2
def get_centers(features, images_per_class):
    # Compute the (L2-normalised) mean embedding per class.  Assumes the
    # batch is laid out as consecutive groups of ``images_per_class`` rows.
    with tf.variable_scope('centers_evaluation'):
        features = tf.reshape(features, (-1, images_per_class, features.shape[-1]))
        centers = tf.reduce_mean(features, axis=1, keepdims=False)
        # NOTE(review): l2_normalize is called without an axis argument; the
        # default normalises over all axes -- confirm axis=-1 was intended.
        centers = tf.nn.l2_normalize(centers, name='centers')
        return centers
def get_dists(fs, centers):
    # Angular distance from every embedding to its own class center.
    with tf.variable_scope('distances_to_centers'):
        # Insert the per-class image axis so centers broadcast against fs.
        cs = tf.expand_dims(centers, 1)
        dsts = angles(tf.nn.l2_normalize(fs), cs)
        return dsts
def get_sd(dists):
    '''
    Compute per-class standard deviations from the known distances using the
    unbiased estimator (distances are treated as deviations from zero, so
    the sum of squares is divided by N - 1).

    :param dists: per-class distance matrix, classes along axis 0
    :return: per-class standard deviations
    '''
    with tf.variable_scope('standart_deviations'):
        d = tf.sqrt(tf.to_float(tf.subtract(tf.shape(dists)[1], 1)))
        return tf.sqrt(tf.reduce_sum(tf.square(dists), axis=1)) / d
def SphereIntersection(sd1, C1, sd2, C2, R, m):
    # Hinge overlap between two class "spheres" on the unit hypersphere:
    # positive when the scaled radii (R * std-devs) plus margin m exceed the
    # angle between the two centers.
    i = tf.nn.relu(R * (sd1 + sd2) + m - angle(C1, C2))
    return i  # tf.square(i)
def get_intersection_matrix(sd, C, R, m, l):
    # Full pairwise intersection matrix: off-diagonal entries measure the
    # overlap between two class spheres; diagonal entries penalise each
    # class's own spread, scaled by l.
    with tf.variable_scope('intersection_matrix'):
        # One element of the intersection matrix.
        m_el = lambda i, j: tf.cond(tf.equal(i, j),
                                    true_fn=lambda: l * sd[i],  # tf.constant(0.,dtype=tf.float32),
                                    false_fn=lambda: SphereIntersection(sd[i], C[i], sd[j], C[j], R, m))
        # Indices into the square matrix.
        indices = tf.range(0, tf.shape(C)[0], dtype=tf.int32, name='indices')
        # One row of the intersection matrix for a given index.
        m_row = lambda i: tf.map_fn(fn=lambda j: m_el(i, j), elems=indices, dtype=tf.float32)
        return tf.map_fn(fn=lambda i: m_row(i), elems=indices, dtype=tf.float32)
def SphereIntersections(sd1, cs1, sd2, cs2, R, m):
    # Batched pairwise loss: squared hinge on the angular margin between
    # centers, plus an R-weighted squared ratio of the combined spread to the
    # center separation.
    return tf.square(tf.nn.relu(m - angles(cs1, cs2))) + R * tf.square((sd1 + sd2) / angles(cs1, cs2))
    # + 0.1*tf.square(sd1) # + sd2)
def get_intersection_by_pairs(sd, centers, R, m, l):
    # Assumes classes arrive in consecutive (A, B) pairs; computes the
    # intersection loss for each pair only, instead of the full matrix.
    # NOTE(review): parameter ``l`` is unused here, unlike in
    # get_intersection_matrix -- confirm that is intentional.
    sdp = tf.reshape(sd, [-1, 2])
    sd1, sd2 = tf.split(sdp, 2, 1)
    sd1 = tf.reshape(sd1, [-1], name='sdA')
    sd2 = tf.reshape(sd2, [-1], name='sdB')
    csp = tf.reshape(centers, [-1, 2, centers.shape[-1]])
    cs1, cs2 = tf.split(csp, 2, 1)
    cs1 = tf.reshape(cs1, [-1, centers.shape[-1]], name='mA')
    cs2 = tf.reshape(cs2, [-1, centers.shape[-1]], name='mB')
    return SphereIntersections(sd1, cs1, sd2, cs2, R, m)
def get_sphere_loss(features, images_per_class, R=3., m=0.1, l=0.1):
    """Sphere loss: per-pair intersection of class spheres on the unit hypersphere.

    Returns (pairwise loss, per-class std-devs, class centers, embeddings).
    """
    with tf.variable_scope('my_loss_evaluation'):
        embeddings = tf.nn.l2_normalize(features, axis=-1, name='embeddings')
        # Group the embeddings by class (consecutive images_per_class rows).
        embs_rs = tf.reshape(embeddings, (-1, images_per_class, embeddings.shape[-1]), name='embeddings_grouped')
        centers = get_centers(features, images_per_class)
        dists = get_dists(embs_rs, centers)
        # Standard deviation (square root of the variance) of each class.
        sd = get_sd(dists)
        # Intersection term; the full-matrix variant is kept for reference.
        # mx = get_intersection_matrix(sd, centers, R, m, l)
        mx = get_intersection_by_pairs(sd, centers, R, m, l)
        return mx, sd, centers, embeddings
| [
"apich238@gmail.com"
] | apich238@gmail.com |
ac7bb0bee930ba2183aae75223a0b31a36b29c83 | e1a2c6ed4a4b93b4697974e3b0a32a4d67daa6f6 | /venv/Lib/site-packages/pybrain/tests/unittests/test_network_forward_backward.py | 38e4bb4001d65c53e216e81fc1e56dcef765410b | [
"MIT"
] | permissive | ishatserka/MachineLearningAndDataAnalysisCoursera | cdf0f23a58617e17d6b938e3a9df17daae8585e4 | e82e772df2f4aec162cb34ac6127df10d14a625a | refs/heads/master | 2021-09-11T01:39:26.228392 | 2018-04-05T14:33:39 | 2018-04-05T14:33:39 | 117,153,454 | 0 | 0 | MIT | 2018-03-27T05:20:37 | 2018-01-11T21:05:33 | Python | UTF-8 | Python | false | false | 1,051 | py | """
Test the forward and backward passes through a linear network.
>>> from scipy import array
>>> from pybrain import LinearLayer
>>> from pybrain.tools.shortcuts import buildNetwork
>>> n = buildNetwork(2, 4, 3, bias = False, hiddenclass = LinearLayer, recurrent=True)
The forward passes (2 timesteps), by two different but equivalent methods
>>> input = array([1,2])
>>> n.inputbuffer[0] = input
>>> n.forward()
>>> tmp = n.activate(input * 2)
The backward passes, also by two different but equivalent methods
>>> outerr = array([-0.1, 0, 1])
>>> n.outputerror[1] = outerr * 3
>>> n.backward()
>>> tmp = n.backActivate(outerr)
Verify that the inputs and outputs are proportional
>>> sum(n.outputbuffer[1]/n.outputbuffer[0])
6.0
>>> abs((n.inputerror[1]/n.inputerror[0])[1] - 3.0) < 0.0001
True
"""
__author__ = 'Tom Schaul, tom@idsia.ch'

from pybrain.tests import runModuleTestSuite

if __name__ == "__main__":
    # Run the doctests embedded in this module's docstring.
    runModuleTestSuite(__import__('__main__'))
| [
"shatserka@gmail.com"
] | shatserka@gmail.com |
977cdacd46e493b5840f64f28e229c73a9186631 | aef40813a1b92cec0ea4fc25ec1d4a273f9bfad4 | /Q02__/80_Wiggle_Sort/Solution.py | c5c8f5d63a96de76b08ee985d53c20fe2a8e1f73 | [
"Apache-2.0"
] | permissive | hsclinical/leetcode | e9d0e522e249a24b28ab00ddf8d514ec855110d7 | 48a57f6a5d5745199c5685cd2c8f5c4fa293e54a | refs/heads/main | 2023-06-14T11:28:59.458901 | 2021-07-09T18:57:44 | 2021-07-09T18:57:44 | 319,078,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | from typing import List
class Solution:
    # LeetCode "Wiggle Sort"-style reorder: rearrange nums in place so that
    # even positions hold smaller values than their odd-position neighbours.
    def wiggleSort(self, nums: List[int]) -> None:
        """
        Do not return anything, modify nums in-place instead.
        """
        numsLen = len(nums)
        if numsLen == 2:
            # Two elements: just put them in ascending order.
            if nums[0] > nums[1]:
                tmp = nums[0]
                nums[0] = nums[1]
                nums[1] = tmp
        elif numsLen >= 3:
            sortedNums = sorted(nums)
            # Median of the sorted copy (mean of the middle two for even length).
            if numsLen % 2 == 0:
                mediumNum = (sortedNums[(numsLen // 2) - 1] + sortedNums[(numsLen // 2)]) / 2
            else:
                mediumNum = sortedNums[(numsLen // 2)]
            # Misplaced elements: even slots should hold values below the
            # median, odd slots values above it.
            smallIdx = [i for i in range(numsLen) if (i % 2 == 0) and (nums[i] >= mediumNum)]
            largeIdx = [i for i in range(numsLen) if (i % 2 == 1) and (nums[i] <= mediumNum)]
            smallIdxLen = len(smallIdx)
            largeIdxLen = len(largeIdx)
            # Balance the two index lists by dropping entries whose value
            # equals the median from the longer list.
            if smallIdxLen < largeIdxLen:
                removeIdx = []
                for idx in largeIdx:
                    if nums[idx] == mediumNum:
                        removeIdx.append(idx)
                cntToRemove = largeIdxLen - smallIdxLen
                for i in range(cntToRemove):
                    largeIdx.remove(removeIdx[i])
            elif smallIdxLen > largeIdxLen:
                removeIdx = []
                for idx in smallIdx:
                    if nums[idx] == mediumNum:
                        removeIdx.append(idx)
                cntToRemove = smallIdxLen - largeIdxLen
                for i in range(cntToRemove):
                    smallIdx.remove(removeIdx[i])
            # Swap the misplaced pairs across the median.
            # NOTE(review): this pairs smallIdx[i] with largeIdx[i] and assumes
            # the trimmed lists end up equal in length; with many elements equal
            # to the median the balancing may not suffice -- verify against the
            # full LeetCode test set before relying on this.
            for i in range(len(smallIdx)):
                tmp = nums[smallIdx[i]]
                nums[smallIdx[i]] = nums[largeIdx[i]]
                nums[largeIdx[i]] = tmp
| [
"luhongisu@gmail.com"
] | luhongisu@gmail.com |
5c13d3609b7140c8b500e263c31db044a66897f8 | 374a70c15b890b9df11b52b67ec2347f6039b05a | /Pikachu/modules/error_handler.py | ccd704128e8d1dd24f50865a7f23b908ae79531c | [] | no_license | Ryu120/Pikachu | 78f56c1e7befbe2aba83a39a779374d1a72c476f | 3025295542d3b23896da71983bc70316b6d10b46 | refs/heads/main | 2023-08-27T03:55:20.534875 | 2021-10-20T13:19:04 | 2021-10-20T13:19:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,177 | py | import traceback
import requests
import html
import random
import traceback
import sys
import pretty_errors
import io
from telegram import Update, InlineKeyboardMarkup, InlineKeyboardButton
from telegram.ext import CallbackContext, CommandHandler
from Pikachu import dispatcher, DEV_USERS, OWNER_ID
pretty_errors.mono()
class ErrorsDict(dict):
    """Dict that de-duplicates errors and counts repeats.

    ``error in errors`` has side effects by design: it records the error in
    ``raw``, stamps a random 5-letter ``identifier`` on it, and either bumps
    the count of an equivalent stored error (returning True) or stores the
    new error with count 0 (returning False).  ``len()`` reports the total
    number of errors seen, not the number of distinct entries.
    """

    def __init__(self, *args, **kwargs):
        self.raw = []
        super().__init__(*args, **kwargs)

    def __contains__(self, error):
        self.raw.append(error)
        error.identifier = "".join(
            random.choice("ABCDEFGHIJKLMNOPQRSTUVWXYZ") for _ in range(5)
        )
        for known in self:
            # Two errors are "the same" when their type and args both match.
            if type(known) is type(error) and known.args == error.args:
                self[known] += 1
                return True
        self[error] = 0
        return False

    def __len__(self):
        return len(self.raw)
errors = ErrorsDict()
def error_callback(update: Update, context: CallbackContext):
    """Global error handler: de-duplicate, pretty-print the traceback,
    upload it to nekobin and notify the bot owner."""
    if not update:
        return
    # ErrorsDict.__contains__ records the error and reports whether an
    # equivalent one was already seen; repeats are counted, not re-reported.
    if context.error in errors:
        return
    try:
        # Temporarily redirect pretty_errors' output into a buffer so the
        # formatted traceback can be captured as a string.
        stringio = io.StringIO()
        pretty_errors.output_stderr = stringio
        output = pretty_errors.excepthook(
            type(context.error), context.error, context.error.__traceback__
        )
        pretty_errors.output_stderr = sys.stderr
        pretty_error = stringio.getvalue()
        stringio.close()
    except:  # NOTE(review): bare except silently hides formatting failures
        pretty_error = "Failed to create pretty error."
    tb_list = traceback.format_exception(
        None, context.error, context.error.__traceback__
    )
    tb = "".join(tb_list)
    # Full report: pretty traceback plus update context plus raw traceback.
    pretty_message = (
        "{}\n"
        "-------------------------------------------------------------------------------\n"
        "An exception was raised while handling an update\n"
        "User: {}\n"
        "Chat: {} {}\n"
        "Callback data: {}\n"
        "Message: {}\n\n"
        "Full Traceback: {}"
    ).format(
        pretty_error,
        update.effective_user.id,
        update.effective_chat.title if update.effective_chat else "",
        update.effective_chat.id if update.effective_chat else "",
        update.callback_query.data if update.callback_query else "None",
        update.effective_message.text if update.effective_message else "No message",
        tb,
    )
    # Paste the report to nekobin; fall back to sending a file if that fails.
    key = requests.post(
        "https://nekobin.com/api/documents", json={"content": pretty_message}
    ).json()
    e = html.escape(f"{context.error}")
    if not key.get("result", {}).get("key"):
        with open("error.txt", "w+") as f:
            f.write(pretty_message)
        context.bot.send_document(
            OWNER_ID,
            open("error.txt", "rb"),
            caption=f"#{context.error.identifier}\n<b>An unknown error occured:</b>\n<code>{e}</code>",
            parse_mode="html",
        )
        return
    key = key.get("result").get("key")
    url = f"https://nekobin.com/{key}.py"
    context.bot.send_message(
        OWNER_ID,
        text=f"#{context.error.identifier}\n<b>An unknown error occured:</b>\n<code>{e}</code>",
        reply_markup=InlineKeyboardMarkup(
            [[InlineKeyboardButton("Nekobin", url=url)]]
        ),
        parse_mode="html",
    )
def list_errors(update: Update, context: CallbackContext):
    """/errors command: list recorded errors sorted by occurrence count.

    Restricted to DEV_USERS; silently ignores everyone else.
    """
    if update.effective_user.id not in DEV_USERS:
        return
    # Sort errors by how often they occurred, most frequent first.
    e = {
        k: v for k, v in sorted(errors.items(), key=lambda item: item[1], reverse=True)
    }
    msg = "<b>Errors List:</b>\n"
    for x in e:
        msg += f"• <code>{x}:</code> <b>{e[x]}</b> #{x.identifier}\n"
    msg += f"{len(errors)} have occurred since startup."
    # Telegram messages are capped at 4096 chars; fall back to a file upload.
    if len(msg) > 4096:
        with open("errors_msg.txt", "w+") as f:
            f.write(msg)
        context.bot.send_document(
            update.effective_chat.id,
            open("errors_msg.txt", "rb"),
            caption=f"Too many errors have occured..",
            parse_mode="html",
        )
        return
    update.effective_message.reply_text(msg, parse_mode="html")
dispatcher.add_error_handler(error_callback)
dispatcher.add_handler(CommandHandler("errors", list_errors))
| [
"noreply@github.com"
] | Ryu120.noreply@github.com |
62f181a55c8b5bec98db9fbd3f348e04fab51019 | 83e11300713850820d927b928d6f4e9287a22584 | /homu/server.py | 43144dc40f2b4ef21990aea2dfde3c28eaf4ea82 | [] | no_license | nagisa/homu | 52c1e4170d6be642867711e4f485e80fadfedf16 | c604b4478ac17e4d21e08c85c3ba86e1e23091a0 | refs/heads/master | 2021-01-18T05:17:26.638974 | 2015-01-20T22:52:48 | 2015-01-20T22:52:48 | 29,559,680 | 0 | 0 | null | 2015-01-20T22:59:43 | 2015-01-20T22:59:43 | null | UTF-8 | Python | false | false | 15,678 | py | from http.server import HTTPServer, BaseHTTPRequestHandler
from threading import Thread
import hmac
import json
import urllib.parse
from .main import PullReqState, parse_commands
from . import utils
from socketserver import ThreadingMixIn
import github3
import jinja2
import requests
import pkg_resources
class RequestHandler(BaseHTTPRequestHandler):
    """HTTP handler for homu's web UI (GET) and webhooks (POST).

    Shared state (repos, pull-request states, config, templates, ...) is
    attached to the server object by start() and reached via ``self.server``.
    """

    def do_GET(self):
        """Serve the index page, a repo's queue page, or the rollup flow."""
        if self.path == '/':
            resp_status = 200
            resp_text = self.server.tpls['index'].render(repos=sorted(self.server.repos))
        elif self.path.startswith('/queue/'):
            # Queue page: one row per tracked pull request of the repo.
            repo_name = self.path.split('/', 2)[2]
            repo = self.server.repos[repo_name]
            pull_states = sorted(self.server.states[repo_name].values())
            rows = []
            for state in pull_states:
                rows.append({
                    'status': 'approved' if state.status == '' and state.approved_by else state.status,
                    'priority': 'rollup' if state.rollup else state.priority,
                    'url': 'https://github.com/{}/{}/pull/{}'.format(repo.owner, repo.name, state.num),
                    'num': state.num,
                    'approved_by': state.approved_by,
                    'title': state.title,
                    'head_ref': state.head_ref,
                    'mergeable': 'yes' if state.mergeable is True else 'no' if state.mergeable is False else '',
                    'assignee': state.assignee,
                })
            resp_status = 200
            resp_text = self.server.tpls['queue'].render(
                repo_name = repo.name,
                states = rows,
                oauth_client_id = self.server.cfg['main']['oauth_client_id'],
                total = len(pull_states),
                approved = len([x for x in pull_states if x.approved_by]),
                rolled_up = len([x for x in pull_states if x.rollup]),
                failed = len([x for x in pull_states if x.status == 'failure' or x.status == 'error']),
            )
        elif self.path.startswith('/rollup'):
            # GitHub OAuth callback: exchange the code for a token, then merge
            # every approved rollup PR into the user's rollup branch and open
            # a combined pull request against the base repo.
            args = urllib.parse.parse_qs(self.path[self.path.index('?')+1:])
            code = args['code'][0]
            state = json.loads(args['state'][0])
            res = requests.post('https://github.com/login/oauth/access_token', data={
                'client_id': self.server.cfg['main']['oauth_client_id'],
                'client_secret': self.server.cfg['main']['oauth_client_secret'],
                'code': code,
            })
            args = urllib.parse.parse_qs(res.text)
            token = args['access_token'][0]
            repo = self.server.repos[state['repo']]
            repo_cfg = self.server.repo_cfgs[repo.name]
            user_gh = github3.login(token=token)
            user_repo = user_gh.repository(user_gh.user().login, repo.name)
            base_repo = user_gh.repository(repo.owner.login, repo.name)
            rollup_states = [x for x in self.server.states[repo.name].values() if x.rollup and x.approved_by]
            rollup_states.sort(key=lambda x: x.num)
            if not rollup_states:
                resp_status = 200
                resp_text = 'No pull requests are marked as rollup'
            else:
                # Reset the user's rollup branch to the current master tip,
                # creating the branch if it does not exist yet.
                master_sha = repo.ref('heads/' + repo_cfg['master_branch']).object.sha
                try:
                    utils.github_set_ref(
                        user_repo,
                        'heads/' + repo_cfg['rollup_branch'],
                        master_sha,
                        force=True,
                    )
                except github3.models.GitHubError:
                    user_repo.create_ref(
                        'refs/heads/' + repo_cfg['rollup_branch'],
                        master_sha,
                    )
                successes = []
                failures = []
                for state in rollup_states:
                    merge_msg = 'Rollup merge of #{} - {}, r={}\n\n{}'.format(
                        state.num,
                        state.head_ref,
                        state.approved_by,
                        state.body,
                    )
                    # 409 means a merge conflict: record as a failure and go on.
                    try: user_repo.merge(repo_cfg['rollup_branch'], state.head_sha, merge_msg)
                    except github3.models.GitHubError as e:
                        if e.code != 409: raise
                        failures.append(state.num)
                    else:
                        successes.append(state.num)
                title = 'Rollup of {} pull requests'.format(len(successes))
                body = '- Successful merges: {}\n- Failed merges: {}'.format(
                    ', '.join('#{}'.format(x) for x in successes),
                    ', '.join('#{}'.format(x) for x in failures),
                )
                try:
                    pull = base_repo.create_pull(
                        title,
                        repo_cfg['master_branch'],
                        user_repo.owner.login + ':' + repo_cfg['rollup_branch'],
                        body,
                    )
                except github3.models.GitHubError as e:
                    resp_status = 200
                    resp_text = e.response.text
                else:
                    # Redirect the browser to the freshly created rollup PR.
                    resp_status = 302
                    resp_text = pull.html_url
        else:
            resp_status = 404
            resp_text = ''
        self.send_response(resp_status)
        if resp_status == 302:
            self.send_header('Location', resp_text)
        else:
            self.send_header('Content-type', 'text/html')
        self.end_headers()
        self.wfile.write(resp_text.encode('utf-8'))

    def do_POST(self):
        """Handle GitHub and Buildbot webhook payloads."""
        payload = self.rfile.read(int(self.headers['Content-Length']))
        if self.path == '/github':
            info = json.loads(payload.decode('utf-8'))
            event_type = self.headers['X-Github-Event']
            # Verify the webhook HMAC signature before trusting the payload.
            hmac_method, hmac_sig = self.headers['X-Hub-Signature'].split('=')
            if hmac_sig != hmac.new(
                self.server.hmac_key,
                payload,
                hmac_method,
            ).hexdigest():
                return
            if event_type == 'pull_request_review_comment':
                # Only act on new review comments attached to the current head.
                action = info['action']
                original_commit_id = info['comment']['original_commit_id']
                head_sha = info['pull_request']['head']['sha']
                if action == 'created' and original_commit_id == head_sha:
                    repo_name = info['repository']['name']
                    pull_num = info['pull_request']['number']
                    body = info['comment']['body']
                    username = info['sender']['login']
                    repo_cfg = self.server.repo_cfgs[repo_name]
                    if parse_commands(
                        body,
                        username,
                        repo_cfg['reviewers'],
                        self.server.states[repo_name][pull_num],
                        self.server.my_username,
                        self.server.db,
                        realtime=True,
                        sha=original_commit_id,
                    ):
                        self.server.queue_handler()
            elif event_type == 'pull_request':
                # Keep the in-memory pull-request state in sync with GitHub.
                action = info['action']
                pull_num = info['number']
                repo_name = info['repository']['name']
                head_sha = info['pull_request']['head']['sha']
                if action == 'synchronize':
                    state = self.server.states[repo_name][pull_num]
                    state.head_advanced(head_sha)
                elif action in ['opened', 'reopened']:
                    state = PullReqState(pull_num, head_sha, '', self.server.repos[repo_name], self.server.db)
                    state.title = info['pull_request']['title']
                    state.body = info['pull_request']['body']
                    state.head_ref = info['pull_request']['head']['repo']['owner']['login'] + ':' + info['pull_request']['head']['ref']
                    state.base_ref = info['pull_request']['base']['ref']
                    state.mergeable = info['pull_request']['mergeable']
                    # FIXME: Needs to retrieve the status and the comments if the action is reopened
                    self.server.states[repo_name][pull_num] = state
                elif action == 'closed':
                    del self.server.states[repo_name][pull_num]
                elif action == 'assigned':
                    assignee = info['pull_request']['assignee']['login']
                    state = self.server.states[repo_name][pull_num]
                    state.assignee = assignee
                elif action == 'unassigned':
                    assignee = info['pull_request']['assignee']['login']
                    state = self.server.states[repo_name][pull_num]
                    if state.assignee == assignee:
                        state.assignee = ''
                else:
                    self.server.logger.debug('Invalid pull_request action: {}'.format(action))
            elif event_type == 'push':
                # A push invalidates mergeability of PRs targeting the branch
                # and advances the head of PRs whose head was pushed over.
                repo_name = info['repository']['name']
                ref = info['ref'][len('refs/heads/'):]
                for state in self.server.states[repo_name].values():
                    if state.base_ref == ref:
                        state.mergeable = None
                    if state.head_sha == info['before']:
                        state.head_advanced(info['after'])
            elif event_type == 'issue_comment':
                body = info['comment']['body']
                username = info['comment']['user']['login']
                repo_name = info['repository']['name']
                pull_num = info['issue']['number']
                repo_cfg = self.server.repo_cfgs[repo_name]
                # Only issues that are pull requests homu already tracks.
                if 'pull_request' in info['issue'] and pull_num in self.server.states[repo_name]:
                    if parse_commands(
                        body,
                        username,
                        repo_cfg['reviewers'],
                        self.server.states[repo_name][pull_num],
                        self.server.my_username,
                        self.server.db,
                        realtime=True,
                    ):
                        self.server.queue_handler()
            resp_status = 200
            resp_text = ''
        elif self.path == '/buildbot':
            info = urllib.parse.parse_qs(payload.decode('utf-8'))
            # Shared-secret check for Buildbot notifications.
            if info['key'][0] != self.server.cfg['main']['buildbot_key']:
                return
            for row in json.loads(info['packets'][0]):
                if row['event'] == 'buildFinished':
                    info = row['payload']['build']
                    found = False
                    rev = [x[1] for x in info['properties'] if x[0] == 'revision'][0]
                    # Find the pull-request state whose merge commit this
                    # build tested.
                    if rev:
                        for repo in self.server.repos.values():
                            for state in self.server.states[repo.name].values():
                                if state.merge_sha == rev:
                                    found = True
                                    break
                            if found: break
                    if found and info['builderName'] in state.build_res:
                        builder = info['builderName']
                        build_num = info['number']
                        build_succ = 'successful' in info['text'] or info['results'] == 0
                        url = '{}/builders/{}/builds/{}'.format(
                            self.server.repo_cfgs[repo.name]['buildbot_url'],
                            builder,
                            build_num,
                        )
                        if build_succ:
                            state.build_res[builder] = url
                            # Only act once every configured builder succeeded.
                            if all(state.build_res.values()):
                                desc = 'Test successful'
                                utils.github_create_status(repo, state.head_sha, 'success', url, desc, context='homu')
                                state.set_status('success')
                                urls = ', '.join('[{}]({})'.format(builder, url) for builder, url in sorted(state.build_res.items()))
                                state.add_comment(':sunny: {} - {}'.format(desc, urls))
                                # Fast-forward master to the tested merge
                                # commit for approved (non-try) builds.
                                if state.approved_by and not state.try_:
                                    try:
                                        utils.github_set_ref(
                                            repo,
                                            'heads/' + self.server.repo_cfgs[repo.name]['master_branch'],
                                            state.merge_sha
                                        )
                                    except github3.models.GitHubError:
                                        desc = 'Test was successful, but fast-forwarding failed'
                                        utils.github_create_status(repo, state.head_sha, 'error', url, desc, context='homu')
                                        state.set_status('error')
                                        state.add_comment(':eyes: ' + desc)
                                self.server.queue_handler()
                        else:
                            state.build_res[builder] = False
                            if state.status == 'pending':
                                desc = 'Test failed'
                                utils.github_create_status(repo, state.head_sha, 'failure', url, desc, context='homu')
                                state.set_status('failure')
                                state.add_comment(':broken_heart: {} - [{}]({})'.format(desc, builder, url))
                                self.server.queue_handler()
                    else:
                        self.server.logger.debug('Invalid commit from Buildbot: {}'.format(rev))
                elif row['event'] == 'buildStarted':
                    # A build starting on the queued revision frees the slot.
                    info = row['payload']['build']
                    rev = [x[1] for x in info['properties'] if x[0] == 'revision'][0]
                    if rev and self.server.buildbot_slots[0] == rev:
                        self.server.buildbot_slots[0] = ''
                        self.server.queue_handler()
            resp_status = 200
            resp_text = ''
        else:
            resp_status = 404
            resp_text = ''
        self.send_response(resp_status)
        self.send_header('Content-type', 'text/plain')
        self.end_headers()
        self.wfile.write(resp_text.encode('utf-8'))
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    # Handle each request in its own thread so a slow webhook or OAuth
    # round-trip cannot block other requests.
    pass
def start(cfg, states, queue_handler, repo_cfgs, repos, logger, buildbot_slots, my_username, db):
    """Configure and launch the webhook HTTP server on a background thread.

    Shared state is attached as attributes on the server object, where
    RequestHandler reaches it via ``self.server``.
    """
    server = ThreadedHTTPServer(('', cfg['main']['port']), RequestHandler)
    # Load the HTML templates bundled with the package.
    tpls = {}
    env = jinja2.Environment(
        loader = jinja2.FileSystemLoader(pkg_resources.resource_filename(__name__, 'html')),
        autoescape = True,
    )
    tpls['index'] = env.get_template('index.html')
    tpls['queue'] = env.get_template('queue.html')
    server.hmac_key = cfg['main']['hmac_key'].encode('utf-8')
    server.cfg = cfg
    server.states = states
    server.queue_handler = queue_handler
    server.repo_cfgs = repo_cfgs
    server.repos = repos
    server.logger = logger
    server.buildbot_slots = buildbot_slots
    server.tpls = tpls
    server.my_username = my_username
    server.db = db
    # Serve forever without blocking the caller.
    Thread(target=server.serve_forever).start()
| [
"vcs@barosl.com"
] | vcs@barosl.com |
faae7838e3e5f43c3c47ab1074ba09cf723cda51 | 4d89bb603197d18470076cccbe046075ba1cd212 | /01/02.py | 9ac9172456558ca6e60c96e3d42c2a55200f6b7f | [] | no_license | yanoooooo/NLP100 | f1c930d2cb23f06527d044fdeb3f51bfcad9e20f | c340bc215cd81264b3f12a64e3a28613f8c88999 | refs/heads/master | 2020-03-21T03:28:44.838261 | 2018-02-17T15:16:37 | 2018-02-17T15:16:37 | 138,054,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | # encoding: utf-8
import pycolor

# NLP 100 drills, exercise 02: interleave the characters of two strings.
title = pycolor.GREEN
title += "02. 「パトカー」+「タクシー」=「パタトクカシーー」"
title += "\n"
title += " 「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ."
title += pycolor.END
print(title)

str1 = u"パトカー"
str2 = u"タクシー"
result = u""
for i, j in zip(str1, str2):  # zip lets us iterate over both strings in lockstep
    result += i + j
print(result)
"miserablescaromioben@gmail.com"
] | miserablescaromioben@gmail.com |
d5fe2af0d0b1a6736f230d7bc4b10ac1509673c7 | 923ba584101482cdd208e567ab2abae4526b48ac | /refactor/cgat_refactor.py | b85790791690a9e70873ed33c1f77bd79557941b | [] | no_license | Charlie-George/cgat | eb00ef4879ceae460634046eb01c34c5ea1c7106 | 269cb235e549ce617e92efaea65a2eff953c2ed9 | refs/heads/master | 2021-01-15T08:32:46.090830 | 2014-09-29T13:47:26 | 2014-09-29T13:47:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,262 | py | ################################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id: script_template.py 2871 2010-03-03 10:20:44Z andreas $
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#################################################################################
'''
cgat_refactor.py - refactor CGAT Code
=====================================
:Author:
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
Usage
-----
Example::
python cgat_refactor.py --rename=rename.txt
Type::
python cgat_refactor.py --help
for command line help.
Documentation
-------------
Code
----
'''
import os
import sys
import re
import optparse
import glob
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
def main( argv = None ):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv: argv = sys.argv
# setup command line parser
parser = E.OptionParser( version = "%prog version: $Id: script_template.py 2871 2010-03-03 10:20:44Z andreas $",
usage = globals()["__doc__"] )
parser.add_option("-r", "--rename", dest="rename", type="string",
help="rename scripts" )
parser.add_option( "--split-prefix", dest="split_prefix", type="string",
help="move scripts with prefix to subdirectory" )
parser.add_option("-n", "--dry-run", dest="dry_run", action = "store_true",
help="dry run, do not implement any changes" )
parser.set_defaults(
scriptsdir = "scripts",
dirs = ["CGAT", "CGATPipelines", "scripts", "makefiles" ],
dry_run = False )
## add common options (-h/--help, ...) and parse command line
(options, args) = E.Start( parser, argv = argv )
scriptsdir = options.scriptsdir
counter = E.Counter()
map_old2new = {}
if options.rename:
with IOTools.openFile( options.rename, "r") as inf:
for line in inf:
if line.startswith("#"): continue
if line.startswith("old"): continue
try:
old, new = line[:-1].split("\t")
except ValueError:
continue
if not os.path.exists( os.path.join( scriptsdir, old )):
E.warn( "%s does not exist - no renaming" % old )
continue
map_old2new[old] = new
elif options.split_prefix:
if not os.path.exists( os.path.join( scriptsdir, options.split_prefix )):
E.warn( "destination %s does not exist - no renaming" % options.split_prefix )
return
scripts = glob.glob( "%s/%s_*.py" % (scriptsdir, options.split_prefix ))
if len(scripts) == 0:
E.info("nothing to change")
return
for script in scripts:
scriptname = os.path.basename( script )
newname = scriptname[len(options.split_prefix)+1:]
map_old2new[ scriptname ] = "%s/%s" % (options.split_prefix, newname )
if len(map_old2new) == 0:
E.info("nothing to change")
return
for old, new in map_old2new.items():
statement = "hg mv %(scriptsdir)s/%(old)s %(scriptsdir)s/%(new)s" % locals()
counter.renamed += 1
if options.dry_run:
E.info( statement )
else:
E.run( statement )
for d in options.dirs:
for root, dirs, files in os.walk(d):
for f in files:
if f.endswith(".pyc"): continue
fn = os.path.join( root, f )
with IOTools.openFile( fn, "r") as inf:
old_data = inf.read()
changed = False
for old_name, new_name in map_old2new.items():
new_data = re.sub( old_name, new_name, old_data )
if old_data != new_data:
changed = True
E.info( "changed: %s : %s to %s" % (fn, old_name, new_name))
old_data = new_data
if changed:
counter.changed += 1
if not options.dry_run:
with IOTools.openFile( fn, "w" ) as outf:
outf.write( new_data )
E.info( str(counter) )
## write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit( main( sys.argv) )
| [
"andreas.heger@gmail.com"
] | andreas.heger@gmail.com |
2c0fd51aa69b9bf55727c5faedccda630d6d677d | af2791239bc8dbf9d7aa94e8dcde0674180abd2f | /main.py | f3e247b62a0226d1adc3ae0bd75a1060649bae40 | [] | no_license | zhenpingli/TDTSystem | 8ac9be039a59293650355b8af7b6fe548c4c6671 | a06103ddf36a69b026cdc899fb222bb89da5b138 | refs/heads/master | 2021-01-08T04:07:28.472682 | 2019-07-25T08:09:53 | 2019-07-25T08:09:53 | 241,907,760 | 1 | 0 | null | 2020-02-20T14:39:42 | 2020-02-20T14:39:41 | null | UTF-8 | Python | false | false | 510 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'test.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow
from qt.mainwindow import Ui_MainWindow
if __name__ == '__main__':
app = QApplication(sys.argv)
ui = Ui_MainWindow()
ui.setupUi()
ui.setWindowTitle('食品安全话题检测与追踪系统')
ui.show()
sys.exit(app.exec_())
| [
"695573425@qq.com"
] | 695573425@qq.com |
82f7f9d193c63889362c78e380ec57e41e33a5b9 | 0aaf6ce59d305428611958a5bf6a5831407bca65 | /advisor_server/dashboard/urls.py | a3e10067ce02abe9f214020e5f59e6f717896c45 | [
"Apache-2.0"
] | permissive | mlaradji/advisor | d770043a5307af1037cad6be1c449d541acf87b0 | 8ec0f8b64809daa80a20d717b4e45ad9fbcadbb0 | refs/heads/master | 2023-05-26T05:59:50.169748 | 2018-10-18T10:34:42 | 2018-10-18T10:34:42 | 154,219,666 | 0 | 0 | Apache-2.0 | 2023-04-29T17:00:36 | 2018-10-22T21:27:59 | Jupyter Notebook | UTF-8 | Python | false | false | 941 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^v1/studies$', views.v1_studies, name='v1_studies'),
url(r'^v1/studies/(?P<study_name>[\w.-]+)$', views.v1_study,
name='v1_study'),
url(r'^v1/studies/(?P<study_name>[\w.-]+)/suggestions$',
views.v1_study_suggestions,
name='v1_study_suggestions'),
url(r'^v1/trials$', views.v1_trials, name='v1_trials'),
url(r'^v1/studies/(?P<study_name>[\w.-]+)/trials/(?P<trial_id>[\w.-]+)$',
views.v1_trial,
name='v1_trial'),
url(r'^v1/studies/(?P<study_name>[\w.-]+)/trials/(?P<trial_id>[\w.-]+)/metrics$',
views.v1_study_trial_metrics,
name='v1_study_trial_metrics'),
url(r'^v1/studies/(?P<study_name>[\w.-]+)/trials/(?P<trial_id>[\w.-]+)/metrics/(?P<metric_id>[\w.-]+)$',
views.v1_study_trial_metric,
name='v1_study_trial_metric'),
]
| [
"tobeg3oogle@gmail.com"
] | tobeg3oogle@gmail.com |
38525cf23e282e1f840a5913ea2c4bec660b41b4 | f81c6c886b519b335979928345a50cfab2a46d5c | /app/__init__.py | 90789b599acf14fa6d5c3dade21ab28dc7514446 | [] | no_license | banalna/blogeek | d872cfa5c10c8607045f19d81b92bcdb6fd36031 | cbdb760343a00ffdd92e9fb903bb6db406917c6a | refs/heads/master | 2023-09-01T20:27:51.350960 | 2020-08-30T00:42:29 | 2020-08-30T00:42:29 | 281,817,897 | 0 | 0 | null | 2021-06-02T02:37:09 | 2020-07-23T01:13:00 | Python | UTF-8 | Python | false | false | 3,874 | py | # -*- coding: utf-8 -*-
import os
import logging
from logging.handlers import RotatingFileHandler, SMTPHandler
import rq
from flask import Flask, current_app
from flask import request
from flask_mail import Mail
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_babel import Babel
from flask_babel import lazy_gettext as _l
from flask_bootstrap import Bootstrap
from flask_socketio import SocketIO, emit, join_room, leave_room, \
close_room, rooms, disconnect
from elasticsearch import Elasticsearch
from redis import Redis
from config import Config
app = Flask(__name__)
# Flask extensions
db = SQLAlchemy()
migrate = Migrate()
login = LoginManager()
login.login_view = 'auth.login'
login.login_message = _l('Please log in to access this page.')
# login.login_message = 'your msg'
mail = Mail()
bootstrap = Bootstrap()
moment = Moment()
socketio = SocketIO()
# for generate pot: pybabel extract -F babel.cfg -k _l -o messages.pot .
# for generate mo: pybabel init -i messages.pot -d app/translations -l <needed lang>
# for update mo: pybabel update -i messages.pot -d app/translations
# for compile po: pybabel compile -d app/translations
babel = Babel()
def create_app(config_class=Config):
_app = Flask(__name__)
_app.config.from_object(config_class)
db.init_app(_app)
migrate.init_app(_app, db)
login.init_app(_app)
mail.init_app(_app)
bootstrap.init_app(_app)
moment.init_app(_app)
babel.init_app(_app)
socketio.init_app(_app)
_app.elasticsearch = Elasticsearch([_app.config['ELASTICSEARCH_URL']]) if _app.config['ELASTICSEARCH_URL'] else None
_app.redis = Redis.from_url(_app.config['REDIS_URL'])
_app.task_queue = rq.Queue('blogeek-tasks', connection=_app.redis)
# register scheme of app
from app.errors import bp as errors_bp
_app.register_blueprint(errors_bp)
from app.auth import bp as auth_bp
_app.register_blueprint(auth_bp, url_prefix='/auth')
from app.main import bp as main_bp
_app.register_blueprint(main_bp)
from app.api import bp as api_bp
_app.register_blueprint(api_bp, url_prefix='/api')
if not _app.debug and not _app.testing:
# send mails
if _app.config['MAIL_SERVER']:
auth = None
if _app.config['MAIL_USERNAME'] or _app.config['MAIL_PASSWORD']:
auth = (_app.config['MAIL_USERNAME'], _app.config['MAIL_PASSWORD'])
secure = None
if _app.config['MAIL_USE_TLS']:
secure = ()
mail_handler = SMTPHandler(
mailhost=(_app.config['MAIL_SERVER'], _app.config['MAIL_PORT']),
fromaddr='no-reply@' + _app.config['MAIL_SERVER'],
toaddrs=_app.config['ADMINS'], subject='Blogeek Failure',
credentials=auth, secure=secure)
mail_handler.setLevel(logging.ERROR)
_app.logger.addHandler(mail_handler)
# write to file
if _app.config['LOG_TO_STDOUT']:
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
app.logger.addHandler(stream_handler)
else:
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler('logs/blogeek.log', maxBytes=10240, backupCount=10)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.INFO)
_app.logger.addHandler(file_handler)
return _app
# for auto choice lang
@babel.localeselector
def get_locale():
return request.accept_languages.best_match(current_app.config['LANGUAGES'])
from app import models
| [
"judas.priest999@gmail.com"
] | judas.priest999@gmail.com |
0ee6776c0883d67cdca9be3f1a4a000144b1a244 | 798a2885b561fb9b755ba961574b71a6f7cd1c81 | /projects/4/single-loader-generator/single-loader.py | c500570c562f04f4a3828576e09ab1be439dc9ea | [] | no_license | dcalsky/GC-web-class | 53101d7ddb1007c7335639348d9b020ff7aad077 | 3d7f557dc88a9aec51e375a2c17f8e90b3dd9515 | refs/heads/master | 2021-01-13T11:06:32.125362 | 2017-10-30T13:53:09 | 2017-10-30T13:53:09 | 68,877,124 | 10 | 2 | null | 2020-01-19T05:15:20 | 2016-09-22T02:34:46 | HTML | UTF-8 | Python | false | false | 483 | py | from math import *
r = 40
num = 8
color = '#0cf'
circle = [-3, 0, -3] + [-10 for x in range(num - 3)]
vec = [(sin(2 * pi / num * x), cos(2 * pi / num * x)) for x in range(num)]
print(vec)
for i in range(num):
print( '%.1f%% {' % (100 / num * i))
print('box-shadow:')
for j in range(num):
x, y = map(lambda v: r * v, vec[j])
print("%.2fpx %.2fpx %.2fpx %.2fpx %s%s" % (x, y, 0, circle[j], color, ',' if j < num - 1 else ';'))
print('}')
circle = circle[-1:] + circle[:-1]
| [
"softech86@163.com"
] | softech86@163.com |
e09e9776a1040c522c942498582ce34863f824cd | 8f6853461a3dee85ef261f46fe1cd5ca3b36cac0 | /fourth_week/venv/bin/easy_install-3.7 | a1e2f343a55fe4cd1ebd2ee7f957e6ea748525a2 | [] | no_license | falcon-33/PycharmProjects | b6ddf5431b158392c5f8581478f362bb64e68a2a | f827654d4c3ebaec9958743d19c6def416ce64a7 | refs/heads/master | 2021-02-13T17:53:37.791474 | 2020-03-22T12:15:38 | 2020-03-22T12:15:38 | 244,718,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | 7 | #!/home/falcon33/PycharmProjects/fourth_week/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"artyomsokolik@gmail.com"
] | artyomsokolik@gmail.com |
587b4dbc53a6170666ae9248085ddfce737b43ce | 83634a42113cf17dade30efb6a18cd8e4c98343d | /python/fig/eff.py | 0c88ffd17db4c459763dbf993e817f2b87893566 | [] | no_license | cms-bph/BToKstarMuMu | 3e2299bc39688e09810ab8e26872ea6a8004c55b | cf8bff2971383dc9f67f3fe1aad1b4bef87c4a78 | refs/heads/master | 2021-01-18T22:35:43.668173 | 2016-11-22T03:52:39 | 2016-11-22T03:52:39 | 9,442,557 | 2 | 3 | null | 2016-11-22T03:52:39 | 2013-04-15T06:57:25 | C++ | UTF-8 | Python | false | false | 5,172 | py | """
Module for Efficiency Figures
"""
__author__ = "Xin Shi <Xin.Shi@cern.ch>"
__copyright__ = "Copyright (c) Xin Shi"
import sys
from tls import *
from array import array
from ROOT import TH1F, TCanvas, TClonesArray, AddressOf, TLorentzVector
from atr.cuts import select_b0s
def main(args, figname):
if args[0] == 'q2mumu':
q2mumu(args[1:], figname)
else:
raise NameError(args)
def q2mumu(args, figname):
datatype = args[0]
label = args[1]
test = get_options(args, 'test')
batch = get_options(args, 'batch')
if batch:
cmd = create_batch_cmd()
bashname = '%s.sh' %figname
bashfile = create_bashfile_cmd(cmd, bashname, label, test=test)
logfile = set_logfile('fig', datatype, label, figname)
jobname = 'effq2mm'
bsub_jobs(logfile, jobname, bashfile, test)
return
figfile = set_figfile(figname, label, '.pdf', test=test)
rootfile = atr.rootfile(datatype, label, test=test)
obj = atr.root_tree_obj(datatype, label)
chain = root_chain(rootfile, obj)
canvas = TCanvas("aCanvas", "Canvas", 600, 600)
#h_mm_gen = TH1F('mumumass_gen', '#mu^{+} #mu^{-} mass', 100, 0, 25)
#h_mm_reco = TH1F('mumumass_reco', '#mu^{+} #mu^{-} mass', 100, 0, 25)
lower = array('f', [0, 2, 4.3, 8.68, 10.09, 12.86, 14.18, 16, 19, 25])
h_mm_gen = TH1F('mumumass_gen', '#mu^{+} #mu^{-} mass', 9, lower)
h_mm_reco = TH1F('mumumass_reco', '#mu^{+} #mu^{-} mass', 9, lower)
if 'B2KstarMuMu/RECO_100M_v1.1' in label:
Gen_muonPos_P4_ = TClonesArray('TLorentzVector')
chain.SetBranchAddress('Gen_muonPos_P4', AddressOf(Gen_muonPos_P4_))
Gen_muonNeg_P4_ = TClonesArray('TLorentzVector')
chain.SetBranchAddress('Gen_muonNeg_P4', AddressOf(Gen_muonNeg_P4_))
MuPP4_ = TClonesArray('TLorentzVector')
chain.SetBranchAddress('MuPP4', AddressOf(MuPP4_))
MuMP4_ = TClonesArray('TLorentzVector')
chain.SetBranchAddress('MuMP4', AddressOf(MuMP4_))
KstarP4_ = TClonesArray('TLorentzVector')
chain.SetBranchAddress('KstarP4', AddressOf(KstarP4_))
elif 'B2KstarMuMu/RECO_100M_v1.2' in label or \
'B2KstarMuMu/RECO_100M_v1.4' in label or \
'B2KstarMuMu/RECO_100M_v1.5' in label:
Gen_muonPos_P4_ = TClonesArray('TLorentzVector')
chain.SetBranchAddress('Gen_muonPos_P4', AddressOf(Gen_muonPos_P4_))
Gen_muonNeg_P4_ = TClonesArray('TLorentzVector')
chain.SetBranchAddress('Gen_muonNeg_P4', AddressOf(Gen_muonNeg_P4_))
reco_mup_p4_ = TLorentzVector()
chain.SetBranchAddress('reco_mup_p4', AddressOf(reco_mup_p4_))
reco_mum_p4_ = TLorentzVector()
chain.SetBranchAddress('reco_mum_p4', AddressOf(reco_mum_p4_))
else:
raise NameError(label)
ntot = chain.GetEntries()
if test:
ntot = 1000
if 'B2KstarMuMu/RECO_100M_v1.2' in label or \
'B2KstarMuMu/RECO_100M_v1.4' in label or \
'B2KstarMuMu/RECO_100M_v1.5' in label:
cuts_label = '5ifbv2.6.2'
cuts = select_b0s(cuts_label)
sys.stdout.write('Processing %s events ...\n' %ntot)
sys.stdout.flush()
nfill_gen = 0
nfill_reco = 0
for i in xrange(ntot):
chain.LoadTree(i)
chain.GetEntry(i)
if len(chain.Gen_muonPos_P4) > 0:
mup4_gen = chain.Gen_muonPos_P4[0]
mum4_gen = chain.Gen_muonNeg_P4[0]
try:
mumu_gen = mup4_gen + mum4_gen
except TypeError:
continue
h_mm_gen.Fill(mumu_gen.M2())
nfill_gen += 1
if '/HLT' in label and not cuts.pass_trigger(chain):
continue
if 'OfflineHLT' in label and not chain.offline_hlt_passed:
continue
if 'MCmatched' in label and not chain.mc_matched:
continue
if 'B2KstarMuMu/RECO_100M_v1.1' in label and chain.nXcand > 0:
if label in ['B2KstarMuMu/RECO_100M_v1.1/Kstar'] and \
not cuts.pass_kstarmass(chain, 0):
continue
if label in ['B2KstarMuMu/RECO_100M_v1.1/lxysig'] and \
not cuts.pass_lxysig(chain, 0):
continue
mup4 = chain.MuPP4[0]
mum4 = chain.MuMP4[0]
mumu = mup4 + mum4
h_mm_reco.Fill(mumu.M2())
nfill_reco += 1
if 'B2KstarMuMu/RECO_100M_v1.2' in label:
mup4 = chain.reco_mup_p4
mum4 = chain.reco_mum_p4
mumu = mup4 + mum4
h_mm_reco.Fill(mumu.M2())
nfill_reco += 1
if 'B2KstarMuMu/RECO_100M_v1.4' in label or \
'B2KstarMuMu/RECO_100M_v1.5' in label:
h_mm_reco.Fill(mumu_gen.M2())
nfill_reco += 1
sys.stdout.write('Filled events: GEN: %s, RECO: %s. \n' %(nfill_gen, nfill_reco))
hist = h_mm_reco
hist.Divide(h_mm_gen)
hist.SetTitle('RECO Efficiency')
hist.GetXaxis().SetTitle('q^{2} (GeV^{2}/c^{2})')
hist.Draw()
canvas.SaveAs(figfile)
hist.Delete()
| [
"xshi@xshi.org"
] | xshi@xshi.org |
f742ca945375d122388361fc41170823b4f034f4 | 60ce48bcfc0bfd89aca0419f7c65b2c0cacc0a7c | /basics/moperations/mathoperations.py.html | e50274809b86d16fbc8b495c3c7a1023159c5454 | [] | no_license | sjangada/python | b54af7b9ae33ed7d4a01f664d1cc26d89db0ca7a | e3d1521ecdc2bed866cd7bd68ff2293529a2f65c | refs/heads/main | 2023-06-19T18:07:06.761188 | 2021-07-22T14:06:53 | 2021-07-22T14:06:53 | 318,919,301 | 1 | 1 | null | 2021-07-20T02:03:06 | 2020-12-06T00:37:28 | Jupyter Notebook | UTF-8 | Python | false | false | 1,006 | html | #!/usr/bin/env python
# coding: utf-8
# In[23]:
f = open('input.txt', 'r')
# i tried to make functions for finding x, opr, and y and then putting those functions in the operation function, but that didnt work
nf = open('output.txt', 'w')
nf.write("")
nf.close()
nf = open('output.txt', 'a')
def operation():
char = f.read(2)
x = int(char)
opr = f.read(3)
char = f.read(2)
y = int(char)
if opr == " + ":
result = x + y
elif opr == " - ":
result = x - y
elif opr == " * ":
result = x * y
elif opr == " / ":
result = x / y
else:
print("there seems to be a mistake")
nf.write(str(x))
nf.write(str(opr))
nf.write(str(y))
nf.write(" = ")
nf.write(str(result))
nf.write("\n")
count = 0
while count < 8:
operation()
count += 1
f.close()
nf.close()
nf = open("output.txt", "r")
print(nf.read())
nf.close()
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
| [
"noreply@github.com"
] | sjangada.noreply@github.com |
670b7c28748c39f21a98d41a9dadc64c76c99c48 | cb9c9e5d32bd223529b8738a37092338748c03b6 | /pokerthproto/poker.py | e35cc1fb185dcc85d3cfbc5d45c805ecc6b24a4b | [] | no_license | FlorianWilhelm/pokerthproto | 8cd7d28af0b6bb42c587ec15a7167894bb8588bb | d3668c15fcdc6aaca0d72f8a28dd36b3887bdaa8 | refs/heads/master | 2021-01-25T08:48:59.965296 | 2014-07-20T15:52:37 | 2014-07-21T16:52:17 | 20,340,212 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,907 | py | # -*- coding: utf-8 -*-
"""
All data structures related to poker like poker actions, cards, rounds etc.
"""
from __future__ import print_function, absolute_import, division
from . import pokerth_pb2
__author__ = 'Florian Wilhelm'
__copyright__ = 'Florian Wilhelm'
# suits of poker cards (diamonds, hearts, spades, clubs)
suits = ['d', 'h', 's', 'c']
# ranks of poker cards (Ace, Jack, Queens, King, Ten, ...)
ranks = ['2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K', 'A']
# deck of poker cards
deck = [r + s for r in ranks for s in suits]
class Action(object):
"""
Enum of possible player actions in poker
"""
NONE = pokerth_pb2.netActionNone # for posting blinds
FOLD = pokerth_pb2.netActionFold
CHECK = pokerth_pb2.netActionCheck
CALL = pokerth_pb2.netActionCall
BET = pokerth_pb2.netActionBet
RAISE = pokerth_pb2.netActionRaise
ALLIN = pokerth_pb2.netActionAllIn
class Round(object):
"""
Enum of poker rounds where posting blinds is considered a round too.
"""
SMALL_BLIND = pokerth_pb2.netStatePreflopSmallBlind
BIG_BLIND = pokerth_pb2.netStatePreflopBigBlind
PREFLOP = pokerth_pb2.netStatePreflop
FLOP = pokerth_pb2.netStateFlop
TURN = pokerth_pb2.netStateTurn
RIVER = pokerth_pb2.netStateRiver
# Order of poker rounds
poker_rounds = [Round.SMALL_BLIND, Round.BIG_BLIND, Round.PREFLOP, Round.FLOP,
Round.TURN, Round.RIVER]
def cardToInt(card):
"""
Converts a poker card into an integer representation.
:param card: poker card like 2d, Th, Qc etc.
:return: integer
"""
assert len(card) == 2
return 13*suits.index(card[1]) + ranks.index(card[0])
def intToCard(i):
"""
Converts an integer into a poker card
:param i: integer
:return: poker card like 2d, Th, Qc etc.
"""
assert 0 <= i <= 51
return ranks[i % 13] + suits[i // 13]
| [
"Florian.Wilhelm@gmail.com"
] | Florian.Wilhelm@gmail.com |
1d2d1837bc0443a4b595694cd247e0ac5e368747 | 229d8b1af3c5f407d263863449eb3e8dff72b3fb | /venv/bin/easy_install | faf999cb2896d434c6ee3f6b6dd678bb75b23aa0 | [] | no_license | zuest/instapyMiniProject | 94455d769579cc4e703ebdb564eceaf4dae082e5 | 632ce5f535cf041e8f809d8f26ff3c87d2b4c7c0 | refs/heads/master | 2020-06-04T11:36:18.172327 | 2019-06-14T21:04:11 | 2019-06-14T21:04:11 | 192,005,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | #!/Users/macbe/PycharmProjects/py-instapy-project/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"zuests@gmail.com"
] | zuests@gmail.com | |
247326b5c0c2161663922ce88b3834c7b55e3880 | b72e42f7f15ea8d359512cc0fe524f5407f358e5 | /CS50_web_dev/src/src4/passengers1.py | 58da8bf20b745ba44169503aada18219753110ac | [
"MIT"
] | permissive | ChuaCheowHuan/web_app_DPTH | ec9f96d66c69ebd7e04df8d4b92578a3aaa7e392 | dd901e6359fe76f15b69701c53f76666c3219173 | refs/heads/master | 2021-06-18T11:31:10.959634 | 2020-07-23T04:04:52 | 2020-07-23T04:04:52 | 205,556,446 | 0 | 0 | MIT | 2021-06-10T21:55:14 | 2019-08-31T14:42:35 | HTML | UTF-8 | Python | false | false | 938 | py | from flask import Flask, render_template, request
from models import *
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv("DATABASE_URL")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.init_app(app)
def main():
flights = Flight.query.all()
for flight in flights:
print(f"Flight {flight.id}: {flight.origin} to {flight.destination}, {flight.duration} minutes.")
# Prompt user to choose a flight.
flight_id = int(input("\nFlight ID: "))
flight = Flight.query.get(flight_id)
# Make sure flight is valid.
if flight is None:
print("Error: No such flight.")
return
passengers = Passenger.query.filter_by(flight_id=flight_id).all()
print("\nPassengers:")
for passenger in passengers:
print(passenger.name)
if len(passengers) == 0:
print("No passengers.")
if __name__ == "__main__":
with app.app_context():
main()
| [
"17569306+ChuaCheowHuan@users.noreply.github.com"
] | 17569306+ChuaCheowHuan@users.noreply.github.com |
74c24be118435be72f012a2130e6ba667651adb6 | 0aa58b87f0e913c8edaf35352c7306d6e47bd158 | /app/blog/urls.py | 24895a690b14d86b05ce94d45ec1c27fe65fb350 | [] | no_license | AlexUM97/prototipo_app | ed63ced021b1d8884c58b48edaf4bed21638b05f | 36f49095ee82636555669e178a9b79d0459c075e | refs/heads/master | 2021-07-24T20:50:46.193279 | 2019-07-10T10:05:22 | 2019-07-10T10:05:22 | 196,181,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.post_list, name='post_list'),
path('post/<int:pk>/', views.post_detail, name='post_detail'),
path('post/new', views.post_new, name='post_new'),
path('post/<int:pk>/edit/', views.post_edit, name='post_edit'),
path('post/<int:pk>/email/', views.send_email, name='send_email'),
] | [
"aumoreno97@hotmail.com"
] | aumoreno97@hotmail.com |
df9d2873657e113300916c23ba81894feac367ab | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/84e7f69b07298bc92449e3f4fe241fd427cfb7e9706b274b6410bc04b30fee6a/pandas/_libs/groupby.py | e344975788a3e8c457706c98e8f9bf9238624185 | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,451 | py | # encoding: utf-8
# module pandas._libs.groupby
# from C:\Users\Doly\Anaconda3\lib\site-packages\pandas\_libs\groupby.cp37-win_amd64.pyd
# by generator 1.147
# no doc
# imports
import builtins as __builtins__ # <module 'builtins' (built-in)>
import numpy as np # C:\Users\Doly\Anaconda3\lib\site-packages\numpy\__init__.py
from pandas._libs.algos import (groupsort_indexer,
take_2d_axis1_float64_float64)
# Variables with simple values
_int64_max = 9223372036854775807
# functions
def group_add_complex128(*args, **kwargs): # real signature unknown
""" Only aggregates on axis=0 """
pass
def group_add_complex64(*args, **kwargs): # real signature unknown
""" Only aggregates on axis=0 """
pass
def group_add_float32(*args, **kwargs): # real signature unknown
""" Only aggregates on axis=0 """
pass
def group_add_float64(*args, **kwargs): # real signature unknown
""" Only aggregates on axis=0 """
pass
def group_any_all(*args, **kwargs): # real signature unknown
"""
Aggregated boolean values to show truthfulness of group elements.
Parameters
----------
out : array of values which this method will write its results to
labels : array containing unique label for each group, with its
ordering matching up to the corresponding record in `values`
values : array containing the truth value of each element
mask : array indicating whether a value is na or not
val_test : str {'any', 'all'}
String object dictating whether to use any or all truth testing
skipna : boolean
Flag to ignore nan values during truth testing
Notes
-----
This method modifies the `out` parameter rather than returning an object.
The returned values will either be 0 or 1 (False or True, respectively).
"""
pass
def group_cummax(*args, **kwargs): # real signature unknown
"""
Cumulative maximum of columns of `values`, in row groups `labels`.
Parameters
----------
out : array
Array to store cummax in.
values : array
Values to take cummax of.
labels : int64 array
Labels to group by.
ngroups : int
Number of groups, larger than all entries of `labels`.
is_datetimelike : bool
True if `values` contains datetime-like entries.
Notes
-----
This method modifies the `out` parameter, rather than returning an object.
"""
pass
def group_cummin(*args, **kwargs): # real signature unknown
"""
Cumulative minimum of columns of `values`, in row groups `labels`.
Parameters
----------
out : array
Array to store cummin in.
values : array
Values to take cummin of.
labels : int64 array
Labels to group by.
ngroups : int
Number of groups, larger than all entries of `labels`.
is_datetimelike : bool
True if `values` contains datetime-like entries.
Notes
-----
This method modifies the `out` parameter, rather than returning an object.
"""
pass
def group_cumprod_float64(*args, **kwargs): # real signature unknown
"""
Cumulative product of columns of `values`, in row groups `labels`.
Parameters
----------
out : float64 array
Array to store cumprod in.
values : float64 array
Values to take cumprod of.
labels : int64 array
Labels to group by.
ngroups : int
Number of groups, larger than all entries of `labels`.
is_datetimelike : bool
Always false, `values` is never datetime-like.
skipna : bool
If true, ignore nans in `values`.
Notes
-----
This method modifies the `out` parameter, rather than returning an object.
"""
pass
def group_cumsum(*args, **kwargs): # real signature unknown
"""
Cumulative sum of columns of `values`, in row groups `labels`.
Parameters
----------
out : array
Array to store cumsum in.
values : array
Values to take cumsum of.
labels : int64 array
Labels to group by.
ngroups : int
Number of groups, larger than all entries of `labels`.
is_datetimelike : bool
True if `values` contains datetime-like entries.
skipna : bool
If true, ignore nans in `values`.
Notes
-----
This method modifies the `out` parameter, rather than returning an object.
"""
pass
def group_fillna_indexer(*args, **kwargs): # real signature unknown
"""
Indexes how to fill values forwards or backwards within a group.
Parameters
----------
out : array of int64_t values which this method will write its results to
Missing values will be written to with a value of -1
labels : array containing unique label for each group, with its ordering
matching up to the corresponding record in `values`
mask : array of int64_t values where a 1 indicates a missing value
direction : {'ffill', 'bfill'}
Direction for fill to be applied (forwards or backwards, respectively)
limit : Consecutive values to fill before stopping, or -1 for no limit
Notes
-----
This method modifies the `out` parameter rather than returning an object
"""
pass
def group_last(*args, **kwargs): # real signature unknown
""" Only aggregates on axis=0 """
pass
def group_max(*args, **kwargs): # real signature unknown
""" Only aggregates on axis=0 """
pass
def group_mean_float32(*args, **kwargs): # real signature unknown
pass
def group_mean_float64(*args, **kwargs): # real signature unknown
pass
def group_median_float64(*args, **kwargs): # real signature unknown
""" Only aggregates on axis=0 """
pass
def group_min(*args, **kwargs): # real signature unknown
""" Only aggregates on axis=0 """
pass
def group_nth(*args, **kwargs): # real signature unknown
""" Only aggregates on axis=0 """
pass
def group_ohlc_float32(*args, **kwargs): # real signature unknown
""" Only aggregates on axis=0 """
pass
def group_ohlc_float64(*args, **kwargs): # real signature unknown
""" Only aggregates on axis=0 """
pass
def group_prod_float32(*args, **kwargs): # real signature unknown
""" Only aggregates on axis=0 """
pass
def group_prod_float64(*args, **kwargs): # real signature unknown
""" Only aggregates on axis=0 """
pass
def group_quantile(*args, **kwargs): # real signature unknown
"""
Calculate the quantile per group.
Parameters
----------
out : ndarray
Array of aggregated values that will be written to.
labels : ndarray
Array containing the unique group labels.
values : ndarray
Array containing the values to apply the function against.
q : float
The quantile value to search for.
Notes
-----
Rather than explicitly returning a value, this function modifies the
provided `out` parameter.
"""
pass
def group_rank(*args, **kwargs): # real signature unknown
"""
Provides the rank of values within each group.
Parameters
----------
out : array of float64_t values which this method will write its results to
values : array of rank_t values to be ranked
labels : array containing unique label for each group, with its ordering
matching up to the corresponding record in `values`
ngroups : int
This parameter is not used, is needed to match signatures of other
groupby functions.
is_datetimelike : bool, default False
unused in this method but provided for call compatibility with other
Cython transformations
ties_method : {'average', 'min', 'max', 'first', 'dense'}, default
'average'
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
na_option : {'keep', 'top', 'bottom'}, default 'keep'
pct : boolean, default False
Compute percentage rank of data within each group
na_option : {'keep', 'top', 'bottom'}, default 'keep'
* keep: leave NA values where they are
* top: smallest rank if ascending
* bottom: smallest rank if descending
Notes
-----
This method modifies the `out` parameter rather than returning an object
"""
pass
def group_shift_indexer(*args, **kwargs): # real signature unknown
pass
def group_var_float32(*args, **kwargs): # real signature unknown
pass
def group_var_float64(*args, **kwargs): # real signature unknown
pass
def _group_add(*args, **kwargs): # real signature unknown
""" Only aggregates on axis=0 """
pass
def _group_mean(*args, **kwargs): # real signature unknown
pass
def _group_ohlc(*args, **kwargs): # real signature unknown
""" Only aggregates on axis=0 """
pass
def _group_prod(*args, **kwargs): # real signature unknown
""" Only aggregates on axis=0 """
pass
def _group_var(*args, **kwargs): # real signature unknown
pass
def __pyx_unpickle_Enum(*args, **kwargs): # real signature unknown
pass
# no classes
# variables with complex values
tiebreakers = {
'average': 0,
'dense': 5,
'first': 3,
'max': 2,
'min': 1,
}
__loader__ = None # (!) real value is '<_frozen_importlib_external.ExtensionFileLoader object at 0x0000024496960940>'
__spec__ = None # (!) real value is "ModuleSpec(name='pandas._libs.groupby', loader=<_frozen_importlib_external.ExtensionFileLoader object at 0x0000024496960940>, origin='C:\\\\Users\\\\Doly\\\\Anaconda3\\\\lib\\\\site-packages\\\\pandas\\\\_libs\\\\groupby.cp37-win_amd64.pyd')"
__test__ = {}
| [
"qinkunpeng2015@163.com"
] | qinkunpeng2015@163.com |
c7890e6ce1aa57b2d4cea837d71e289bbf7fcb58 | 16fc5c2708525efc440c767c53c1e9704545fcee | /python3/trees/traverse_iter_2.py | 142c03b366481a5d3c00d19e1e872e1ce8720a88 | [] | no_license | arnabs542/achked | c530c880a2df31242ef5a2c8efc7546a56ab28b8 | 9218e2cd24f8111d8e7de403f4aab73720a2d179 | refs/heads/master | 2022-02-11T15:33:26.590353 | 2019-08-12T03:53:37 | 2019-08-12T03:53:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,621 | py | #!/usr/bin/env python3
from tree_creation import *
def pushleft(node, st):
    """Stage the left spine of *node* onto stack *st*.

    Walking down the left children, each right child is pushed just BELOW
    its parent; post_iter()/post_iter2() rely on that ordering to detect an
    unvisited right subtree.
    """
    current = node
    while current:
        right_child = current.right
        if right_child:
            st.append(right_child)
        st.append(current)
        current = current.left
def post_iter(root):
    """Iterative postorder traversal: prints each node's data followed by a
    trailing newline.

    Single-stack scheme: pushleft() stages every node directly ABOVE its own
    right child, so when the node just below a popped node is that node's
    right child, the right subtree has not been visited yet.
    """
    st = []
    node = root
    pushleft(node, st)
    while st:
        node = st.pop(-1)
        # Peek at the element under the popped node (None when stack empty).
        if st:
            nxt_node = st[-1]
        else:
            nxt_node = None
        if not node.right and not node.left:
            # leaf node here.
            print(node.data, end = ' ')
        elif node.right == nxt_node:
            # Switch the order of node and its right to indicate that
            # the right node has been processed. Right node will be
            # pushed in the pushleft() routine.
            st.pop(-1)
            st.append(node)
            pushleft(nxt_node, st)
        else:
            # here, the node with only left child will be processed.
            print(node.data, end = ' ')
    print()
def post_iter2(root):
    """Compact variant of post_iter(): the leaf special-case is folded into
    the general `else` branch; only the right-child swap is special-cased.

    Bug fix: when the last popped node has no right child, both `node.right`
    and `nxt_node` were None, so `node.right == nxt_node` was True and
    `st.pop(-1)` raised IndexError on the empty stack (e.g. a single-node
    tree, or any root without a right child). Guard on `node.right` being a
    real node.
    """
    st = []
    node = root
    pushleft(node, st)
    while st:
        node = st.pop(-1)
        nxt_node = st[-1] if st else None
        if node.right is not None and node.right == nxt_node:
            # Swap node and its right child on the stack to mark the node as
            # pending, then stage the right subtree via pushleft().
            st.pop(-1)
            st.append(node)
            pushleft(nxt_node, st)
        else:
            # Leaf, or a node whose right subtree was already emitted.
            print(node.data, end = ' ')
    print()
def pre_order(root):
    """Iterative preorder traversal: print each node's data, then a newline.

    Right child is pushed before left so the left subtree pops first.
    """
    stack = [root]
    while stack:
        current = stack.pop()
        print(current.data, end=' ')
        if current.right:
            stack.append(current.right)
        if current.left:
            stack.append(current.left)
    print()
def pushleft_only(node, st):
    """Push *node* and its whole left spine onto stack *st* (no right
    children — used by the inorder traversal)."""
    cursor = node
    while cursor:
        st.append(cursor)
        cursor = cursor.left
def inorder_iter(root):
    """Iterative inorder traversal: print each node's data, then a newline.

    Same behaviour as the helper-based version, with the left-spine push
    written inline.
    """
    stack = []
    cursor = root
    while cursor:
        stack.append(cursor)
        cursor = cursor.left
    while stack:
        cursor = stack.pop()
        print(cursor.data, end=' ')
        node = cursor.right
        while node:
            stack.append(node)
            node = node.left
    print()
def main():
    """Build a sample tree and demo every traversal routine.

    create_tree() and print_level_tree() come from the project-local
    tree_creation module (imported at file top).
    """
    l = [10, 6, 9, 8, 7, 11, 13, 12]
    root = create_tree(l)
    print_level_tree(root)
    print("Postorder traversal: ", end = '')
    post_iter(root)
    print("Postorder traversal2: ", end = '')
    post_iter2(root)
    print("Preorder traversal: ", end = '')
    pre_order(root)
    print("Inorder traversal: ", end = '')
    inorder_iter(root)
if __name__ == '__main__':
main()
| [
"pathak.animesh@gmail.com"
] | pathak.animesh@gmail.com |
50a0fd453d0d10d876296eb29c477509d40f512a | 518ec4ba6c41d0ff276e6d52fac85bc218ff4a72 | /tes.py | c4ee8088e6fc42a6c4fdf13d288d5503ef341224 | [] | no_license | rho557/26415003 | c190b2be8e333e8d74f1030945a716f6549a79bf | d1658dccaf44f761a8fc80756b171386d2bcbb34 | refs/heads/master | 2022-12-24T19:36:33.513841 | 2016-12-01T03:17:24 | 2016-12-01T03:17:24 | 68,509,688 | 0 | 2 | null | 2022-12-11T10:01:11 | 2016-09-18T09:03:28 | Python | UTF-8 | Python | false | false | 323 | py |
#!/bin/bash
# Scrape Bank Mandiri's FX-rate page and print the USD buy (beli) and
# sell (jual) rates. Relies on page layout: the buy value sits 1 line and
# the sell value 4 lines after the "USD" marker — fragile if layout changes.
beli=`curl -s http://www.bankmandiri.co.id/resource/kurs.asp | grep USD -A1 | cut -d">" -f2 | cut -d"<" -f1|xargs|cut -d" " -f2`
jual=`curl -s http://www.bankmandiri.co.id/resource/kurs.asp | grep USD -A4 | cut -d">" -f2 | cut -d"<" -f1 |xargs|cut -d" " -f4`
echo "Kurs Beli:"$beli
echo "Kurs Jual:"$jual
| [
"m26415003@opensource.petra.ac.id"
] | m26415003@opensource.petra.ac.id |
5fc83da50a9f4b6e2a19f38727db0db27cd1c31b | 35272be7274919c285ad9eab078593a3aaf9c618 | /dirbrute.py | 5d91881b5958cd4cd7e025bbf4bf3294e0661275 | [] | no_license | icysun/bughunter | d44e23d8ed901aad04c335817232924dde93ccf2 | aaf82eaf6cf366372a90e6f80d8a354d393d4f4e | refs/heads/master | 2020-03-07T10:29:42.006022 | 2017-09-12T02:45:45 | 2017-09-12T02:45:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | #coding:utf-8
'''
爆破目录,暂时用BBscan扫描
'''
from gevent import monkey
from gevent.pool import Pool
from lib.lib import *
import os
monkey.patch_all()
def check(temp):
    # temp is an (ip, port) tuple taken from the port-scan results table.
    ip = temp[0]
    port = temp[1]
    # getdomainfromip() is a project helper (lib.lib); presumably resolves
    # the IP to its known virtual-host domains — confirm against lib/lib.py.
    domains = getdomainfromip(ip)
    for domain in domains:
        print "checking ip {}".format(domain)
        url = "http://{}:{}".format(domain,port)
        print "python3 dirbrute/dirsearch.py -u \"{0}\" -e php".format(url)
        # NOTE(review): the command line is built by string interpolation and
        # executed through a shell — a hostile domain name means shell
        # injection; consider subprocess with an argument list instead.
        os.system("python3 dirbrute/dirsearch.py -u \"{0}\" -e php,jsp,html".format(url))
def test():
    # Ad-hoc smoke test against a local server; invoked manually, not by run().
    #check(('127.0.0.1',"8089/samples-spring-1.2.3"))
    check(('127.0.0.1',"8080"))
def run():
    # Pull every host:port that looks like an HTTP service from the scan DB.
    res = get_from_database("select address,port from blog.scan_scan_port where service like 'http%' or port='80' or port='443' or service ='' or service='unknown'")
    # Deduplicate; ordering is irrelevant for the brute-force workers.
    res = list(set(res))
    print "检查共{0}个IP".format(len(res))
    # Fan out over up to 200 concurrent gevent greenlets.
    p = Pool(200)
    p.map(check, res)
if __name__ == "__main__":
run()
| [
"dongguangli@fangdd.com"
] | dongguangli@fangdd.com |
9b2a0af83e82369b79f14969dad82070cbdfe3f6 | f9bce8bce1c3f284aa81e49b316d28b762d8e738 | /typeidea/typeidea/blog/function-views.py | 125aeceacd4d3f594b5ba2abb1e16d57a40af7f7 | [] | no_license | TheBlack1024/typeidea | 00f802796e1ce38639fe48c520b1e41a3e7b0fbd | 8581e425c24df161150ee92f4e7ef24db5bcaa01 | refs/heads/master | 2022-12-01T02:24:10.403013 | 2019-06-23T12:40:08 | 2019-06-23T12:40:08 | 183,023,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | from django.shortcuts import render
"""
URl到View的数据映射关系展示:
from django.http import HttpResponse
def post_list(request,category_id=None,tag_id=None):
content = 'post_list category_id={category_id},tag_id={tag_id}'.format(
category_id=category_id,
tag_id=tag_id,
)
return HttpResponse(content)
def post_detail(request,post_id):
return HttpResponse('detail')
"""
from .models import Post,Tag,Category
from config.models import SideBar
def post_list(request, category_id=None, tag_id=None):
    """Blog list view: posts filtered by tag, by category, or the latest.

    Fix: removed the dead triple-quoted block holding the old inline
    filtering implementation — it was a no-op string expression evaluated on
    every request; the model classmethods below replaced it.
    """
    tag = None
    category = None
    if tag_id:
        post_list, tag = Post.get_by_tag(tag_id)
    elif category_id:
        post_list, category = Post.get_by_category(category_id)
    else:
        post_list = Post.latest_posts()
    context = {
        'category': category,
        'tag': tag,
        'post_list': post_list,
        'sidebars': SideBar.get_all(),
    }
    # Merge navigation categories (presumably 'navs'/'categories' keys —
    # confirm against Category.get_navs()).
    context.update(Category.get_navs())
    return render(request, 'blog/list.html', context=context)
def post_detail(request, post_id=None):
    """Blog detail view: render a single post, or None if the id is unknown
    (the template is expected to handle a missing post)."""
    try:
        post = Post.objects.get(id=post_id)
    except Post.DoesNotExist:
        post = None
    context = {
        'post': post,
        'sidebars': SideBar.get_all(),
    }
    context.update(Category.get_navs())
    return render(request, 'blog/detail.html', context=context)
| [
"92182005@qq.com"
] | 92182005@qq.com |
481de81dba429e0407e166febee60e50c78f6a60 | cb4eb83b2aa6b47310478aa7a62bb5ef0b9241d7 | /matrix.py | b688f065f09cd9aa73a9552be57b5f6c2dc66b3a | [] | no_license | josh-minch/scrape | 69eb688054cb2f60a5696ea0295c9d2e035328d1 | cc215914ec803660ca66382a09e6e4e408fd505e | refs/heads/master | 2022-12-26T15:11:56.741519 | 2020-10-17T07:08:53 | 2020-10-17T07:08:53 | 302,195,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,151 | py | import json
import collections
import numpy as np
import pandas as pd
from helper import get_json, write_json
def get_ranked_ingreds(ingreds, recipe_matrix, all_ingreds):
    """Return ingredients ranked by how often they co-occur (in recipes) with
    every ingredient in *ingreds*.

    recipe_matrix: 2D array, rows = ingredients (order of all_ingreds),
    cols = recipes; a 1 marks an occurrence. Returns {name: count}, counts
    descending, zero-count entries excluded.
    """
    if isinstance(ingreds, str):
        ingreds = [ingreds]
    index_of = {name: i for i, name in enumerate(all_ingreds)}
    wanted = [index_of[name] for name in ingreds]
    # A recipe matches when it contains every requested ingredient, i.e. its
    # column sums to len(wanted) over the selected rows.
    per_recipe_hits = np.sum(recipe_matrix[wanted], 0)
    matching_cols = np.argwhere(per_recipe_hits == len(wanted)).flatten()
    submatrix = recipe_matrix[:, matching_cols]
    # Total occurrences of each ingredient across the matching recipes.
    totals = np.sum(submatrix, 1)
    ranking = np.flip(np.argsort(totals))
    ranked = {}
    for ix in ranking:
        count = totals[ix]
        if count == 0:
            break
        ranked[all_ingreds[ix]] = count
    return ranked
# TODO: Duplicates in recipes in recipe_data_filtered causes recipe matrix to
# have elements that equal 2, causing ranking functions to misbehave.
def make_recipe_matrix():
    '''Build the 2D occurrence matrix: rows are ingredients, cols are recipe
    titles, a 1 marks an ingredient occurring in that recipe.

    Bug fix: pandas was used (`pd.DataFrame`) but never imported anywhere in
    the file, so this function always raised NameError; `import pandas as pd`
    is now added to the module imports.
    '''
    ingreds = get_json('all_ingreds_filtered.json')
    recipes = get_json('recipe_data_filtered.json')
    titles = [recipe['title'] for recipe in recipes]
    df = pd.DataFrame(0, ingreds, titles)
    ingreds = set(ingreds)
    for recipe in recipes:
        # Only count ingredients that survived filtering.
        matches = set(recipe['ingreds']) & ingreds
        if len(matches) > 0:
            # NOTE: duplicate recipe titles make this increment the same
            # column twice, producing cells > 1 (see TODO above).
            df.loc[list(matches), recipe['title']] += 1
    return df.to_numpy()
def get_cooc(df):
    """Return the ingredient co-occurrence matrix (M @ M.T with a zeroed
    diagonal) built from the recipe occurrence matrix.

    Bug fix: make_recipe_matrix() already returns a numpy array, so the old
    `df.to_numpy()` call raised AttributeError. The *df* parameter was (and
    still is) ignored — kept only for interface compatibility.
    """
    m = make_recipe_matrix()
    cooc = m.dot(m.transpose())
    np.fill_diagonal(cooc, 0)
    return cooc
def get_ranked_ingreds_from_cooc(ingred):
    """Rank all ingredients by their co-occurrence count with *ingred*, using
    the precomputed co-occurrence matrix on disk. Returns {name: count},
    descending, zero counts excluded."""
    names = get_json('all_ingreds_filtered.json')
    index_of = {name: i for i, name in enumerate(names)}
    cooc = np.array(get_json('cooc.json'))
    row = cooc[index_of[ingred]]
    order = np.flip(np.argsort(row))
    ranked = {}
    for ix in order:
        count = row[ix]
        if count == 0:
            break
        ranked[names[ix]] = count
    return ranked
def main():
    """Demo: rank co-occurring ingredients for 'onion'.

    Bug fix: the old code called get_ranked_ingreds_naive(), which is not
    defined anywhere in this module (NameError); the function defined above
    is get_ranked_ingreds().
    """
    ingreds = 'onion'
    recipe_matrix = np.array(get_json('recipe_matrix.json'))
    all_ingreds = get_json('all_ingreds_filtered.json')
    get_ranked_ingreds(ingreds, recipe_matrix, all_ingreds)
if __name__ == "__main__":
main()
| [
"josh.minch@gmail.com"
] | josh.minch@gmail.com |
71c25031f25c1f6105b173234a0c319624fb9787 | 2eec5d2c07b949196497df434756476e55c2fdeb | /unitedstates/form_parsing/utils/data_munge.py | 8e64e4ea4cbfd99678b7741ea83310df375c940c | [] | no_license | influence-usa/scrapers-us-federal | b1db90fbe8e4ed68c9f393b8da93a3363d37d25c | b9350a6f22061205403bd1640497830387188682 | refs/heads/master | 2016-09-15T14:34:33.736507 | 2016-03-02T00:08:07 | 2016-03-02T00:08:07 | 42,490,961 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,764 | py | from datetime import datetime
import locale
import re
from functools import reduce
REPLACE_MAP = {u' ': u'',
u'\xa0': u'',
u'\u200b': u'',
u' ': u''}
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
DATE_FORMATS = ['%m/%d/%Y',
'%m/%d/%Y %I:%M:%S %p',
'%m/%d/%y',
'%Y/%m/%d',
'%m-%d-%Y',
'%m-%d-%y',
'%m.%d.%Y']
LEAP_DAY_CHECKS = [
re.compile(r'^(?P<year>(19|20)[0-9]{2})'
r'[/-]'
r'(?P<month>0?2)'
r'[/-]'
r'(?P<day>29)$'),
re.compile(r'^(?P<month>0?2)'
r'[/-]'
r'(?P<day>29)'
r'[/-]'
r'(?P<year>(19|20)?[0-9]{2})$')
]
def get_key(my_dict, key):
    """Fetch a value from a nested dict via a dotted path, e.g. 'a.b.c'.

    A missing intermediate key yields None (and a TypeError one level deeper,
    matching the original reduce(dict.get, ...) behaviour).
    """
    value = my_dict
    for part in key.split("."):
        value = dict.get(value, part)
    return value
def set_key(my_dict, key, value):
    """Set a value in a nested dict via a dotted path; all parent dicts along
    the path must already exist."""
    *parents, last = key.split(".")
    target = my_dict
    for part in parents:
        target = dict.get(target, part)
    target[last] = value
def del_key(my_dict, key):
    """Delete the entry at a dotted path in a nested dict; parent dicts along
    the path must exist."""
    *parents, last = key.split(".")
    target = my_dict
    for part in parents:
        target = dict.get(target, part)
    del target[last]
def map_vals(copy_map, original, template=None):
    """Copy values from *original* into a new dict following *copy_map*, a
    sequence of (source_dotted_path, dest_dotted_path) pairs.

    Bug fix: the old signature used a shared mutable default (`template={}`),
    so every call without an explicit template accumulated keys from all
    previous calls. A fresh dict is now created per call; passing *template*
    still populates and returns that dict, as before. (The old shallow
    `original.copy()` was dropped — the copy was only ever read from.)
    """
    transformed = {} if template is None else template
    for orig_loc, trans_loc in copy_map:
        set_key(transformed, trans_loc, get_key(original, orig_loc))
    return transformed
def checkbox_boolean(e):
    """True when the element carries a ``checked`` attribute (checkbox state)."""
    attributes = e.attrib
    return 'checked' in attributes
def clean_text(e):
    """Return the element's text stripped of surrounding whitespace and with
    every REPLACE_MAP entry (non-breaking/zero-width spaces) removed; ''
    when the element has no text."""
    text = (e.text or '').strip()
    for pattern, replacement in REPLACE_MAP.items():
        text = text.replace(pattern, replacement)
    return text
def parse_datetime(e):
    """Parse the element's cleaned text as a datetime.

    Returns an ISO 'YYYY-MM-DD HH:MM:SS' string when one of DATE_FORMATS
    matches, the raw cleaned string when none matches (caller decides what
    to do with it), or None when the element is empty.

    Cleanup: removed the dead ``f = 0`` initialiser and the redundant
    ``parsed`` temporary — behaviour unchanged.
    """
    s = clean_text(e)
    if not s:
        return None
    for fmt in DATE_FORMATS:
        try:
            return datetime.strptime(s, fmt).isoformat(sep=' ')
        except ValueError:
            continue
    return s
def parse_date(e):
    """Parse the element's cleaned text as a date ('YYYY-MM-DD').

    Falls back to the LEAP_DAY_CHECKS regexes for Feb-29 dates that strptime
    rejected (i.e. Feb 29 written for a non-leap year on the form), clamping
    them to Feb 28. Returns the raw cleaned string when nothing matches, or
    None for empty input.
    """
    s = clean_text(e)
    parsed = None
    if s:
        f = 0
        for f in DATE_FORMATS:
            try:
                parsed = datetime.strptime(s, f).strftime('%Y-%m-%d')
            except ValueError:
                continue
            else:
                return parsed
        else:
            # No format matched: check for an (invalid) Feb-29 and clamp it.
            for p in LEAP_DAY_CHECKS:
                m = p.match(s)
                if m is not None:
                    groups = m.groupdict()
                    # NOTE(review): a 2-digit year (allowed by the second
                    # pattern) is passed straight to datetime(year=...),
                    # giving e.g. year 96 rather than 1996 — confirm intent.
                    adjusted = datetime(year=int(groups['year']),
                                        month=int(groups['month']),
                                        day=28)
                    return adjusted.strftime('%Y-%m-%d')
            return s
    else:
        return None
def tail_text(e):
    """Return the element's tail text with REPLACE_MAP junk removed and
    surrounding whitespace stripped.

    Bug fix: the old code called ``REPLACE_MAP.iteritems()``, which does not
    exist on Python 3 dicts (AttributeError on every call); the rest of this
    module already uses ``.items()`` (see clean_text), so this was a Python 2
    leftover.
    """
    s = e.tail
    for p, r in REPLACE_MAP.items():
        s = s.replace(p, r)
    return s.strip()
def parse_decimal(e):
    """Parse the element's cleaned text as a locale-aware float (commas as
    thousands separators under en_US); None when empty."""
    text = clean_text(e)
    return locale.atof(text) if text else None
def parse_int(e):
    """Parse the element's cleaned text as an int; None when empty."""
    text = clean_text(e)
    return int(text) if text else None
def parse_percent(e):
    """Parse text like '12%' into a fraction (0.12); None when empty."""
    text = clean_text(e).replace('%', '')
    return float(text) / 100.0 if text else None
def split_keep_rightmost(e):
    """Return the last space-separated token of the element's cleaned text,
    or None when the text has fewer than two tokens."""
    tokens = clean_text(e).split(' ')
    return tokens[-1] if len(tokens) > 1 else None
def split_drop_leftmost(e):
    """Return the element's cleaned text with its first space-separated token
    removed, or None when the text has fewer than two tokens."""
    tokens = clean_text(e).split(' ')
    return ' '.join(tokens[1:]) if len(tokens) > 1 else None
def parse_array(array, children):
    """Parse a list of elements into a list of dicts.

    *children* is a list of specs: {'field', 'path', 'parser', optional
    'children'}. For each element, each spec's xpath is evaluated; nested
    specs receive the full match list, leaf specs the first match only.
    """
    records = []
    for element in array:
        record = {}
        for spec in children:
            matched = element.xpath(spec['path'])
            if spec.get('children', False):
                record[spec['field']] = spec['parser'](matched, spec['children'])
            else:
                record[spec['field']] = spec['parser'](matched[0])
        records.append(record)
    return records
def parse_even_odd(array, children):
    """Generator: consume *array* two elements at a time, yielding one record
    per (even, odd) pair built from the parser specs in children['even'] and
    children['odd'].

    NOTE(review): raises IndexError when len(array) is odd — confirm callers
    always pass an even-length list.
    """
    for even, odd in [(array[i], array[i + 1])
                      for i in range(0, len(array), 2)]:
        record = {}
        # Fields extracted from the even-positioned element.
        for child in children['even']:
            _parser = child['parser']
            _field = child['field']
            _path = child['path']
            _child_node = even.xpath(_path)[0]
            record[_field] = _parser(_child_node)
        # Fields extracted from the odd-positioned element, merged in.
        for child in children['odd']:
            _parser = child['parser']
            _field = child['field']
            _path = child['path']
            _child_node = odd.xpath(_path)[0]
            record[_field] = _parser(_child_node)
        yield record
| [
"blannon@gmail.com"
] | blannon@gmail.com |
2697ac3de76880d27f9dffe050d1e07fc88eed4f | 9b8dc17c63bb0b3aad02f36841803e08316d3578 | /problem_5348.py | f0f10a70b357dbaf121e032fe58e62cf719f971d | [] | no_license | zhou-jia-ming/leetcode-py | 9687611a097bb5aee530bec4dcf094462be76be3 | f56e59f116e6b51e222debdd575e840b74165568 | refs/heads/master | 2021-08-06T17:22:41.356258 | 2021-07-25T16:56:10 | 2021-07-25T16:56:10 | 246,513,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | # coding:utf-8
# Created by: Jiaming
# Created at: 2020-03-21
# 给你两个整数数组
# arr1 , arr2
# 和一个整数
# d ,请你返回两个数组之间的
# 距离值 。
#
# 「距离值」 定义为符合此描述的元素数目:对于元素
# arr1[i] ,不存在任何元素
# arr2[j]
# 满足 | arr1[i] - arr2[j] | <= d 。
from typing import List
class Solution:
    def findTheDistanceValue(self, arr1: List[int], arr2: List[int],
                             d: int) -> int:
        """Count elements of arr1 whose distance to every element of arr2
        exceeds d (LeetCode 5348, 'distance value')."""
        return sum(
            1 for a in arr1
            if all(abs(a - b) > d for b in arr2)
        )
if __name__ == "__main__":
s = Solution()
print(s.findTheDistanceValue([4, 5, 8], [10, 9, 1, 8], 2))
| [
"zhoujiaming12345@gmail.com"
] | zhoujiaming12345@gmail.com |
41b0967ff45a265d207f789fe783d04512078cff | f0becfb4c3622099ce3af2fad5b831b602c29d47 | /django/myvenv/lib/python3.8/site-packages/astroid/brain/brain_scipy_signal.py | 996300d4877b225396de340a5a6a033d99d47ba7 | [
"MIT"
] | permissive | boostcamp-2020/relay_06 | 9fe7c1c722405d0916b70bb7b734b7c47afff217 | a2ecfff55572c3dc9262dca5b4b2fc83f9417774 | refs/heads/master | 2022-12-02T05:51:04.937920 | 2020-08-21T09:22:44 | 2020-08-21T09:22:44 | 282,153,031 | 4 | 12 | MIT | 2022-11-27T01:13:40 | 2020-07-24T07:29:18 | Python | UTF-8 | Python | false | false | 2,255 | py | # Copyright (c) 2019 Valentin Valls <valentin.valls@esrf.fr>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""Astroid hooks for scipy.signal module."""
import astroid
def scipy_signal():
    """Astroid brain plugin body: return a parsed stub module giving
    scipy.signal's C-backed window functions explicit Python signatures so
    static analysis (pylint) can infer them. Every stub returns
    ``numpy.ndarray([0])`` so inference sees an ndarray result; impulse2 and
    step2 return a pair of ndarrays.
    """
    return astroid.parse(
        """
    # different functions defined in scipy.signals
    def barthann(M, sym=True):
        return numpy.ndarray([0])
    def bartlett(M, sym=True):
        return numpy.ndarray([0])
    def blackman(M, sym=True):
        return numpy.ndarray([0])
    def blackmanharris(M, sym=True):
        return numpy.ndarray([0])
    def bohman(M, sym=True):
        return numpy.ndarray([0])
    def boxcar(M, sym=True):
        return numpy.ndarray([0])
    def chebwin(M, at, sym=True):
        return numpy.ndarray([0])
    def cosine(M, sym=True):
        return numpy.ndarray([0])
    def exponential(M, center=None, tau=1.0, sym=True):
        return numpy.ndarray([0])
    def flattop(M, sym=True):
        return numpy.ndarray([0])
    def gaussian(M, std, sym=True):
        return numpy.ndarray([0])
    def general_gaussian(M, p, sig, sym=True):
        return numpy.ndarray([0])
    def hamming(M, sym=True):
        return numpy.ndarray([0])
    def hann(M, sym=True):
        return numpy.ndarray([0])
    def hanning(M, sym=True):
        return numpy.ndarray([0])
    def impulse2(system, X0=None, T=None, N=None, **kwargs):
        return numpy.ndarray([0]), numpy.ndarray([0])
    def kaiser(M, beta, sym=True):
        return numpy.ndarray([0])
    def nuttall(M, sym=True):
        return numpy.ndarray([0])
    def parzen(M, sym=True):
        return numpy.ndarray([0])
    def slepian(M, width, sym=True):
        return numpy.ndarray([0])
    def step2(system, X0=None, T=None, N=None, **kwargs):
        return numpy.ndarray([0]), numpy.ndarray([0])
    def triang(M, sym=True):
        return numpy.ndarray([0])
    def tukey(M, alpha=0.5, sym=True):
        return numpy.ndarray([0])
    """
    )
| [
"bhko0524@naver.com"
] | bhko0524@naver.com |
052f5f9e0e1635366a552b8781c4087f1cd3642d | 89d041cd5235257834313a051272269fca1ced72 | /tfds_preprocessing_pipeline.py | 9256f9c42a4cbbf0c197853516d55781858a5ba3 | [] | no_license | Gregorgeous/ula-transformer-tensorflow2.0 | 7f0eb44e9107678b1e5767644107594b942b3a73 | 68afaaff610ff8fdfc05c2535ff9f6af43fb9ad7 | refs/heads/master | 2022-01-05T06:26:41.681699 | 2019-06-11T23:04:04 | 2019-06-11T23:04:04 | 189,657,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,958 | py | # ======== Global libraries imports ====================
import tensorflow as tf
import tensorflow_datasets as tfds
import time
# # ====== Local code imports for my util functions ======
# from myPickleModule import unpickle #un-comment when debugging this pipeline (to do un-pickling conveniently here)
# =========== Local code imports for text cleanup ======
import string
from string import digits
import unicodedata
import re
import contractions
# =========== CONSTANTS =======================
EXCLUDE = set(string.punctuation)
CURRENCY_SYMBOLS = u''.join(chr(i) for i in range(0xffff) if unicodedata.category(chr(i)) == 'Sc')
TEXT_MAX_LENGTH = 65
SUMM_MAX_LENGTH = 15
TEXT_TOKENIZER, SUMMARY_TOKENIZER = None, None # Make them empty for now but visible in a global scope.
# =========== PRE-PROCESSING FUNCTIONS =========
def regex(text, isSummary=False):
    """
    Clean one sentence tensor and return it as UTF-8 bytes.

    Arguments:
        text: a bytes string tensor (e.g. b'this is a text'); .numpy() is
            called on it, so it must come through tf.py_function.
        isSummary: selects SUMM_MAX_LENGTH vs TEXT_MAX_LENGTH for trimming
            (map() can't take extra args, hence the boolean flag).

    Bug fixes:
      * The punctuation filter read ``ch not in EXCLUDE and CURRENCY_SYMBOLS``,
        which parses as ``(ch not in EXCLUDE) and bool(CURRENCY_SYMBOLS)`` —
        CURRENCY_SYMBOLS is a non-empty string, so currency symbols were
        never removed. Now tests membership in both sets.
      * The digit-removal ``translate`` result was discarded; it is now
        assigned back so digits are actually stripped, as the code intended.
    """
    sample = str(text.numpy())
    cleaned_sentence = contractions.fix(sample)
    cleaned_sentence = cleaned_sentence.lower()
    cleaned_sentence = re.sub("'", '', cleaned_sentence)
    cleaned_sentence = re.sub(",", ' ', cleaned_sentence)
    # Wipe escaped "smart quote" byte sequences (any non-standard quote type).
    cleaned_sentence = re.sub(r"\\xe2\\x80\\x9.", ' ', cleaned_sentence)
    cleaned_sentence = re.sub("-", ' ', cleaned_sentence)
    cleaned_sentence = re.sub("–", ' ', cleaned_sentence)
    cleaned_sentence = re.sub("\.", ' ', cleaned_sentence)
    cleaned_sentence = re.sub(";", ' ', cleaned_sentence)
    cleaned_sentence = re.sub(" +", ' ', cleaned_sentence)
    cleaned_sentence = re.sub(r"\\", ' ', cleaned_sentence)
    cleaned_sentence = re.sub("/", ' ', cleaned_sentence)
    cleaned_sentence = cleaned_sentence.lstrip()
    cleaned_sentence = cleaned_sentence.rstrip()
    cleaned_sentence = ''.join(
        ch for ch in cleaned_sentence
        if ch not in EXCLUDE and ch not in CURRENCY_SYMBOLS)
    remove_digits = str.maketrans('', '', digits)
    cleaned_sentence = cleaned_sentence.translate(remove_digits)
    # Drop the leading "b" left over from str(bytes) conversion above.
    cleaned_sentence = cleaned_sentence[1:]
    if isSummary:
        cleaned_and_trimmed_sentence = restrict_length(cleaned_sentence, SUMM_MAX_LENGTH)
    else:
        cleaned_and_trimmed_sentence = restrict_length(cleaned_sentence, TEXT_MAX_LENGTH)
    return cleaned_and_trimmed_sentence.encode()
def restrict_length(cleaned_sentence, text_max_allowed_len):
    """Trim *cleaned_sentence* to at most *text_max_allowed_len* words
    (space-separated); 0 disables trimming.

    Bug fix: the guard used ``is not 0`` — identity comparison against an int
    literal, which is implementation-defined and a SyntaxWarning on modern
    CPython — replaced with ``!= 0``.
    """
    if text_max_allowed_len != 0:
        words = cleaned_sentence.split(' ')
        if len(words) > text_max_allowed_len:
            return ' '.join(words[:text_max_allowed_len])
    return cleaned_sentence
def max_length_summaries(t):
    """Length of the longest summary across (text, summary) pairs in *t*."""
    return max(len(pair[1]) for pair in t)
def max_length_texts(t):
    """Length of the longest text across (text, summary) pairs in *t*."""
    return max(len(pair[0]) for pair in t)
def filter_max_length(text, summary, text_max_length=TEXT_MAX_LENGTH, summ_max_length = SUMM_MAX_LENGTH ):
    # Graph-mode predicate for tf.data.Dataset.filter: keep the example only
    # when BOTH sides fit their length budgets.
    # NOTE(review): the defaults are bound at import time (65/15); the later
    # `global TEXT_MAX_LENGTH` reassignment in the pipeline does NOT update
    # them — confirm this filter is never relied on with custom lengths.
    return tf.logical_and(tf.size(text) <= text_max_length,
                          tf.size(summary) <= summ_max_length)
# =============== DATASET PIPELINE ==================================================
def dataset_preprocessing_pipeline(texts: list, summaries: list, cutoff_index=0, texts_max_length=65, summaries_max_length=15, batch_size=64, buffer_size=20000):
    """Build the tf.data pipeline: clean text, BPE-encode, pad and batch.

    Returns (dataset, text_tokenizer, summary_tokenizer,
             texts_bpe_max_len, summaries_bpe_max_len).

    Bug fix: ``cutoff_index is not 0`` compared identity with an int literal
    (implementation-defined; SyntaxWarning on modern CPython) — now ``!=``.
    NOTE(review): despite the shuffle mentioned in the original comments,
    no dataset.shuffle() call exists and *buffer_size* is unused — confirm
    whether shuffling was intended.
    """
    # tf.data map() callbacks can't take extra arguments, so trim lengths and
    # tokenizers are communicated to regex()/BPE_encoding via module globals.
    global TEXT_MAX_LENGTH, SUMM_MAX_LENGTH, TEXT_TOKENIZER, SUMMARY_TOKENIZER
    TEXT_MAX_LENGTH = texts_max_length
    SUMM_MAX_LENGTH = summaries_max_length
    dataset = tf.data.Dataset.from_tensor_slices((texts, summaries))
    if cutoff_index != 0 and cutoff_index < len(texts):
        # If cutoff_index is specified, keep only that many samples.
        dataset = dataset.take(cutoff_index)

    def BPE_encoding(lang1, lang2):
        # Wrap each encoded sequence in start/end tokens (vocab_size and
        # vocab_size + 1, which are outside the learned vocabulary).
        lang1 = [TEXT_TOKENIZER.vocab_size] + TEXT_TOKENIZER.encode(
            lang1.numpy()) + [TEXT_TOKENIZER.vocab_size + 1]
        lang2 = [SUMMARY_TOKENIZER.vocab_size] + SUMMARY_TOKENIZER.encode(
            lang2.numpy()) + [SUMMARY_TOKENIZER.vocab_size + 1]
        return lang1, lang2

    def text_and_summary_cleanup(text, summary):
        cleaned_and_trimmed_text = regex(text, False)
        cleaned_and_trimmed_summary = regex(summary, True)
        return cleaned_and_trimmed_text, cleaned_and_trimmed_summary

    # map() runs in graph mode, so the plain-Python cleanup/encoding code
    # must be wrapped in tf.py_function.
    def tfds_map_py_wrapper(text, summary):
        return tf.py_function(text_and_summary_cleanup, [text, summary], [tf.string, tf.string])

    def tfds_map_py_wrapper2(text, summary):
        return tf.py_function(BPE_encoding, [text, summary], [tf.int64, tf.int64])

    # Step 1: clean the raw strings.
    dataset = dataset.map(tfds_map_py_wrapper)
    # Step 2: build the BPE tokenizers on the already-cleaned corpus so the
    # vocabulary contains no words the cleanup would have removed anyway.
    TEXT_TOKENIZER = tfds.features.text.SubwordTextEncoder.build_from_corpus(
        (text.numpy() for text, summary in dataset), target_vocab_size=2**13)
    SUMMARY_TOKENIZER = tfds.features.text.SubwordTextEncoder.build_from_corpus(
        (summary.numpy() for text, summary in dataset), target_vocab_size=2**13)
    dataset = dataset.map(tfds_map_py_wrapper2)
    # Step 3: measure the longest encoded text/summary so padded_batch can
    # align every example to one fixed width per side (padded_shapes=[-1]
    # misbehaved for summaries when originally tried).
    texts_bpe_encodings_max_length = max_length_texts(dataset)
    summaries_bpe_encodings_max_length = max_length_summaries(dataset)
    # Step 4: pad to the measured widths and form batches.
    dataset = dataset.padded_batch(
        batch_size, padded_shapes=([texts_bpe_encodings_max_length], [summaries_bpe_encodings_max_length]))
    return dataset, TEXT_TOKENIZER, SUMMARY_TOKENIZER, texts_bpe_encodings_max_length, summaries_bpe_encodings_max_length
| [
"g.r.fisher.pl@gmail.com"
] | g.r.fisher.pl@gmail.com |
1a2fbb7e9d1d611beb3f814b28822f4c36022be3 | 8a33a75877294c2bcb44d1b13500f56e3d06e7f2 | /city_weather.py | 6a32d3b26f4c8cb0010548f3546483173289acbe | [] | no_license | josdas/Sleep-story | bc84c40816f9b3b75ba87143b8c0591a50f20d02 | 2ee739b8569499a62a595156963ea6153d815d20 | refs/heads/master | 2021-06-28T13:57:34.543390 | 2017-09-19T15:50:03 | 2017-09-19T15:50:03 | 103,980,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | from weather import Weather
def get_weather(city='St. Petersburg'):
    """Look up current conditions for *city* via the `weather` package and
    return the condition dict with its 'date' entry removed (raises KeyError
    if the lookup returns no date, same as before)."""
    condition = Weather().lookup_by_location(city).condition()
    del condition['date']
    return condition
if __name__ == '__main__':
cur_weather = get_weather()
assert 'temp' in cur_weather
assert 'code' in cur_weather
assert 'text' in cur_weather
| [
"josdas@mail.ru"
] | josdas@mail.ru |
3bd089fb5ee2269ebd53bd4c0612b26e935bb7ef | 0f8b29a7d46218ea96a2d740dc3519b6b831090e | /src/charts/urls.py | 981ab69b398cc173a30324723997931954bc88a0 | [] | no_license | chuymedina96/django_data_visualization | 54d02f471580dc5718c88f42547d34c08648a906 | 56522942b5d7ad8df20dc90ca9ae541d987a184b | refs/heads/master | 2023-05-30T17:35:25.279095 | 2020-05-26T17:58:57 | 2020-05-26T17:58:57 | 267,113,821 | 0 | 0 | null | 2021-06-10T22:57:45 | 2020-05-26T17:55:24 | Python | UTF-8 | Python | false | false | 976 | py | """charts URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from .views import HomeView, ChartData
urlpatterns = [
    url(r'^$', HomeView.as_view(), name="home"),
    # url(r'^api/data/$', get_data, name="get-data"),  # legacy endpoint; get_data is no longer imported
    url(r'^api/chart/data/$', ChartData.as_view(), name="chart-data"),
    url(r'^admin/', admin.site.urls),
]
| [
"chuymedina96@gmail.com"
] | chuymedina96@gmail.com |
7db862ef198c2cbdadec7bf0372b423abee7302d | 1819b161df921a0a7c4da89244e1cd4f4da18be4 | /WhatsApp_FarmEasy/env/lib/python3.6/site-packages/ipfshttpclient/http.py | 107176524c9a4de64775e18ff406059468081bdd | [
"MIT"
] | permissive | sanchaymittal/FarmEasy | 889b290d376d940d9b3ae2fa0620a573b0fd62a0 | 5b931a4287d56d8ac73c170a6349bdaae71bf439 | refs/heads/master | 2023-01-07T21:45:15.532142 | 2020-07-18T14:15:08 | 2020-07-18T14:15:08 | 216,203,351 | 3 | 2 | MIT | 2023-01-04T12:35:40 | 2019-10-19T12:32:15 | JavaScript | UTF-8 | Python | false | false | 13,013 | py | # -*- encoding: utf-8 -*-
"""HTTP client for api requests.
This is pluggable into the IPFS Api client and will hopefully be supplemented
by an asynchronous version.
"""
from __future__ import absolute_import
import abc
import functools
import tarfile
from six.moves import http_client
import os
import socket
try: #PY3
import urllib.parse
except ImportError: #PY2
class urllib:
import urlparse as parse
import multiaddr
from multiaddr.protocols import (P_DNS, P_DNS4, P_DNS6, P_HTTP, P_HTTPS, P_IP4, P_IP6, P_TCP)
import six
from . import encoding
from . import exceptions
PATCH_REQUESTS = (os.environ.get("PY_IPFS_HTTP_CLIENT_PATCH_REQUESTS", "yes").lower()
not in ("false", "no"))
if PATCH_REQUESTS:
from . import requests_wrapper as requests
else: # pragma: no cover (always enabled in production)
import requests
def pass_defaults(func):
    """Decorator that merges ``self.defaults`` into the call's keyword
    arguments before invoking *func*.

    Explicitly passed keyword arguments take precedence over entries from
    ``self.defaults``.

    Parameters
    ----------
    func : callable
        The method to wrap; it must take ``self`` as its first argument.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        merged = dict(self.defaults, **kwargs)
        return func(self, *args, **merged)
    return wrapper
def _notify_stream_iter_closed():
    # Intentional no-op hook: the unit tests replace (mock) this function to
    # verify that StreamDecodeIterator.close() is always reached.
    pass # Mocked by unit tests to determine check for proper closing
class StreamDecodeIterator(object):
    """
    Wrapper around `Iterable` that allows the iterable to be used in a
    context manager (`with`-statement) allowing for easy cleanup.

    Internally drives two chained iterators: the HTTP response's chunk
    iterator feeds raw bytes into the parser, and the parser's partial
    results are yielded one at a time.
    """
    def __init__(self, response, parser):
        self._response = response
        self._parser = parser
        # chunk_size=None: yield chunks as they arrive from the socket.
        self._response_iter = response.iter_content(chunk_size=None)
        # Created lazily per received chunk in __next__.
        self._parser_iter = None

    def __iter__(self):
        return self

    def __next__(self):
        while True:
            # Try reading from the current parser iterator first.
            if self._parser_iter is not None:
                try:
                    result = next(self._parser_iter)
                    # Detect late error messages that occured after some data
                    # has already been sent
                    if isinstance(result, dict) and result.get("Type") == "error":
                        msg = result["Message"]
                        raise exceptions.PartialErrorResponse(msg, None, [])
                    return result
                except StopIteration:
                    self._parser_iter = None
                    # Forward exception to caller if we do not expect any
                    # further data
                    if self._response_iter is None:
                        raise
            try:
                data = next(self._response_iter)
                # Create new parser iterator using the newly recieved data
                self._parser_iter = iter(self._parser.parse_partial(data))
            except StopIteration:
                # No more data to receive – destroy response iterator and
                # iterate over the final fragments returned by the parser
                self._response_iter = None
                self._parser_iter = iter(self._parser.parse_finalize())

    #PY2: Old iterator syntax
    next = __next__

    def __enter__(self):
        return self

    def __exit__(self, *a):
        self.close()

    def close(self):
        # Clean up any open iterators first
        if self._response_iter is not None:
            self._response_iter.close()
        if self._parser_iter is not None:
            self._parser_iter.close()
        self._response_iter = None
        self._parser_iter = None
        # Clean up response object and parser
        if self._response is not None:
            self._response.close()
        self._response = None
        self._parser = None
        # Test hook (see _notify_stream_iter_closed above).
        _notify_stream_iter_closed()
def stream_decode_full(response, parser):
    """Read and decode the *entire* response body in one go.

    Consumes a :class:`StreamDecodeIterator` to completion (ensuring the
    response is closed afterwards) and returns either the list of decoded
    items or, for raw byte streams, a single concatenated blob.
    """
    with StreamDecodeIterator(response, parser) as decoded_items:
        # Drain the iterator completely before the context manager closes it
        collected = [item for item in decoded_items]
        # Byte streams are handed back as one message instead of being split
        # at whatever chunk boundaries the transport happened to produce
        if parser.is_stream:
            return b"".join(collected)
        return collected
class HTTPClient(object):
    """An HTTP client for interacting with the IPFS daemon.

    Parameters
    ----------
    addr : Union[str, multiaddr.Multiaddr]
        The address where the IPFS daemon may be reached
    base : str
        The path prefix for API calls
    timeout : Union[numbers.Real, Tuple[numbers.Real, numbers.Real], NoneType]
        The default number of seconds to wait when establishing a connection to
        the daemon and waiting for returned data before throwing
        :exc:`~ipfshttpclient.exceptions.TimeoutError`; if the value is a tuple
        its contents will be interpreted as the values for the connection and
        receiving phases respectively, otherwise the value will apply to both
        phases; if the value is ``None`` then all timeouts will be disabled
    defaults : dict
        The default parameters to be passed to
        :meth:`~ipfshttpclient.http.HTTPClient.request`
    """
    # NOTE(review): `__metaclass__` only has an effect on Python 2; on
    # Python 3 this attribute is inert.
    __metaclass__ = abc.ABCMeta

    def __init__(self, addr, base, **defaults):
        addr = multiaddr.Multiaddr(addr)
        addr_iter = iter(addr.items())

        # Parse the `host`, `family`, `port` & `secure` values from the given
        # multiaddr, raising on unsupported `addr` values
        try:
            # Read host value
            proto, host = next(addr_iter)
            family = socket.AF_UNSPEC
            if proto.code in (P_IP4, P_DNS4):
                family = socket.AF_INET
            elif proto.code in (P_IP6, P_DNS6):
                family = socket.AF_INET6
            elif proto.code != P_DNS:
                raise exceptions.AddressError(addr)

            # Read port value
            proto, port = next(addr_iter)
            if proto.code != P_TCP:
                raise exceptions.AddressError(addr)

            # Read application-level protocol name
            secure = False
            try:
                proto, value = next(addr_iter)
            except StopIteration:
                pass
            else:
                if proto.code == P_HTTPS:
                    secure = True
                elif proto.code != P_HTTP:
                    raise exceptions.AddressError(addr)

            # No further values may follow; this also exhausts the iterator
            # BUGFIX: the generator must yield `False` for every leftover
            # item so that `all(...)` is only true when the iterator was
            # already empty.  The previous `all(True for _ in addr_iter)`
            # was unconditionally True, so trailing (unsupported) multiaddr
            # parts were silently accepted instead of raising AddressError.
            was_final = all(False for _ in addr_iter)
            if not was_final:
                raise exceptions.AddressError(addr)
        except StopIteration:
            # A mandatory multiaddr component (host or port) was missing
            six.raise_from(exceptions.AddressError(addr), None)

        # Convert the parsed `addr` values to a URL base and parameters
        # for `requests`; bare IPv6 addresses must be bracketed in URLs
        if ":" in host and not host.startswith("["):
            host = "[{0}]".format(host)
        self.base = urllib.parse.SplitResult(
            scheme   = "http" if not secure else "https",
            netloc   = "{0}:{1}".format(host, port),
            path     = base,
            query    = "",
            fragment = ""
        ).geturl()

        self._kwargs = {}
        if PATCH_REQUESTS:  # pragma: no branch (always enabled in production)
            # Force the detected address family onto the patched requests lib
            self._kwargs["family"] = family

        self.defaults = defaults
        self._session = None

    def open_session(self):
        """Open a persistent backend session that allows reusing HTTP
        connections between individual HTTP requests.

        It is an error to call this function if a session is already open."""
        assert self._session is None
        self._session = requests.Session()

    def close_session(self):
        """Close a session opened by
        :meth:`~ipfshttpclient.http.HTTPClient.open_session`.

        If there is no session currently open (ie: it was already closed), then
        this method does nothing."""
        if self._session is not None:
            self._session.close()
            self._session = None

    def _do_request(self, *args, **kwargs):
        # Perform one HTTP request, mapping transport-level errors onto the
        # package's own exception hierarchy.  Instance-level kwargs (e.g. the
        # socket family) act as defaults only.
        for name, value in self._kwargs.items():
            kwargs.setdefault(name, value)
        try:
            if self._session:
                return self._session.request(*args, **kwargs)
            else:
                return requests.request(*args, **kwargs)
        except (requests.ConnectTimeout, requests.Timeout) as error:
            six.raise_from(exceptions.TimeoutError(error), error)
        except requests.ConnectionError as error:
            six.raise_from(exceptions.ConnectionError(error), error)
        except http_client.HTTPException as error:
            six.raise_from(exceptions.ProtocolError(error), error)

    def _do_raise_for_status(self, response):
        # Translate HTTP error statuses into package exceptions, preferring
        # the daemon's own JSON error message when one can be decoded.
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as error:
            content = []
            try:
                decoder = encoding.get_encoding("json")
                for chunk in response.iter_content(chunk_size=None):
                    content += list(decoder.parse_partial(chunk))
                content += list(decoder.parse_finalize())
            except exceptions.DecodingError:
                # Body was not valid JSON — fall through to the generic error
                pass

            # If we have decoded an error response from the server,
            # use that as the exception message; otherwise, just pass
            # the exception on to the caller.
            if len(content) == 1 \
               and isinstance(content[0], dict) \
               and "Message" in content[0]:
                msg = content[0]["Message"]
                six.raise_from(exceptions.ErrorResponse(msg, error), error)
            else:
                six.raise_from(exceptions.StatusError(error), error)

    def _request(self, method, url, params, parser, stream=False, files=None,
                 headers={}, data=None, timeout=120):
        # NOTE(review): mutable default (`headers={}`) is shared between
        # calls — safe only because it is never mutated here.
        # Do HTTP request (synchronously)
        res = self._do_request(method, url, params=params, stream=stream,
                               files=files, headers=headers, data=data,
                               timeout=timeout)

        # Raise exception for response status
        # (optionally incorporating the response message, if applicable)
        self._do_raise_for_status(res)

        if stream:
            # Decode each item as it is read
            return StreamDecodeIterator(res, parser)
        else:
            # Decode received item immediately
            return stream_decode_full(res, parser)

    @pass_defaults
    def request(self, path,
                args=[], files=[], opts={}, stream=False,
                decoder=None, headers={}, data=None,
                timeout=120, offline=False, return_result=True):
        """Makes an HTTP request to the IPFS daemon.

        This function returns the contents of the HTTP response from the IPFS
        daemon.

        Raises
        ------
        ~ipfshttpclient.exceptions.ErrorResponse
        ~ipfshttpclient.exceptions.ConnectionError
        ~ipfshttpclient.exceptions.ProtocolError
        ~ipfshttpclient.exceptions.StatusError
        ~ipfshttpclient.exceptions.TimeoutError

        Parameters
        ----------
        path : str
            The REST command path to send
        args : list
            Positional parameters to be sent along with the HTTP request
        files : Union[str, io.RawIOBase, collections.abc.Iterable]
            The file object(s) or path(s) to stream to the daemon
        opts : dict
            Query string parameters to be sent along with the HTTP request
        decoder : str
            The encoder to use to parse the HTTP response
        timeout : float
            How many seconds to wait for the server to send data
            before giving up

            Defaults to 120
        offline : bool
            Execute request in offline mode, i.e. locally without accessing
            the network.
        return_result : bool
            Defaults to True. If the return is not relevant, such as in gc(),
            passing False will return None and avoid downloading results.
        """
        # NOTE(review): mutable defaults (`[]`/`{}`) are shared between
        # calls — safe only because they are never mutated here.
        url = self.base + path

        params = []
        params.append(('stream-channels', 'true'))
        if offline:
            params.append(('offline', 'true'))
        for opt in opts.items():
            params.append(opt)
        for arg in args:
            params.append(('arg', arg))

        if (files or data):
            method = 'post'
        elif not return_result:
            # HEAD avoids transferring a body we would throw away anyway
            method = 'head'
        else:
            method = 'get'

        # Don't attempt to decode response or stream
        # (which would keep an iterator open that will then never be waited for)
        if not return_result:
            decoder = None
            stream = False

        parser = encoding.get_encoding(decoder if decoder else "none")

        ret = self._request(method, url, params, parser, stream,
                            files, headers, data, timeout=timeout)

        return ret if return_result else None

    @pass_defaults
    def download(self, path, args=[], filepath=None, opts={},
                 compress=True, timeout=120, offline=False):
        """Makes a request to the IPFS daemon to download a file.

        Downloads a file or files from IPFS into the current working
        directory, or the directory given by ``filepath``.

        Raises
        ------
        ~ipfshttpclient.exceptions.ErrorResponse
        ~ipfshttpclient.exceptions.ConnectionError
        ~ipfshttpclient.exceptions.ProtocolError
        ~ipfshttpclient.exceptions.StatusError
        ~ipfshttpclient.exceptions.TimeoutError

        Parameters
        ----------
        path : str
            The REST command path to send
        filepath : str
            The local path where IPFS will store downloaded files

            Defaults to the current working directory.
        args : list
            Positional parameters to be sent along with the HTTP request
        opts : dict
            Query string parameters to be sent along with the HTTP request
        compress : bool
            Whether the downloaded file should be GZip compressed by the
            daemon before being sent to the client
        timeout : float
            How many seconds to wait for the server to send data
            before giving up

            Defaults to 120
        offline : bool
            Execute request in offline mode, i.e. locally without accessing
            the network.
        """
        url = self.base + path
        wd = filepath or '.'

        params = []
        params.append(('stream-channels', 'true'))
        if offline:
            params.append(('offline', 'true'))
        # The daemon always wraps downloads in a TAR archive
        params.append(('archive', 'true'))
        if compress:
            params.append(('compress', 'true'))

        for opt in opts.items():
            params.append(opt)
        for arg in args:
            params.append(('arg', arg))

        method = 'get'

        res = self._do_request(method, url, params=params, stream=True,
                               timeout=timeout)

        self._do_raise_for_status(res)

        # try to stream download as a tar file stream
        mode = 'r|gz' if compress else 'r|'

        # NOTE(review): `extractall` performs no path sanitization — a
        # malicious archive could write outside `wd`; the daemon is assumed
        # trusted here.
        with tarfile.open(fileobj=res.raw, mode=mode) as tf:
            tf.extractall(path=wd)
| [
"sanchaymittal@gmail.com"
] | sanchaymittal@gmail.com |
e3e459c1c3919e8cc75492427602caa0b3360f84 | e532534c78e1ad5bc465de2e5d9a64664fec3304 | /main.py | 973987ccd1b500abb184c4695c4f34328d9031b0 | [] | no_license | candragati/checklist-item | 3a5ad6bb995b9602da0eb9b72080243284b4e198 | 6106092cf493c5d652086d9400dfc43e6e98b176 | refs/heads/master | 2020-04-22T03:28:48.855186 | 2019-02-11T07:48:44 | 2019-02-11T07:48:44 | 170,088,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | from PyQt4 import QtGui
from raw_ui import main_ui
import barang
import produk
import report
import sys
class Main(QtGui.QMainWindow, main_ui.Ui_MainWindow):
    """Top-level application window hosting the MDI workspace.

    Builds the generated UI, wires the menu actions and starts maximized.
    """

    def __init__(self, parent=None):
        QtGui.QMainWindow.__init__(self)
        self.setupUi(self)
        self.aksi()
        self.showMaximized()

    def aksi(self):
        """Connect every menu action to its handler."""
        self.actionProduk.triggered.connect(self.onProduk)
        self.actionBarang.triggered.connect(self.onBarang)
        self.actionCompare.triggered.connect(self.onReport)

    def _open_subwindow(self, child):
        # Attach the child widget to the MDI area and make it visible.
        self.mdiArea.addSubWindow(child)
        child.show()

    def onReport(self):
        """Open the compare/report sub-window."""
        self._open_subwindow(report.Main())

    def onProduk(self):
        """Open the product management sub-window."""
        self._open_subwindow(produk.Main())

    def onBarang(self):
        """Open the item management sub-window."""
        self._open_subwindow(barang.Main())
if __name__ == '__main__':
    # Script entry point: create the Qt application, show the main window
    # and hand control over to the event loop until the user quits.
    qt_app = QtGui.QApplication(sys.argv)
    window = Main()
    window.show()
    sys.exit(qt_app.exec_())
| [
"candragati@gmail.com"
] | candragati@gmail.com |
562460c8fef5ebb01d175a1df57c68cd66708063 | 416e7aa65502b0d7a381221e6b8ef87d9f6732c4 | /flask-mvc-3/app/__init__.py | 878282f1d4eddbadaddebfe6df214e0b822fffcf | [] | no_license | DemchyshynV/Flask | 56f554e478293bed7ae87638fa264af3c0dc4b77 | f561b7e189b7aed8cadf1ba6a9dd974e2ee7d093 | refs/heads/master | 2023-02-04T14:29:52.131647 | 2020-12-23T02:22:36 | 2020-12-23T02:22:36 | 323,781,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import DevConfig
app = Flask(__name__)
app.config.from_object(DevConfig)
db = SQLAlchemy(app)
from app import views
| [
"K1l@t1V1"
] | K1l@t1V1 |
a340ad4177b64261692dc78f4e98d1899fc65d5d | 2157b0545e60190915d6b70e7207472c77231595 | /restaurant/tests/test_restaurant.py | f4ca4b6c9cf38ce3f0ba5ece1b7bf9af52df023c | [] | no_license | Squad1ASE/restaurant | b8153ebd45eb8ea7a7170c111bc2d69fd62f543e | f9641e0c31aec3b839133a2bb8df50bb5f67cd58 | refs/heads/main | 2023-01-19T03:38:20.318180 | 2020-11-25T11:43:34 | 2020-11-25T11:43:34 | 313,589,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93,824 | py | from tests.conftest import test_app
from database import db_session, Restaurant, Table, Dish, WorkingDay, RestaurantDeleted
from sqlalchemy import exc
from tests.utilities import *
def _check_restaurants(restaurant, dict_restaurant, to_serialize=False):
    """Assert that *restaurant* matches the expected *dict_restaurant*.

    Nested collections (tables, dishes, working days) are sorted before
    comparison so ordering does not matter; when *to_serialize* is true the
    expected nested objects are serialized first.  Fields absent from the
    expected dict (capacity, tot_reviews, avg_rating, likes) are checked
    against their derived/default values.  Note: both arguments are mutated
    in place (nested lists are replaced by their sorted versions).
    """
    if not isinstance(restaurant, dict):
        restaurant = restaurant.serialize()

    nested_keys = ('tables', 'dishes', 'working_days')
    expected_capacity = 0

    for key in dict_restaurant.keys():
        if key not in nested_keys:
            # Plain scalar field: direct equality
            assert restaurant[key] == dict_restaurant[key]
            continue

        if to_serialize:
            dict_restaurant[key] = [p.serialize() for p in dict_restaurant[key]]

        # Working days sort by weekday, the other collections by name
        sort_field = 'day' if key == 'working_days' else 'name'
        dict_restaurant[key] = sorted(dict_restaurant[key], key=lambda item: item[sort_field])
        restaurant[key] = sorted(restaurant[key], key=lambda item: item[sort_field])

        for position, expected in enumerate(dict_restaurant[key]):
            actual = restaurant[key][position]
            for field in expected:
                assert actual[field] == expected[field]
            if key == 'tables':
                expected_capacity += expected['capacity']

    # Fields not supplied explicitly must hold their derived/default values
    if 'capacity' not in dict_restaurant:
        assert restaurant['capacity'] == expected_capacity
    for derived_field in ('tot_reviews', 'avg_rating', 'likes'):
        if derived_field not in dict_restaurant:
            assert restaurant[derived_field] == 0
def test_insertDB_restaurant(test_app):
    """Exercise the Restaurant model's validators and DB constraints.

    Three phases: (1) valid restaurants are inserted and read back,
    (2) invalid field values must raise ValueError in the constructor,
    (3) missing mandatory fields must fail at commit time with an
    IntegrityError/InvalidRequestError.
    """
    app, test_client = test_app

    tot_correct_restaurants = 0
    tot_correct_tables = 0
    tot_correct_dishes = 0

    # Shared nested fixtures reused by the valid restaurants below
    tables = [
        Table(**dict(capacity = 2, name = 'yellow')),
        Table(**dict(capacity = 5, name = 'blue'))
    ]
    dishes = [
        Dish(**dict(name = 'pizza', price = 4.0, ingredients = 'pomodoro, mozzarella')),
        Dish(**dict(name = 'pasta', price = 6.0, ingredients = 'mozzarella')),
    ]
    working_days = [
        WorkingDay(**dict(day = 'friday', work_shifts = [['12:00','15:00'],['19:00','23:00']])),
        WorkingDay(**dict(day = 'saturday', work_shifts = [['12:00','15:00'],['19:00','23:00']])),
    ]

    # correct restaurants pt1 — with full, partial and no nested collections
    correct_restaurants = [
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0,
             tables = tables, dishes = dishes, working_days = working_days
        ),
        dict(owner_id = 1, name = 'T', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional','italian'],
             capacity = 1, prec_measures = '', avg_time_of_stay = 15, tables = [tables[0]], dishes = [dishes[0]], working_days = [working_days[0]]
        ),
        dict(owner_id = 1, name = 'T', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional','italian'],
             capacity = 1, prec_measures = '', avg_time_of_stay = 15
        )
    ]

    # Insert each valid restaurant and verify it round-trips through the DB
    for idx, r in enumerate(correct_restaurants):
        restaurant = Restaurant(**r)
        db_session.add(restaurant)
        db_session.commit()
        restaurant_to_check = db_session.query(Restaurant).filter(Restaurant.id == restaurant.id).first()
        assert restaurant_to_check is not None
        _check_restaurants(restaurant_to_check, correct_restaurants[idx], True)
    tot_correct_restaurants += len(correct_restaurants)

    # incorrect restaurants pt1 - fail check validators
    # (each dict varies exactly one field; the constructor must raise)
    incorrect_restaurants = [
        # owner_id
        dict(owner_id = None, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 0, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = -1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 'a', name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = ['a'], name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = [], name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        # name
        dict(owner_id = 1, name = None, lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = '', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 1, lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = [], lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = ['a'], lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        # lat
        dict(owner_id = 1, name = 'Trial', lat = 'a', lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = None, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = [], lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = ['a'], lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        # lon
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = None, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 'a', phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = [], phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = ['a'], phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        # phone
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = None, cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = 3, cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = ['a'], cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = [], cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        # cuisine_type
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = None,
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = [],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = 'traditional',
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditionalll'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = 2,
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        # capacity
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = None, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 0, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = -1, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 'a', prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = [], prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = ['a'], prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        # prec_measures
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = None, avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 2, avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = [], avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = ['a'], avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        # avg_time_of_stay (minimum allowed value is presumably 15 — the
        # `14` case below is expected to fail validation; TODO confirm)
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 14, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = -1, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 0, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = None, tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 'a', tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = ['a'], tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = [], tot_reviews = 0, avg_rating = 0, likes = 0
        ),
        # tot_reviews
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = None, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = -1, avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 'a', avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = [], avg_rating = 0, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = ['a'], avg_rating = 0, likes = 0
        ),
        # avg_rating (valid range is presumably 0..5 — TODO confirm)
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = -1, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 5.1, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = -0.1, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = None, likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 'a', likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = [], likes = 0
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = ['a'], likes = 0
        ),
        # likes
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = None
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = -1
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = 'a'
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = []
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30, tot_reviews = 0, avg_rating = 0, likes = ['a']
        )
    ]

    # Every invalid value must be rejected by the model's validators
    count_assert = 0
    for r in incorrect_restaurants:
        try:
            restaurant = Restaurant(**r)
        except ValueError:
            count_assert += 1
            assert True
    assert len(incorrect_restaurants) == count_assert

    # incorrect restaurants pt2 - missing mandatory fields
    # (constructor accepts these; the database commit must fail instead)
    incorrect_restaurants = [
        dict(name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30
        ),
        dict(owner_id = 1, lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30
        ),
        dict(owner_id = 1, name = 'Trial', lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121',
             capacity = 10, prec_measures = 'leggeX', avg_time_of_stay = 30
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             prec_measures = 'leggeX', avg_time_of_stay = 30
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, avg_time_of_stay = 30
        ),
        dict(owner_id = 1, name = 'Trial', lat = 22, lon = 22, phone = '3346734121', cuisine_type = ['traditional'],
             capacity = 10, prec_measures = 'leggeX'
        )
    ]
    count_assert = 0
    for r in incorrect_restaurants:
        restaurant = Restaurant(**r)
        try:
            db_session.add(restaurant)
            db_session.commit()
        except (exc.IntegrityError, exc.InvalidRequestError):
            # Rollback so the session stays usable for the next attempt
            db_session.rollback()
            count_assert += 1
            assert True
    assert len(incorrect_restaurants) == count_assert

    # check total restaurants — only the valid ones must have been persisted
    restaurants = db_session.query(Restaurant).all()
    assert len(restaurants) == tot_correct_restaurants
def test_create_restaurant(test_app):
app, test_client = test_app
tot_correct_tables = 0
tot_correct_dishes = 0
tot_correct_wds = 0
# correct restaurants
for idx, r in enumerate(restaurant_examples):
assert create_restaurant_by_API(test_client, r).status_code == 200
tot_correct_tables += len(r['tables'])
tot_correct_dishes += len(r['dishes'])
tot_correct_wds += len(r['working_days'])
# assuming all restaurants' name are distinct in restaurant_examples
restaurant_to_check = db_session.query(Restaurant).filter(Restaurant.name == r['name']).first()
assert restaurant_to_check is not None
_check_restaurants(restaurant_to_check, r)
# incorrect restaurants
incorrect_restaurants = [
# fields that must not be present are
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)], id=2,
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)], capacity=30,
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)], tot_reviews=1,
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)], avg_rating=1,
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)], likes=1,
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
# incorrect restaurant fields
# owner_id
dict(name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=None, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=0, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=-1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id='a', name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=['a'], name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=[], name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
# name
dict(owner_id=1, lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name=None, lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name=1, lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name=[], lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name=['a'], lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
# lat
dict(owner_id=1, name='Restaurant 1', lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat='a', lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=None, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=[], lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=['a'], lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
# lon
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=None, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon='a', phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=[], phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=['a'], phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
# phone
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609,
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone=None,
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone=3,
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone=['a'],
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone=[],
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
# cuisine_type
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=None, prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=[], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type="italian", prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italiannnnn"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=2, prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
# prec_measures
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures=None, avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures=2, avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures=[], avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures=['a'], avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
# avg_time_of_stay
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX',
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=14,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=-1,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=0,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=None,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay='a',
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=['a'],
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=[],
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
# incorrect tables fields
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict()],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=['yellow',3],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow')],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name=None,capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name=2,capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name=[],capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name=['a'],capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=None)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=0)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=-1)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=['a'])],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity='a')],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=[])],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(restaurant_id=2,name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(id=3,name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
# incorrect dishes fields
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict()],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=['pizza',4.5,'tomato,mozzarella'],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5)],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name=None,price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name=2,price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name=[],price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name=['a'],price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=0,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=-1,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=None,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price='a',ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=[],ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=['a'],ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients=None)],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients=3)],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients=[])],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients=['a'])],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(id=3,name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(restaurant_id=2,name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "23:59"]])]
),
# incorrect working days fields
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=None
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict()]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday')]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day=None,work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='',work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day=3,work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day=['a'],work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day=[],work_shifts=[["00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=None)]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[[1, 2]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["10:01", "12:59"],["16:01", "19:59"],["21:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["15:00", "15:00"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01 ", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:010", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[[" 00:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["000:01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00-01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00/01", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["10:00", "10:00"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["10:01", "10:00"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["12:01", "14:59"],["14:59", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["12:01", "14:59"],["14:58", "23:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(restaurant_id=2,day='monday',work_shifts=[["00:01", "14:59"]])]
),
dict(owner_id=1, name='Restaurant 1', lat=43.4702169, lon=11.152609, phone='333333',
cuisine_type=["italian", "chinese"], prec_measures='lawX', avg_time_of_stay=15,
tables=[dict(name='yellow',capacity=3)],
dishes=[dict(name='pizza',price=4.5,ingredients='tomato,mozzarella')],
working_days=[dict(day='monday',work_shifts=[["00:01", "14:59"]]),dict(day='monday',work_shifts=[["00:01", "14:59"]])]
)
]
for r in incorrect_restaurants:
assert create_restaurant_by_API(test_client, r).status_code == 400
#check total restaurants/tables/dishes/working_days
restaurants = db_session.query(Restaurant).all()
assert len(restaurants) == len(restaurant_examples)
tables = db_session.query(Table).all()
assert len(tables) == tot_correct_tables
dishes = db_session.query(Dish).all()
assert len(dishes) == tot_correct_dishes
wds = db_session.query(WorkingDay).all()
assert len(wds) == tot_correct_wds
def test_get_restaurants(test_app):
    """GET /restaurants: empty DB, full listing, bad query params, filters."""
    app, test_client = test_app

    def assert_same_restaurants(expected, actual):
        # Compare two restaurant collections irrespective of ordering.
        assert len(expected) == len(actual)
        by_name = lambda k: k['name']
        for want, got in zip(sorted(expected, key=by_name), sorted(actual, key=by_name)):
            _check_restaurants(got, want)

    # an empty database yields an empty JSON list
    response = get_restaurants_by_API(test_client)
    assert response.status_code == 200
    assert response.json == []
    # populate the database with the example restaurants
    for example in restaurant_examples:
        assert create_restaurant_by_API(test_client, example).status_code == 200
    # an unfiltered GET returns every restaurant
    response = get_restaurants_by_API(test_client)
    assert response.status_code == 200
    assert_same_restaurants(restaurant_examples, response.json)
    # malformed query parameters are rejected with 400
    bad_calls = [
        (0,), (-1,), ('a',), ([],), (['a'],),
        (None, None, 1), (None, None, 'a'), (None, None, []), (None, None, ['a']),
        (None, None, None, 1), (None, None, None, 'a'),
        (None, None, None, []), (None, None, None, ['a']),
        (None, None, 1, 'a'), (None, None, 'a', 1),
    ]
    for args in bad_calls:
        assert get_restaurants_by_API(test_client, *args).status_code == 400
    # filter by owner id
    owner_id = restaurant_examples[0]['owner_id']
    response = get_restaurants_by_API(test_client, owner_id)
    assert response.status_code == 200
    expected = [r for r in restaurant_examples if r['owner_id'] == owner_id]
    assert_same_restaurants(expected, response.json)
    # filter by name substring
    response = get_restaurants_by_API(test_client, None, '-')
    assert response.status_code == 200
    expected = [r for r in restaurant_examples if '-' in r['name']]
    assert_same_restaurants(expected, response.json)
    # filter by position (lat/lon):
    # the first two restaurants in restaurant_examples should be relatively close
    response = get_restaurants_by_API(
        test_client, None, None,
        restaurant_examples[0]['lat'], restaurant_examples[0]['lon'])
    assert response.status_code == 200
    assert_same_restaurants(restaurant_examples[:2], response.json)
    # filter by cuisine type (any overlap with the requested list)
    response = get_restaurants_by_API(test_client, None, None, None, None, ['italian', 'pizzeria'])
    assert response.status_code == 200
    expected = [r for r in restaurant_examples
                if any(c in r['cuisine_type'] for c in ['italian', 'pizzeria'])]
    assert_same_restaurants(expected, response.json)
    # all filters combined pinpoint exactly the first example restaurant
    response = get_restaurants_by_API(
        test_client, restaurant_examples[0]['owner_id'], restaurant_examples[0]['name'],
        restaurant_examples[0]['lat'], restaurant_examples[0]['lon'], ['italian', 'pizzeria']
    )
    assert response.status_code == 200
    assert len(response.json) == 1
    _check_restaurants(response.json[0], restaurant_examples[0])
def test_delete_restaurants(test_app):
    """DELETE /restaurants: payload validation and cascade deletion by owner."""
    app, test_client = test_app

    def delete_by_owner(payload):
        # Issue a bulk delete with the given JSON body.
        return test_client.delete('/restaurants', json=payload, follow_redirects=True)

    # populate the database with the example restaurants
    for example in restaurant_examples:
        assert create_restaurant_by_API(test_client, example).status_code == 200
    # malformed request bodies are rejected with 400
    for bad_payload in (dict(owner_id=0), dict(owner_id=-1), dict(owner_id='a'),
                        dict(owner_id=[]), dict()):
        assert delete_by_owner(bad_payload).status_code == 400
    assert test_client.delete('/restaurants', follow_redirects=True).status_code == 400
    # deleting by the first owner removes that owner's restaurants and,
    # by cascade, all their tables, dishes and working days
    owner_id = restaurant_examples[0]['owner_id']
    assert delete_by_owner(dict(owner_id=owner_id)).status_code == 200
    deleted = [r for r in restaurant_examples if r['owner_id'] == owner_id]
    survivors = db_session.query(Restaurant).all()
    assert len(survivors) == len(restaurant_examples) - len(deleted)
    assert len(db_session.query(Table).all()) == sum(len(r.tables) for r in survivors)
    assert len(db_session.query(Dish).all()) == sum(len(r.dishes) for r in survivors)
    assert len(db_session.query(WorkingDay).all()) == sum(len(r.working_days) for r in survivors)
    # deleted restaurants are archived in RestaurantDeleted
    assert len(db_session.query(RestaurantDeleted).all()) == len(deleted)
    # an owner without restaurants is still a successful (no-op) delete
    assert delete_by_owner(dict(owner_id=9999)).status_code == 200
    # extra meaningless body fields are tolerated
    assert delete_by_owner(dict(owner_id=restaurant_examples[1]['owner_id'],
                                trial='hello')).status_code == 200
    # deleting the restaurants of the owners of the first two examples
    # covers every example, so nothing must remain in the live tables
    for model in (Restaurant, Table, Dish, WorkingDay):
        assert len(db_session.query(model).all()) == 0
    assert len(db_session.query(RestaurantDeleted).all()) == len(restaurant_examples)
def test_get_restaurant(test_app):
    """GET /restaurants/<id>: missing and malformed ids, then successful lookups."""
    app, test_client = test_app
    # an id that does not exist yet yields 404
    assert get_restaurant_by_API(test_client, 1).status_code == 404
    # id 0 is rejected as invalid; other malformed ids fall through to 404
    assert get_restaurant_by_API(test_client, 0).status_code == 400
    for bad_id in (-1, 'a', ['a'], []):
        assert get_restaurant_by_API(test_client, bad_id).status_code == 404
    # populate the database with the example restaurants
    for example in restaurant_examples:
        assert create_restaurant_by_API(test_client, example).status_code == 200
    # ids are assigned sequentially starting from 1
    for restaurant_id, example in enumerate(restaurant_examples, start=1):
        response = get_restaurant_by_API(test_client, restaurant_id)
        assert response.status_code == 200
        _check_restaurants(response.json, example)
def test_edit_restaurant(test_app):
    """PUT /restaurants/<id>: payload validation, ownership check, partial edits.

    Only ``phone`` and ``dishes`` are editable; any other field sent in the
    payload must be ignored by the API.
    """
    # Local import: only this test needs deepcopy (see note at "correct pt1").
    from copy import deepcopy
    app, test_client = test_app
    owner_id = restaurant_examples[0]['owner_id']
    edit_dict = dict(
        owner_id=owner_id,
        phone='3243243434',
        dishes=[
            dict(name='pizza2',price=4.5,ingredients='tomato,mozzarella'),
            dict(name='pasta2',price=6.5,ingredients='tomato'),
            dict(name='pizza3',price=4.5,ingredients='tomato,mozzarella'),
            dict(name='pasta3',price=6.5,ingredients='tomato')
        ]
    )
    # incorrect edit - restaurant_id does not exist yet
    assert edit_restaurant_by_API(test_client, 1, edit_dict).status_code == 404
    # create one restaurant to edit
    assert create_restaurant_by_API(test_client, restaurant_examples[0]).status_code == 200
    # incorrect edit - malformed restaurant_id
    assert edit_restaurant_by_API(test_client, 0, edit_dict).status_code == 400
    assert edit_restaurant_by_API(test_client, -1, edit_dict).status_code == 404
    assert edit_restaurant_by_API(test_client, 'a', edit_dict).status_code == 404
    assert edit_restaurant_by_API(test_client, ['a'], edit_dict).status_code == 404
    assert edit_restaurant_by_API(test_client, [], edit_dict).status_code == 404
    # incorrect edit - malformed request bodies (missing/blank/mistyped fields)
    incorrect_dicts = [
        dict(phone='3243243434', dishes=[dict(name='pizza2',price=4.5,ingredients='tomato,mozzarella')]),
        dict(owner_id=owner_id),
        dict(owner_id=owner_id, phone='3243243434', dishes=[]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict()]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(name=None,price=4.5,ingredients='tomato,mozzarella')]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(name='',price=4.5,ingredients='tomato,mozzarella')]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(name=1,price=4.5,ingredients='tomato,mozzarella')]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(name=[],price=4.5,ingredients='tomato,mozzarella')]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(name=['a'],price=4.5,ingredients='tomato,mozzarella')]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(name='pizza2',price=None,ingredients='tomato,mozzarella')]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(name='pizza2',price=0,ingredients='tomato,mozzarella')]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(name='pizza2',price=-1,ingredients='tomato,mozzarella')]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(name='pizza2',price='a',ingredients='tomato,mozzarella')]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(name='pizza2',price=[],ingredients='tomato,mozzarella')]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(name='pizza2',price=['a'],ingredients='tomato,mozzarella')]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(name='pizza2',price=4.5,ingredients=None)]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(name='pizza2',price=4.5,ingredients='')]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(name='pizza2',price=4.5,ingredients=2)]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(name='pizza2',price=4.5,ingredients=[])]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(name='pizza2',price=4.5,ingredients=['a'])]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(price=4.5,ingredients='tomato,mozzarella')]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(name='pizza2',ingredients='tomato,mozzarella')]),
        dict(owner_id=owner_id, phone='3243243434', dishes=[dict(name='pizza2',price=4.5)])
    ]
    for d in incorrect_dicts:
        assert edit_restaurant_by_API(test_client, 1, d).status_code == 400
    # incorrect edit - owner_id is not the restaurant's owner
    d = dict(
        owner_id=9999, phone='3243243434',
        dishes=[dict(name='pizza2',price=4.5,ingredients='tomato,mozzarella')]
    )
    assert edit_restaurant_by_API(test_client, 1, d).status_code == 403
    # correct pt1 - edit phone and dishes together.
    # Deep-copy the example: the original code aliased restaurant_examples[0]
    # and mutated the shared module-level fixture in place, leaking the edited
    # phone/dishes into every later test that reuses restaurant_examples.
    restaurant_edited = deepcopy(restaurant_examples[0])
    restaurant_edited['phone'] = edit_dict['phone']
    restaurant_edited['dishes'] = edit_dict['dishes']
    assert edit_restaurant_by_API(test_client, 1, edit_dict).status_code == 200
    q = db_session.query(Dish).all()
    assert len(q) == len(edit_dict['dishes'])
    restaurant = db_session.query(Restaurant).first()
    _check_restaurants(restaurant, restaurant_edited)
    # correct pt2 - ok but meaningless: unknown field only, nothing changes
    assert edit_restaurant_by_API(test_client, 1, dict(owner_id=owner_id, trial='aaaa')).status_code == 200
    q = db_session.query(Dish).all()
    assert len(q) == len(edit_dict['dishes'])
    restaurant = db_session.query(Restaurant).first()
    _check_restaurants(restaurant, restaurant_edited)
    # correct pt3 - only phone
    restaurant_edited['phone'] = '111'
    assert edit_restaurant_by_API(test_client, 1, dict(owner_id=owner_id,phone='111')).status_code == 200
    q = db_session.query(Dish).all()
    assert len(q) == len(edit_dict['dishes'])
    restaurant = db_session.query(Restaurant).first()
    _check_restaurants(restaurant, restaurant_edited)
    # correct pt4 - only dishes (replaces the whole dish list)
    restaurant_edited['dishes'] = [dict(name='pizza2',price=4.5,ingredients='tomato,mozzarella')]
    assert edit_restaurant_by_API(test_client, 1, dict(owner_id=owner_id,dishes=restaurant_edited['dishes'])).status_code == 200
    q = db_session.query(Dish).all()
    assert len(q) == len(restaurant_edited['dishes'])
    restaurant = db_session.query(Restaurant).first()
    _check_restaurants(restaurant, restaurant_edited)
    # correct pt5 - phone, dishes and an additional property that must be ignored
    edit_dict = dict(
        owner_id=owner_id,
        phone='3243243434',
        dishes=[
            dict(name='pizza2',price=4.5,ingredients='tomato,mozzarella'),
            dict(name='pasta2',price=6.5,ingredients='tomato'),
            dict(name='pizza3',price=4.5,ingredients='tomato,mozzarella'),
            dict(name='pasta3',price=6.5,ingredients='tomato')
        ],
        # NOTE(review): 'capcity' looks like a typo for 'capacity'; either way
        # the API must ignore unknown keys here — confirm the intended key.
        capcity=100
    )
    restaurant_edited['phone'] = edit_dict['phone']
    restaurant_edited['dishes'] = edit_dict['dishes']
    assert edit_restaurant_by_API(test_client, 1, edit_dict).status_code == 200
    q = db_session.query(Dish).all()
    assert len(q) == len(edit_dict['dishes'])
    restaurant = db_session.query(Restaurant).first()
    _check_restaurants(restaurant, restaurant_edited)
def test_delete_restaurant(test_app):
    """DELETE /restaurants/<id>: body validation, ownership check, cascades."""
    app, test_client = test_app
    # deleting a restaurant that does not exist yet yields 404
    assert delete_restaurant_by_API(test_client, 1, 1).status_code == 404
    # populate the database with the example restaurants
    for example in restaurant_examples:
        assert create_restaurant_by_API(test_client, example).status_code == 200
    # malformed owner ids in the body are rejected with 400
    for bad_owner in (0, -1, 'a', [], None, ['a']):
        assert delete_restaurant_by_API(test_client, 1, bad_owner).status_code == 400
    assert test_client.delete('/restaurants/1', follow_redirects=True).status_code == 400
    # an owner_id that is not the restaurant's owner is forbidden
    assert delete_restaurant_by_API(test_client, 1, 9999).status_code == 403

    def assert_db_state(deleted):
        # Survivors (and all their children) must still be stored;
        # deleted restaurants must be archived in RestaurantDeleted.
        survivors = db_session.query(Restaurant).all()
        assert len(survivors) == len(restaurant_examples) - len(deleted)
        assert len(db_session.query(Table).all()) == sum(len(r.tables) for r in survivors)
        assert len(db_session.query(Dish).all()) == sum(len(r.dishes) for r in survivors)
        assert len(db_session.query(WorkingDay).all()) == sum(len(r.working_days) for r in survivors)
        assert len(db_session.query(RestaurantDeleted).all()) == len(deleted)

    # deleting restaurant 1 with the correct owner removes it and its children
    assert delete_restaurant_by_API(test_client, 1, restaurant_examples[0]['owner_id']).status_code == 200
    assert_db_state([restaurant_examples[0]])
    # extra meaningless body fields are tolerated
    assert test_client.delete(
        '/restaurants/2',
        json=dict(owner_id=restaurant_examples[1]['owner_id'], trial='hello'),
        follow_redirects=True).status_code == 200
    assert_db_state([restaurant_examples[0], restaurant_examples[1]])
"emiliopanti96@gmail.com"
] | emiliopanti96@gmail.com |
85d885154a183827ad6b137124af59056d4ab8c2 | 1a4689cdac7c5aa604ea896f83ec2166651ecdfc | /reconhecedor_lbph.py | 84618f6614d710beade16604595e61a69e832254 | [] | no_license | paulobressan/opencv-reconhecimento-facial | 189b7de105c2b0dc4582bd607c2f5d5bc25b3daf | 6dcc328485cc62ecf1f3add4b451d05c16a9250b | refs/heads/master | 2021-10-16T00:26:48.177623 | 2019-02-07T12:14:55 | 2019-02-07T12:14:55 | 169,281,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,897 | py | import cv2
def nomePorId(id):
    """Return the person's name for a recognizer label id.

    Unknown ids fall back to the placeholder string 'Boiola'.
    Bug fixed: the original else branch evaluated the string without
    ``return``, so unknown ids returned None (which later crashes
    ``cv2.putText``, since it requires a string).
    """
    nomes = {1: 'Paulo', 2: 'Marcelo', 3: 'Alex'}
    return nomes.get(id, 'Boiola')
# face detector: Haar cascade for frontal faces (cascade XML expected in the CWD)
detectorFace = cv2.CascadeClassifier('haarcascade-frontalface-default.xml')
# create the LBPH (Local Binary Patterns Histograms) face recognizer
reconhecedor = cv2.face.LBPHFaceRecognizer_create()
# load the trained model file into the recognizer
reconhecedor.read('classificadorLbph.yml')
# dimensions the detected face is resized to — presumably matches the
# size used at training time; confirm against the training script
largura, altura = 220, 220
# font used to draw text on the frame
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
# start capturing frames from the default webcam (device 0)
camera = cv2.VideoCapture(0)
while True:
    # grab one frame from the webcam
    conectado, imagem = camera.read()
    # convert the frame to grayscale (cascade detection and LBPH work on gray images)
    imagemCinza = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)
    # detect faces in the gray frame with scale factor 1.5 and a 30x30 minimum size
    facesDetectadas = detectorFace.detectMultiScale(
        imagemCinza, scaleFactor=1.5, minSize=(30, 30))
    # iterate over the detected face rectangles (x, y, width, height)
    for x, y, l, a in facesDetectadas:
        # crop the face region and resize it to the model's input dimensions
        imagemFace = cv2.resize(imagemCinza[y:y+a, x:x + l], (largura, altura))
        # draw a red rectangle around the face on the color frame
        cv2.rectangle(imagem, (x, y), (x + l, y + a), (0, 0, 255), 2)
        # predict the label with the LBPH model trained in treinamento.py;
        # confianca is the LBPH distance — presumably lower means a closer
        # match; confirm against the OpenCV docs
        id, confianca = reconhecedor.predict(imagemFace)
        # write the recognized name and the confidence value below the face
        cv2.putText(imagem, nomePorId(id),
                    (x, y + (a + 30)), font, 2, (0, 0, 255))
        cv2.putText(imagem, str(confianca),
                    (x, y + (a + 50)), font, 2, (0, 0, 255))
    cv2.imshow("Face", imagem)
    cv2.waitKey(1)
# NOTE(review): unreachable — the loop above never breaks; consider breaking
# on a key press (e.g. when cv2.waitKey returns 'q') so cleanup actually runs.
camera.release()
cv2.destroyAllWindows()
| [
"paulo.bressan@outlook.com"
] | paulo.bressan@outlook.com |
5e34be6b1e058e695e7f2d7faa7c251fd65e8935 | 54be379de2b913849df6002121e986bb14585d66 | /RF_ROP.py | ae4b8260f2fc596f4ed22034a06438e6d801b845 | [] | no_license | chokenkill/unsuper_learn | 673891abad6a39e631938ff17645f761c08e64f8 | eac6b5f0de33866a9b7b364457de112e4d008a8e | refs/heads/master | 2020-03-30T16:45:08.047880 | 2018-11-12T04:44:43 | 2018-11-12T04:44:43 | 151,424,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,424 | py | import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import LeaveOneOut
from sklearn.metrics import mean_squared_error
from math import sqrt
path = "/Users/av/Documents/Drilling/unsuper_learn/"
# All Depth Converted Data
Alpha = pd.read_csv(path+"AlphaDepthConvertedData.csv")
Bravo = pd.read_csv(path+"BravoDepthConvertedData.csv")
Charlie = pd.read_csv(path+"CharlieDepthConvertedData.csv")
Delta = pd.read_csv(path+"DeltaDepthConvertedData.csv")
Foxtrot = pd.read_csv(path+"FoxtrotDepthConvertedData.csv")
Alpha.name = 'Alpha'
Bravo.name = 'Bravo'
Charlie.name = 'Charlie'
Delta.name = 'Delta'
Foxtrot.name = 'Foxtrot'
dfs = [Alpha, Bravo, Charlie, Delta, Foxtrot]
loo = LeaveOneOut()
for train_idx, test_idx in loo.split(dfs): # train on 4 wells and test on the 5th
print("TRAIN:", train_idx, "TEST:", test_idx)
train_set = pd.concat(dfs[i] for i in train_idx.tolist())
test_set = dfs[test_idx.tolist()[0]]
# The paper used WOB, RPM, and flow rate of drilling mud
# I don't know what the deal with the cleansed values is, but they are all that's in this data set
# If nothing else, gives us a starting point
features_list = features_list = list(train_set[["RT 01S VC WEIGHT ON BIT CLEANSED VALUE", "RT 01S SURFACE TORQUE CLEANSED VALUE",
"RT 01S SURFACE RPM CLEANSED VALUE", "RT 01S FLOW RATE OUT CLEANSED VALUE"]].columns)
train_features = np.array(train_set[["RT 01S VC WEIGHT ON BIT CLEANSED VALUE", "RT 01S SURFACE TORQUE CLEANSED VALUE",
"RT 01S SURFACE RPM CLEANSED VALUE", "RT 01S FLOW RATE OUT CLEANSED VALUE"]])
train_labels = np.array(train_set['RT 01S VC ON BOTTOM ROP'])
test_features = np.array(test_set[["RT 01S VC WEIGHT ON BIT CLEANSED VALUE", "RT 01S SURFACE TORQUE CLEANSED VALUE",
"RT 01S SURFACE RPM CLEANSED VALUE", "RT 01S FLOW RATE OUT CLEANSED VALUE"]])
test_labels = np.array(test_set['RT 01S VC ON BOTTOM ROP'])
rf = RandomForestRegressor(n_estimators=100, random_state=42) # tune n_estimators with hyperparameter optimization later
rf.fit(train_features, train_labels)
predictions = rf.predict(test_features)
rms = sqrt(mean_squared_error(test_labels, predictions))
| [
"Anthony.Vrotsos@utexas.edu"
] | Anthony.Vrotsos@utexas.edu |
9d11213ea2c2b6bc4de9d7aa551d211f2ad8d7d4 | d631dc5493b14cead84131d553ac7142426e8a29 | /src/snuway/wsgi.py | 241e720b2a1358d6b7482230ea2beaac337be792 | [] | no_license | oseolgi/snuway_final | d890ccf8b9b28fe9ef1d93283a2314a9ebd285c8 | 0c32cf046d75a1ab39e52c71c3b3b92c65ba6db3 | refs/heads/master | 2021-01-18T11:45:23.396713 | 2016-08-12T04:00:12 | 2016-08-12T04:00:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for snuway project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "snuway.settings")
application = get_wsgi_application()
| [
"heun0108@gmail.com"
] | heun0108@gmail.com |
f44ea758ed720263d283bc572d243668746e24d4 | e99e690e276be83b1a1f1c24c4518b572e69ec3f | /euler19.py | 0344fadcc8bc5a30039fc1c70679af64cd713709 | [] | no_license | SenorNoName/projectEuler | 9e30689c66bc7f38b2916039dd078dbb1de6d482 | d64d5dd26ce1016b6dd7ee881be9ff7cca09d4f7 | refs/heads/main | 2023-06-29T20:17:06.660261 | 2021-08-05T00:40:24 | 2021-08-05T00:40:24 | 392,859,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | '''
You are given the following information, but you may prefer to do some research for yourself.
1 Jan 1900 was a Monday.
Thirty days has September,
April, June and November.
All the rest have thirty-one,
Saving February alone,
Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
'''
import datetime
from datetime import date
min = date(1901, 1, 1)
max = date(2000, 12, 31)
delta = max - min
date = datetime.date(1901, 1, 1)
numDays = 0
for i in range(delta.days + 1):
if date.weekday() == 6 and date.day == 1:
numDays += 1
date += datetime.timedelta(days = 1)
print(numDays)
| [
"SenorNoName@users.noreply.github.com"
] | SenorNoName@users.noreply.github.com |
afc16cfd08f4740ded1e2204955aaa45052b1d30 | 539518e5c97eae9485ac30b3993677ef39c22fff | /base/admin.py | a646985f0d2dac47f5d66b63040cf7474cf5d74e | [] | no_license | hackathon-das-fronteiras/jogos-amazonicos | d64cbca131fb39090296953fbd95b84e28f378fc | 6e234eed0da879e16d113c24b48d0b7bc7d5b893 | refs/heads/master | 2020-03-29T14:21:34.849333 | 2018-09-23T18:42:50 | 2018-09-23T18:42:50 | 150,013,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | from django.contrib import admin
# Register your models here.
from base.models import Country, Region
class CountryAdmin(admin.ModelAdmin):
list_display = ('country_name',)
admin.site.register(Country, CountryAdmin)
class RegionAdmin(admin.ModelAdmin):
list_display = ('region_name',)
admin.site.register(Region, RegionAdmin)
| [
"marcosthomaz@icomp.ufam.edu.br"
] | marcosthomaz@icomp.ufam.edu.br |
2c177d90f01124541169790b3b7270464830e31e | 5fa713ea010b8e84886a7073c37115850e0f8de2 | /src/activities/stock_prediction/stock_arena.py | 556670d5443a61144df0a982d6fa3cba3a37d851 | [] | no_license | ShaynAli/Aipen | fa3c61d678ff0ddd7b11b62256776442033a19d3 | 3d2dde6f849c6304475f6edc5a9e08b2074209f1 | refs/heads/master | 2021-10-24T22:00:10.206532 | 2019-03-22T09:55:42 | 2019-03-22T09:55:42 | 110,479,921 | 3 | 2 | null | 2019-03-22T09:55:43 | 2017-11-13T00:02:04 | Python | UTF-8 | Python | false | false | 404 | py | from arena.arena import MachineLearningArena
from activities.stock_prediction.stock_prediction_models import *
import pprint
if __name__ == '__main__':
arena = MachineLearningArena(model_pool=[ShallowNeuralNetworkPredictor], activity=FrankfurtStockPrediction)
printer = pprint.PrettyPrinter()
for _ in range(10):
arena.auto_compete()
printer.pprint(arena.score_history[-1])
| [
"shayaan.syed.ali@gmail.com"
] | shayaan.syed.ali@gmail.com |
1c5f697dd6855da6b7d26a9ee5c7a1b2c772a09e | b9386cfc639dfcc1cc224777015eddda57056f30 | /ProjetosPython/PraticandoPython/P60-SimuladorCaixaEletronico.py | 6200c4e53a159ef004a878f6c89ec423dd544954 | [] | no_license | lucasstevanin/LearningPythonfromCursoemVideo | 8a6850269bd9b58b110b37f0479ba21a17dbeced | 82916302c06402e2a8612b12ae0e2ea6654fe9c4 | refs/heads/master | 2023-06-14T11:13:14.336580 | 2021-07-07T19:04:32 | 2021-07-07T19:04:32 | 209,598,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,884 | py | #Com cédulas de 50, 20, 10 e 1 #OBS: Fiz com de 100 tambem
#Informar quantas cédula de cada valor serão entregues
print('='*30)
print('{:^30}'.format('CAIXA ELETRÔNICO (24 HRS)'))
print('='*30)
m = c = d = u = 0
milhar = centena = dezena10 = dezena20 = unidade = 0
while True:
valor_sacado = input('Qual o valor a ser sacado? R$ ')
if int(valor_sacado) >= 1000:
m = valor_sacado[-4]
if int(valor_sacado) >= 100:
c = valor_sacado[-3]
if int(valor_sacado) >= 10:
d = valor_sacado[-2]
u = valor_sacado[-1]
mult_milhar = (int(m) * 1000)
if mult_milhar >= 100:
milhar = mult_milhar / 100 #notas de 100
centena = (int(c) * 100 / 50) #notas de 50
mult_dezena = (int(d) * 10) #notas de 20
resto = mult_dezena - (mult_dezena - 10)
if mult_dezena >= 20:
dezena20 = mult_dezena // 20
if mult_dezena % 20 != 0:
dezena10 = resto / 10 #notas de 10
unidade = (int(u) * 1) / 1 #notas de 1
pergunta = str(input('Deseja Realizar Alguma Outra Operação? [S / N] ')).upper()
if pergunta == 'N':
break
print()
print('=== SEU DINHEIRO ===')
print(f'Vão ser {milhar:.0f} notas de R$100\n'
f'{centena:.0f} notas de R$50\n'
f'{dezena20:.0f} notas de R$20\n'
f'{dezena10:.0f} notas de R$10\n'
f'{unidade:.0f} notas de R$1\n')
'''
valor = int(input('Que valor você quer sacar? R$ '))
total = valor
ced = 50
totced = 0
while True:
if total >= ced:
total -= ced
totced += 1
else:
if totced > 0:
print(f'Total de {totced} cédulas de R$ {ced}')
if ced == 50:
ced = 20
elif ced == 20:
ced = 10
elif ced == 10:
ced = 1
totced = 0
if total == 0:
break
'''
| [
"lucasstevanin@gmail.com"
] | lucasstevanin@gmail.com |
bc54b35f106f1d79df2f352512f5f441a35e2a0d | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /Numerical_Methods_in_Engineering_with_Python_Kiusalaas/linInterp.py | 4a54aadcdef95cfdcb87969af29fd0949e5677c5 | [] | no_license | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 273 | py | ## module linInterp
''' root = linInterp(f,x1,x2).
Finds the zero of the linear function f(x) by straight
line interpolation based on x = x1 and x2.
'''
def linInterp(f,x1,x2):
f1 = f(x1)
f2 = f(x2)
return x2 - f2*(x2 - x1)/(f2 - f1)
| [
"bb@b.om"
] | bb@b.om |
7f5e5da7185ed2490f5b2d874561e9214c8db779 | 6395987515664fd475fc91398bae06f2d7c1465c | /assign/5-list/append.py | cd6c1888327286d904b14a32e7ebcac18f2b0cad | [] | no_license | amanmishra98/python-programs | 984e1f503d04983802aec14ef7f3b2968cdebb60 | e8e90e8ae38b0b4058fa978d5bced943ac995e91 | refs/heads/master | 2020-04-23T07:00:20.740648 | 2020-03-20T08:14:47 | 2020-03-20T08:14:47 | 170,993,284 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | n=int(input("enter no"))
l=[]
for x in range(1,n+1):
a=int(input())
l.append(a)
print(l)
| [
"noreply@github.com"
] | amanmishra98.noreply@github.com |
41174bfa77697a1e47f7584e57e063d0a3eab5bd | f205a750018c73f2acba4e15d72ee8f68c41b0eb | /home/migrations/0002_testd.py | 332d450258603db0d5b447d409db8926fee0399f | [] | no_license | crowdbotics-apps/testdjangotest-dev-1417 | 67d5f289d6bce5cc26f6b31390a1826f2a07d255 | 857696193fc35e615e7ac47a7594c1e153f3c612 | refs/heads/master | 2022-03-28T09:00:27.569608 | 2020-01-14T12:38:24 | 2020-01-14T12:38:24 | 231,307,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # Generated by Django 2.2.9 on 2020-01-14 12:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='TestD',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('demotest', models.GenericIPAddressField(protocol='IPv4')),
],
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
02e17622a57d31173fb75f8db715b46d04e40d90 | 08f247bcf0d835871d375150d5b388f3ebb41eb6 | /alien_invasion.py | bd239838fc3323bb5189163994fab774de8c392d | [] | no_license | MagicLover/python | dd44be34a06d6c5deb5549096ed315365a5a1418 | 8a81152f22d71e6c2dd5901c4b0bd90260290068 | refs/heads/master | 2020-03-29T01:12:49.285075 | 2018-09-19T11:30:47 | 2018-09-19T11:30:47 | 149,376,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,599 | py | import sys
import pygame
#导入设置屏幕类
from settings import Settings
#导入飞船类
from ship import Ship
#导入鉴定事件模块
import game_functions as gf
#导入编组类
from pygame.sprite import Group
#导入外星人类
from alien import Alien
#导入游戏统计信息类
from game_status import GameStatus
#导入按钮类
from button import Button
#导入计分类
from scoreboard import Scoreboard
def run_game():
#初始化游戏并创建一个屏幕对象
pygame.init()
ai_settings = Settings()
#设置主屏幕的大小
screen = pygame.display.set_mode((ai_settings.screen_width,ai_settings.screen_height))
#设置游戏的主题
pygame.display.set_caption("Alien Invasion")
#创建一艘飞船
ship = Ship(ai_settings,screen)
#创建一个用于存储子弹的编组
bullets = Group()
#创建一个存储外星人的编组
aliens = Group()
#创建外星人群
gf.create_fleet(ai_settings,screen,ship,aliens)
#创建一个用户存储游戏统计信息的实例
status = GameStatus(ai_settings)
#创建play按钮
play_button = Button(ai_settings,screen,"Play")
#创建记分牌
sb = Scoreboard(ai_settings,screen,status)
#开始游戏的主循环
while True:
#监视键盘和鼠标事件
gf.check_events(ai_settings,screen,status,sb,play_button,ship,aliens,bullets)
if status.game_active:
ship.update()
gf.update_bullets(ai_settings,screen,status,sb,ship,aliens,bullets)
gf.update_aliens(ai_settings,screen,status,sb,ship,aliens,bullets)
gf.update_screen(ai_settings,screen,status,sb,ship,aliens,bullets,play_button)
run_game()
| [
"noreply@github.com"
] | MagicLover.noreply@github.com |
ebbb6f1baab105437d5edb3d7f2830a9aced27ac | f4524d382863480eba8b334d2ec98e953d1c9ed3 | /image_encryption/asgi.py | 3de69617b5047c47ce792b5cde9b259dfb4c9c34 | [] | no_license | JAWalmsley/image-encryption | c861592a27dd6dbd8ba1f10f9e35c6afa16f0570 | 6b9d78e768dfb25ea9b84c94b2a2006df771a587 | refs/heads/master | 2023-08-07T20:59:24.220713 | 2020-06-14T23:15:12 | 2020-06-14T23:15:12 | 250,051,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
ASGI config for image_encryption project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'image_encryption.settings')
application = get_asgi_application()
| [
"jackawalmsley@gmail.com"
] | jackawalmsley@gmail.com |
722b40adbd6072a57d0e72d53759a3d575ccfa68 | e3be8552aff4dbcf71e5aa165f254fd094bc048c | /examples/adspygoogle/dfp/v201311/creative_wrapper_service/update_creative_wrappers.py | 38883e6d6cd1a89949df9296656a5d2240dc35a6 | [
"Apache-2.0"
] | permissive | caioserra/apiAdwords | cd1317f05e26edf5cad2faff40c43df96405e715 | 2419b22b1fb7a03cf98355b5793f816319e1e654 | refs/heads/master | 2020-05-05T03:37:16.605798 | 2014-02-03T17:09:39 | 2014-02-03T17:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,479 | py | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates a creative wrapper to the 'OUTER' wrapping order.
To determine which creative wrappers exist, run get_all_creative_wrappers.py.
Tags: CreativeWrapperService.getCreativeWrapper
Tags: CreativeWrapperService.updateCreativeWrappers
"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Set the ID of the creative wrapper to get.
CREATIVE_WRAPPER_ID = 'INSERT_CREATIVE_WRAPPER_ID_HERE'
def main(client, creative_wrapper_id):
# Initialize appropriate service.
creative_wrapper_service = client.GetService('CreativeWrapperService',
version='v201311')
# Get creative wrapper.
creative_wrapper = creative_wrapper_service.GetCreativeWrapper(
creative_wrapper_id)[0]
if creative_wrapper:
creative_wrapper['ordering'] = 'OUTER'
# Update the creative wrappers on the server.
creative_wrappers = creative_wrapper_service.UpdateCreativeWrappers(
[creative_wrapper])
# Display results.
if creative_wrappers:
for creative_wrapper in creative_wrappers:
print (('Creative wrapper with ID \'%s\' and wrapping order \'%s\' '
'was updated.') % (creative_wrapper['id'],
creative_wrapper['ordering']))
else:
print 'No creative wrappers were updated.'
else:
print 'No creative wrappers found to update.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client, CREATIVE_WRAPPER_ID)
| [
"cvserra@gmail.com"
] | cvserra@gmail.com |
fd5615ae97a4486aed7ed81eec67b56b59f0256f | 97bda493af82f57bc212770b13b80ef863f301c4 | /vmadmin/urls.py | b2d7e54ea45f05a6f33c308a97f03cfad110ab0c | [] | no_license | iselusky/ev-cloud | 57a38f7056ed8c864347c069401b826d2faffd5d | 04b336a758aa6e27539179c0b72f36a1f9bccc9b | refs/heads/master | 2021-01-16T01:02:48.543307 | 2016-07-21T02:44:39 | 2016-07-21T02:44:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | from django.conf.urls import include, url
from .vm_views import *
from .gpu_views import *
from .volume_views import *
urlpatterns = [
# url(r'^(?P<url>.+)', root),
url(r'^$', index_view),
url(r'^vm/$', index_view),
url(r'^vm/list/$', vm_list_view),
url(r'^vm/create/$', vm_create_view),
url(r'^vm/edit/$', vm_edit_view),
url(r'^vm/vnc/$', vm_vnc_view),
url(r'^vm/migrate/$', vm_migrate_view),
url(r'^vm/detail/$', vm_detail_view),
url(r'^vm/status/$', vm_status_ajax),
url(r'^vm/op/$', vm_op_ajax),
url(r'^vm/edit_remarks/$', vm_edit_remarks_ajax),
url(r'^gpu/list/$', gpu_list_view),
url(r'^gpu/mount/$', gpu_mount_view),
url(r'^gpu/umount/$', gpu_umount_ajax),
# url(r'^gpu/detail/$', gpu_detail_view),
url(r'^gpu/edit_remarks/$', gpu_edit_remarks_ajax),
url(r'^volume/list/$', volume_list_view),
url(r'^volume/create/$', volume_create_view),
url(r'^volume/mount/$', volume_mount_ceph_view),
url(r'^volume/delete/$', volume_delete_ajax),
url(r'^volume/edit_remarks/$', volume_edit_remarks_ajax),
url(r'^volume/umount/$', volume_umount_ceph_ajax),
]
| [
"bobfu@live.cn"
] | bobfu@live.cn |
e32d1bcc7f82ddc883671e0c1b87f27b34f8a510 | 6273409935b4e2f9f760fa7ff67854781077881a | /ex19.py | f6ed94e98a20384db2113ade8e306105519a5b18 | [] | no_license | EstherGuan/Hard-way-exercise | 406bb9d58e77d16a18ef1fcb09e45b8f1efc13fd | c7722690973b86fe6c4e0ef7ac02ccf47eec15eb | refs/heads/master | 2021-01-19T10:29:10.259465 | 2017-02-17T11:49:17 | 2017-02-17T11:49:17 | 82,183,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py |
def cheese_and_crackers(cheese_count, boxes_of_crackers):
print "You have %d cheeses!" % cheese_count
print "You have %d boxes of crackers!" % boxes_of_crackers
print "Man that's enough for a party!"
print "Get a blanket.\n"
print "We can just give the function numbers directly:"
cheese_and_crackers(20,30)
print "OR, we can use variables from our script:"
amount_of_cheese = 10
amount_of_crackers = 50
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
print "We can even do math inside too:"
cheese_and_crackers(10+20, 5+6)
print "And we can combine the two, variables and math:"
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
| [
"guanyanan0520@gmail.com"
] | guanyanan0520@gmail.com |
234e8528297d1630f15dae6b54fb67acdd02f795 | 6b8c60f3f19d41c9ca1a609959d9573b026eba72 | /string.py | bb1e821dfd2f070435962ea92daf66a60088121e | [] | no_license | GhiffariCaesa/basic-python-b6-b | 67b432c9ff9217fc871209b4e0817d761063a8d8 | 3d92489e610146d43e3d141400341d8b6a367606 | refs/heads/main | 2023-06-17T20:08:09.286633 | 2021-07-12T08:11:13 | 2021-07-12T08:11:13 | 358,808,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | nama = "Mentor Nafi"
print(nama)
print(nama[1])
print(nama[7:11])
print(nama[7:])
print(nama[:7])
print(len(nama))
# M e n t o r [spasi] N a f i
# 0 1 2 3 4 5 6 7 8 9 10 11 | [
"ghiffari55@gmail.com"
] | ghiffari55@gmail.com |
f18f69f99ee9c7c76097ae27323d7cdd294308c8 | 22a5dd1d50523f560ea41c54930c71bb1ddddc99 | /v2/libraries/model/options.py | c051efe91ebc07c3befb0a303b86057e30b72239 | [
"MIT"
] | permissive | daniele21/Anomaly_Detection | 9f5facffc136053b1dccd371a7f6e0b756eea456 | 10a6a9dffcbcc0f27e702eed8a5b607c5daf6877 | refs/heads/master | 2022-07-01T05:59:43.419613 | 2020-05-13T10:41:27 | 2020-05-13T10:41:27 | 224,666,058 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,369 | py | # -*- coding: utf-8 -*-
#%%
#from dataset import loadData
from libraries.model.dataset import loadDataset
#%%
class Options():
def __init__(self,
# DATASET
nFolders = 5,
startFolder = 1,
endFolder = 2000,
patch_per_im= 2000,
transforms = None,
batch_size = 64,
split = 0.8,
n_workers = 8,
augmentation= True,
shape = 32,
# NETWORK
img_size = 32,
in_channels = 3, # 1=GRAYSCALE 3=RGB
out_channels = 64,
z_size = 100,
n_extra_layers = 0,
# MODEL
name = 'My Ganomaly',
seed = -1,
epochs = 10,
patience = 3,
beta1 = 0.5,
lr = 0.0005,
lr_gen = 0.0002,
lr_discr = 0.0001,
output_dir = '/media/daniele/Data/Tesi/Practice/Code/ganomaly/ganomaly-master/output',
load_weights = True,
phase = 'train',
resume = '',
alpha = 0.15,
weightedLosses = False,
w_adv = 1,
w_con = 50,
w_enc = 1,
multiTaskLoss = False,
kernel_size = 3,
sigma = 1,
tl = 'vgg16',
TL_size = 200,
dataset = '',
descr = '',
):
#NETWORK
self.img_size = img_size
self.in_channels = in_channels
self.out_channels = out_channels
self.z_size = z_size
self.n_extra_layers = n_extra_layers
# DATASET
self.nFolders = nFolders
self.startFolder = startFolder
self.endFolder = endFolder
self.patch_per_im = patch_per_im
self.transforms = transforms
self.batch_size = batch_size
self.split = split
self.n_workers = n_workers
self.augmentation = augmentation
self.shape = shape
self.train_data = []
self.train_targets = []
self.validation_data = []
self.validation_targets = []
self.loadedData = False
# MODEL
self.seed = seed
self.name = name
self.patience = patience
self.epochs = epochs
self.lr = lr
self.lr_gen = lr_gen
self.lr_discr = lr_discr
self.beta1 = beta1
self.load_weights = load_weights
self.phase = phase
self.output_dir = output_dir
self.resume = resume
self.alpha = alpha
self.weightedLosses = weightedLosses
self.w_adv = w_adv
self.w_con = w_con
self.w_enc = w_enc
self.multiTaskLoss = multiTaskLoss
self.kernel_size = kernel_size
self.sigma = sigma
self.tl = tl
self.TL_size = TL_size
self.dataset = dataset
self.descr = descr
self.isTrain = True
def loadDatasets(self):
train, validation, test = loadDataset(self, test='mixed')
# train, validation, test = loadDataset(self, test='normal')
# train, train_targets, val, val_targets, test, test_targets = loadDataNormAnonm(self)
# train, train_targets, val, val_targets, test, test_targets = loadDatasetAllNormals(self)
# train, train_targets, val, val_targets = loadData(self)
# self.train_data = train
# self.train_targets = train_targets
# self.validation_data = val
# self.validation_targets = val_targets
# self.test_data = test
# self.test_targets = test_targets
self.training_set = train
self.validation_set = validation
self.test_set = test
self.loadedData = True
#%%
class FullImagesOptions():
def __init__(self,
# DATASET
augmentation = True,
batch_size = 16,
split = 0.7,
n_workers = 8,
start = 0,
end = 100,
shape = 64,
name = 'My_Ganomaly',
in_channels = 3,
):
self.augmentation = augmentation
self.batch_size = batch_size
self.split = split
self.n_workers = n_workers
self.start = start
self.end = end
self.shape = shape
self.name = name
self.in_channels = in_channels
| [
"daniele.moltisanti@mail.polimi.it"
] | daniele.moltisanti@mail.polimi.it |
beac0877155167b3266705ba9a6127d5fdeb60b0 | 9ddaea1efb3bf651b49b968bfc7a1de6077ef2ab | /obrero/experimental/video_udea.py | b1f9c29c089a92be12c34cabc86d6fa47016f4ab | [
"MIT"
] | permissive | Maduvi/obrero | 1af1fcc99cd70bd7a6c9a3bc129c62f402b75e94 | 6f4424863afda1c957d2e20304a26c0ea2251125 | refs/heads/master | 2020-06-02T07:08:02.590791 | 2020-05-24T12:41:39 | 2020-05-24T12:41:39 | 191,078,510 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,976 | py | import os
import sys
import pkg_resources
import numpy as np
from matplotlib.image import imread
import obrero.cal as ocal
import obrero.plot as oplot
import obrero.experimental.enso as oenso
# path where stored logo
DATA_PATH = pkg_resources.resource_filename('obrero', 'data/')
def _add_text_axes(axes, text):
"""Use a given axes to place given text."""
txt = axes.text(0.5, 0.5, text, ha='center', va='center')
axes.axis('off')
return txt
def _latex_authoring(title, author, affil, email):
"""Creates a text object with LaTeX code to include in plots
made with `video_udea`.
""" # noqa
texmsg = []
# lets build it
texmsg.append(r'\begin{center}')
# title
if isinstance(title, list):
for t in title:
texmsg.append(t + r'\\')
else:
texmsg.append(title + r'\\')
# a bit of space
texmsg.append(r'\vspace{1em}')
# authors
if isinstance(author, list):
for a in author:
texmsg.append(r'\tiny{' + a + r'}\\')
else:
texmsg.append(r'\tiny{' + author + r'}\\')
# authors
if isinstance(affil, list):
for a in affil:
texmsg.append(r'\tiny{' + a + r'}\\')
else:
texmsg.append(r'\tiny{' + affil + r'}\\')
# email
if isinstance(email, list):
for e in email:
texmsg.append(r'\tiny{' + e + r'}')
else:
texmsg.append(r'\tiny{' + email + r'}')
# finish
texmsg.append(r'\end{center}')
# join
latext = ' '.join(texmsg)
return latext
def video_udea(dlist, slist, bbox, title, author, affil, email,
rotate, wpx=1920, hpx=1080, dpi=300, lon0=0, dg=1,
save_dir=None, smooth=False, winds=None, xhres=None):
"""Create video made for ExpoIngenieria 2018.
A very specific format was used to produce this video and to keep
it we created this function. It can only be used to produce such
video. In this case we need for sets of data arrays: a variable to
be plotted in an Orthographic projection rotating every `dg`
degrees, two lines of time series area average over a region to be
plotted and compared in an xy-plot, and sea surface temperature
(SST) values to include the ONI time series. The user can also
input horizontal wind fields U and V to have vectors plotted on
top of contours.
Parameters
----------
dlist: list of xarray.DataArray
This list must have the following order:
[variable_for_contours, first_time_series,
second_time_series, sst_array]
The first variable will be plotted in a rotating Orthographic
projection. The time series will be plotted together in an
xy-plot. And the SST array will be used to plot also an ONI
index axes.
slist: list of dict objects of specifications
This list must contain three dict objects: one for the contour
plot, one for the time series plot and one for the ONI index
plot. So the list must be:
[specifications_contours, specifications_time_series,
specifications_oni_index]
For the specifications of the contours see keywords of
function `plot_global_contour`, except keyword `axes`. For the
time series specifications see keywords of the function
`averages_video_udea`. And for the ONI plot see keywords in
the `oni_video_udea` function.
bbox: list of list objects
This is a list of two list objects which have corner
coordinates to plot a squared region: [xcorners,
ycorners]. This in case the user wants to highlight a squared
region somewhere in the Orthographic projection map. This
object can be obatined using function `bbox_linecoords`.
title: str or list of str
Title to be placed in a text-only axes. Input for
`_latex_authoring`. If multiple lines it should be a list of
str in which each str is a single line.
author: str or list of str
Author information to be placed in a text-only axes. Input for
`_latex_authoring`. If multiple lines it should be a list of
str in which each str is a single line.
affil: str or list of str
Affiliation information of author to be placed in a text-only
axes. Input for `_latex_authoring`. If multiple lines it
should be a list of str in which each str is a single line.
email: str or list of str
Author e-mail information to be placed in a text-only
axes. Input for `_latex_authoring`. If multiple lines it
should be a list of str in which each str is a single line.
rotate: list
In this list the user can specify when to rotate the
projection. To do this the user must use dates in the format:
'YYYY-MMM', using 3 letters for the month. So for example if:
rotate = ['1997-Jun', '1998-Dec']
It means that the Orthographic projection will rotate for
those two months only, in spite of the data arrays having more
time steps.
wpx: int, optional
Width in pixels for the images. Default is 1920 px.
hpx: int, optional
Height in pixels for the images. Default is 1080 px.
lon0: float, optional
Initial longitude at which to start rotating every time step.
Default is Greenwich meridian.
dg: float, optional
Degrees step to advance rotation. The maximum possible value is
dg = 360 which means no rotation at all. The slowest possible is
dg = 1. Default is 1.
save_dir: str, optional
If the user wants to save all plotted frames in a folder, they
can set this keyword to a folder name and figures will be
stored there. Otherwise figures will not be saved. Default is
not save plots.
dpi: int, optional
Dots per inch for every frame. Default is 300.
smooth: bool, optional
Use this boolean flag to choose whether to smooth the time
series or not. The smoothing will be done using a rolling mean
every 3-time steps, so if it is monthly data, the user will
actually be plotting 3-monthly rolling averages. Default is
False.
winds: list of xarray.DataArray, optional
If the user has U and V winds data and wants to put vectors on
top of the contours in the Orthographic projection plot, then
they must use this option for input winds like so:
winds = [u, v]
For this to work the user must also use the `xhres` keyword
because the function needs the resolution of the grid in the x
direction to be able to avoid plotting vectors out of the
projection bounds.
xhres: float, optional
Grid resolution in the x direction. This keyword is only used
if `winds` is being used, in which case it is a mandatory
argument.
""" # noqa
# unpack data and specifications
vmap, vline1, vline2, sst = dlist
spec1, spec2, spec3 = slist
# check if wind wanted and given
if winds is not None:
u, v = winds
if xhres is None:
msg = ('if you want wind you must specify horizontal ' +
'horizontal x resolution with \'xhres\' keyword')
raise ValueError(msg)
# only lats in between will have wind
w_ymin = 4
w_ymax = 28
# longitudes will have wind
wlon = 9
# get longitudes as x
x = u.longitude.values
y = u.latitude.values
mlon = x.size
# smooth area averages if wanted
if smooth is True:
vline1 = (vline1.rolling(time=3, min_periods=2)
.mean(keep_attrs=True))
vline2 = (vline2.rolling(time=3, min_periods=2)
.mean(keep_attrs=True))
# get number of times
ntim = vmap.time.size
# get oni series from exp
oni = oenso.oni(sst).values.flatten()
# authoring message
msg = _latex_authoring(title, author, affil, email)
# get dates
dates = ocal.get_dates(vmap.time.values)
# guess number of maps
nmpr = int(360 / dg)
nrots = len(rotate)
totm = (ntim - nrots) + nrots * nmpr
# counter for names
c = 1
# create save directory
save_path = oplot.create_save_dir(save_dir)
# step every time
for t in range(ntim):
# rotate only for specified dates
dstr = dates[t].strftime('%Y-%b')
if dstr in rotate:
rotation = True
nrot = nmpr # number of maps per rotation
else:
rotation = False
nrot = 1
if winds is not None:
clon = x[(x >= lon0 - xhres / 2) & (x < lon0 + xhres / 2)]
idx = np.where(x == clon)[0][0]
# rotate or not
for i in range(nrot):
# create figure instance
fig = oplot.plt.figure(1, figsize=(wpx / dpi, hpx / dpi))
# projection
prj = oplot.ort(central_longitude=lon0)
# create axes for all
ax1 = oplot.plt.subplot2grid((3, 6), (0, 0), colspan=3,
rowspan=3, projection=prj)
ax2 = oplot.plt.subplot2grid((3, 6), (0, 3), colspan=3)
ax3 = oplot.plt.subplot2grid((3, 6), (1, 3), colspan=3)
ax4 = oplot.plt.subplot2grid((3, 6), (2, 3), colspan=2)
ax5 = oplot.plt.subplot2grid((3, 6), (2, 5))
# add axes and title to specifications
spec1['axes'] = ax1
spec1['title'] = r'\texttt{' + dstr + r'}'
# plot
oplot.plot_global_contour(vmap[t], **spec1)
# add wind arrows if given
if winds is not None:
# get winds
U = u[t].values
V = v[t].values
# get longitude range indexes
if (idx + wlon) < mlon:
xrang = np.arange(idx - wlon, idx + wlon + 1,
dtype=int)
else:
xrang = np.arange(idx - mlon - wlon, idx - mlon
+ wlon + 1, dtype=int)
# select those to plot
xx = x[xrang]
yy = y[w_ymin:w_ymax]
uu = U[w_ymin:w_ymax, xrang]
vv = V[w_ymin:w_ymax, xrang]
# add arrows
quiv = ax1.quiver(xx, yy, uu, vv, pivot='middle',
transform=oplot.pcar(),
scale_units='inches',
scale=8500 / 25.4)
# add key
ax1.quiverkey(quiv, 0.9, 0.1, 20, r'20 km h$^{-1}$',
labelpos='S', angle=180)
# bounding box
ax1.plot(bbox[0], bbox[1], '-', linewidth=1,
color='black', transform=oplot.pcar())
# plot averages
averages_video_udea(dates[:t + 1], vline1.values[:t + 1],
vline2.values[:t + 1], ax2, **spec2)
# plot oni
oni_video_udea(dates[:t + 1], oni[:t + 1], ax3, **spec3)
# add message
_add_text_axes(ax4, msg)
# add logo
udea_logo(ax5)
# maximize plot
oplot.plt.tight_layout()
# savefig if provided name
if save_dir is not None:
img = os.path.join(save_path, "rotate_%08d.png" % c)
oplot.plt.savefig(img, dpi=dpi)
oplot.plt.close(fig)
sys.stdout.write('Plotting progress: %d%% \r' %
(100 * c/totm))
sys.stdout.flush()
# update counter
c += 1
else:
oplot.plt.pause(0.05)
# update lon0
if rotation is True:
if lon0 > 0.0:
lon0 = lon0 - dg
else:
lon0 = 360.0
# update clon if winds and get ist index
if winds is not None:
if idx <= mlon - 1:
clon = x[(x >= lon0 - xhres / 2.0) &
(x < lon0 + xhres / 2.0)]
try:
idx = np.where(x == clon)[0][0]
except IndexError:
idx = 0
else:
idx = 0
def oni_video_udea(dates, oni, axes, xticks=None, xlim=None,
ylim=[-3, 3], title='ONI', color='black',
xlabel=r'Year', ylabel=r'($^{\circ}$\,C)'):
"""Plot ONI time series for UdeA video.
In the video there will be an axes with ONI values. This function
will take care of it.
Parameters
----------
dates: pandas.DatetimeIndex
These are the x axis values. Matplotlib will interpret them as
dates and format them as such.
oni: numpy.ndarray
This is a time series. It should be obtained flattening the
values of the data frame that the function `enso.get_oni`
creates.
axes: matplotlib.axes.Axes
Generally created using `figure.add_subplot()`. Since this
plot is to be appended to a larger picture, the axes must be
created outside this function and used as input.
xticks: list or numpy.ndarray, optional
This controls the tick marks in the x axis. Default is to put
a tick from the second year until the end every 2 years.
xlim: list, optional
Limits in the x axis. The user can choose the limit dates in
this axis. Default is to use the first and last items in
`dates`.
ylim: list, optional
Limits in the y axis. Default is [-3, 3].
title: str, optional
Centered title. Default is 'ONI'.
xlabel: str, optional
Title in the x axis. Default is 'Year'.
ylabel: str, optional
Title in the y axis. Default is '(oC)'.
Returns
-------
matplotlib.axes.Axes with plot attached.
""" # noqa
# get ticks
if xticks is None:
xticks = dates[12::48]
# get xlim
if xlim is None:
xlim = [dates[0], dates[-1]]
# get colors for line plots
cm = oplot.plt.get_cmap('bwr')
cred = cm(cm.N)
cblue = cm(0)
# plot last as point
point = oni[-1]
if point > 0.5:
cpoint = cred
elif point < -0.5:
cpoint = cblue
else:
cpoint = 'black'
# line plot
axes.plot(dates, oni, linewidth=1, color=color)
axes.plot(dates[-1], point, 'o', color=cpoint, ms=2)
# axes lims
axes.set_xlim(xlim)
axes.set_ylim(ylim)
# set ticks
axes.set_xticks(xticks)
# horizonatl lines
axes.axhline(y=0, linestyle='--', alpha=0.5, linewidth=1,
color='black')
axes.axhline(y=0.5, linestyle='--', alpha=0.5, linewidth=1,
color=cred)
axes.axhline(y=-0.5, linestyle='--', alpha=0.5, linewidth=1,
color=cblue)
# titling
axes.set_title(title)
axes.set_ylabel(ylabel)
axes.set_xlabel(xlabel)
return axes
def averages_video_udea(dates, dlist, axes, names=['Exp1', 'Exp2'],
colors=['black', 'DodgerBlue'], xticks=None,
xlim=None, ylim=[-3, 3], title='',
xlabel=r'Year', ylabel=''):
"""Plot area average time series of variable for UdeA video.
In the video there will be axes with time series of some variable
for two different data sets averaged spatially. This function will
take care of it.
Parameters
----------
dates: pandas.DatetimeIndex
These are the x axis values. Matplotlib will interpret them as
dates and format them as such.
dlist: list of numpy.ndarrays
Only two arrays are supported. These should be time series of
area averages for some variable.
axes: matplotlib.axes.Axes
Generally created using `figure.add_subplot()`. Since this
plot is to be appended to a larger picture, the axes must be
created outside this function and used as input.
names: list of str, optional
Names to be shown in the legend. They must have the same order
as the data in `dlist`. Default is ['Exp1', 'Exp2']. They will
always be converted to upper case.
colors: list of named colors, optional
Colors for each line. They must have the same order as the
data in `dlist`. Default is ['black', 'DodgerBlue']
xticks: list or numpy.ndarray, optional
This controls the tick marks in the x axis. Default is to put
a tick from the second year until the end every 2 years.
xlim: list of datetime objects, optional
Limits in the x axis. The user can choose the limit dates in
this axis. Default is to use the first and last items in
`dates`.
ylim: list of float, optional
Limits in the y axis. Default is [-3, 3].
title: str, optional
Centered title. Default is empty.
xlabel: str, optional
Title in the x axis. Default is 'Year'.
ylabel: str, optional
Title in the y axis. Default is empty.
Returns
-------
matplotlib.axes.Axes with plot attached.
""" # noqa
# get ticks
if xticks is None:
xticks = dates[12::48]
# get xlim
if xlim is None:
xlim = [dates[0], dates[-1]]
# unpack data
av1, av2 = dlist
# points
point1 = av1[-1]
point2 = av2[-1]
# line plot for land
axes.plot(dates, av1, linewidth=1, color=colors[0],
label=names[0].upper())
axes.plot(dates, av2, linewidth=1, color=colors[1],
label=names[1].upper())
axes.plot(dates[-1], point1, 'o', color=colors[0], ms=2)
axes.plot(dates[-1], point2, 'o', color=colors[1], ms=2)
# set lims
axes.set_xlim(xlim)
axes.set_ylim(ylim)
axes.set_xticks(xticks)
# horizonatl lines
axes.axhline(y=0, linestyle='--', alpha=0.5, linewidth=1,
color='black')
# titling
axes.set_title(title)
axes.set_ylabel(ylabel)
axes.set_xlabel(xlabel)
axes.legend(ncol=2)
return axes
def udea_logo(axes):
"""Add Universidad de Antioquia logo to given axes.
For some plots it is nice to put the logo of the school. This
function was specifically created to be used in `video_udea`
function but might be used elsewhere.
Paramaters
----------
axes: matplotlib.axes.Axes
Generally created using `figure.add_subplot()`.
Returns
-------
matplotlib.axes.Axes with logo attached.
"""
# university logo
logo = imread(DATA_PATH + 'logo-udea_240px.png')
# logo
plog = axes.imshow(logo)
axes.axis('off')
return plog
| [
"mateo.duquev@udea.edu.co"
] | mateo.duquev@udea.edu.co |
d9b7b71fd3a717608917bf6da152e399e687d757 | b9533fc58f590fe98eb16f9dc03b5a6717dcc702 | /docs_src/parameter_types/bool/tutorial001.py | 46f6335ceea854286da49c62ee558fc865e62b16 | [
"MIT"
] | permissive | aguinane/typer | 3af0e513d4f5450c8ae2df4fb2057bb619264c4d | 88ca6983fb4fdb969dadc6e150bf74ccea0ad9e1 | refs/heads/master | 2023-02-09T04:32:12.035768 | 2021-01-05T23:16:18 | 2021-01-06T06:35:12 | 327,137,224 | 1 | 0 | MIT | 2021-01-06T06:32:36 | 2021-01-05T22:39:08 | Python | UTF-8 | Python | false | false | 218 | py | import typer
def main(force: bool = typer.Option(False, "--force")):
if force:
typer.echo("Forcing operation")
else:
typer.echo("Not forcing")
if __name__ == "__main__":
typer.run(main)
| [
"tiangolo@gmail.com"
] | tiangolo@gmail.com |
95cb67257bde600765f99301c8298f843a1061fe | 3ac5e7eb41995d9b9f7a067d8eb981e474f158d4 | /BaeminSample/urls.py | 645fb14ca9128f496834b4fc54431b58f394c97f | [] | no_license | bluesysuya/BaeminSample | 4babb7ec8c94556f46940acc62480dfe19a1969e | 524d8ba79f0709fa107aa66fa7b8736989884f13 | refs/heads/master | 2021-01-23T06:05:31.822167 | 2017-09-16T04:38:31 | 2017-09-16T04:38:31 | 102,487,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | """BaeminSample URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^partner/', include('partner.urls')),
url(r'^admin/', admin.site.urls),
]
| [
"bluesysuya@naver.com"
] | bluesysuya@naver.com |
cf4d0c9cf5b8812cf3a8b2ac5b93d4d7523cb311 | 5c28626057e83860dd6da5b239d9eca60e4c2ceb | /Backend/osmchadjango/contrib/sites/migrations/0003_auto_20161005_1234.py | be2fddc71e014f112631132d67fb341544ca9105 | [
"ISC",
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | habi/srz-edi | 1ab6fbf797eb90fbaa56817fa8ef772ed81d73b6 | 603496dce834bf3ecf28cc949da619b837e2873c | refs/heads/main | 2023-06-05T00:53:18.109307 | 2021-06-25T08:49:27 | 2021-06-25T08:49:27 | 380,263,278 | 1 | 0 | ISC | 2021-06-25T14:28:09 | 2021-06-25T14:28:08 | null | UTF-8 | Python | false | false | 600 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-05 12:34
from __future__ import unicode_literals
import django.contrib.sites.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sites', '0002_set_site_domain_and_name'),
]
operations = [
migrations.AlterField(
model_name='site',
name='domain',
field=models.CharField(max_length=100, unique=True, validators=[django.contrib.sites.models._simple_domain_name_validator], verbose_name='domain name'),
),
]
| [
"43520128+Denelio@users.noreply.github.com"
] | 43520128+Denelio@users.noreply.github.com |
e5592a520b551171214fac5913366f39cab9e907 | 678f6dc26296391a76b8fa2284c92f864402c129 | /summary/commentChooser.py | e6748cdc1fe050582b43ac01d25d6f949816ddbd | [
"Apache-2.0"
] | permissive | mcdir/Weiss | ae2dd1336dfe96b0d52bccb0012a541f059073ad | 16b9bff300660753e94e251659f873db095843cf | refs/heads/master | 2021-01-19T21:55:18.484386 | 2017-04-19T09:12:17 | 2017-04-19T09:12:17 | 88,724,230 | 0 | 0 | null | 2017-04-19T09:05:26 | 2017-04-19T09:05:26 | null | UTF-8 | Python | false | false | 2,878 | py | '''
A python package which contains different methods for chooseing a
representative comment from a list of comments.
Current Methods:
randomComment - chooses a random comment
leadComment - chooses the first/lead comment
walkThrough - ??
pageRankComment - creates a graph from similar words within comments and then
runs page rank. Chooses comment with highest page rank.
Author: Austin Ankney & Wenjun Wang
Date 6/7/2015
Usage:
import the package (commentChooser) and choose the specifc method
(listed above) you would like to use.
'''
## Import list
from igraph import *
from nltk.corpus import stopwords
import re
import random
## Chooses random comment
def randomComment(comment_list):
num_comments = len(comment_list)
comment_index = random.randint(0,num_comments-1)
return comment_list[comment_index]
## Chooses first/lead comment
def leadComment(comment_list):
return comment_list[0]
## Choose comment based on page rank of comment graph
def pageRankComment(comment_list):
commentList = []
for text in comment_list:
wordList = tokenize(text)
noStop = removeStopWords(wordList)
noNums = removeNumbers(noStop)
commentList.append(noNums)
g = createGraph(commentList)
commentChoice = importantNode(g)
return commentChoice
def tokenize(text, replace_chars = [',','.','"','\'','(',')','$','?','<','>','=','/']):
# iterate over list of chars being replaces
for char in replace_chars:
text = text.replace(char,'')
text = text.lower().split(' ')
return text
def splitSentence(text):
return text.split('. ')
def splitWord(sentence):
return sentence.lower().split(' ')
def cleanWord(word):
table = string.maketrans("","")
return word.translate(table, string.punctuation)
def removeStopWords(wordList):
newWordList = []
for word in wordList:
if not word in stopwords.words('english'):
newWordList.append(word)
return list(set(newWordList))
def removeNumbers(wordList):
newWordList = []
for word in wordList:
if not re.search('\d+', word):
newWordList.append(word)
return list(set(newWordList))
def intersection(list1, list2):
overlap = list(set(list1) & set(list2))
return overlap
def createGraph(commentList):
g = Graph()
g.add_vertices(len(commentList))
## add edges
for i in range(len(commentList)):
for j in range(len(commentList)):
if not i == j and g.are_connected(i,j) is False:
intersect = intersection(commentList[i],commentList[j])
if len(intersect) > 5:
g.add_edge(i,j)
return g
def importantNode(graph):
pageRank = graph.pagerank()
maxPR = max(pageRank)
node = pageRank.index(maxPR)
return node
| [
"codebeatstode@aol.com"
] | codebeatstode@aol.com |
a4d4638c9319e40bd188419a370718c710dceb68 | 15dadd4d1f7cca36e066cf06f90e3a5390e79c47 | /src/policy/seo/setuphandlers.py | 0b02b27d6b1a9f1d430e89ae8aa997e5aaa811f4 | [] | no_license | affinitic/plone-policy.seo | fae17a33580b0c70937685111458644f7258fd19 | 653660f452ed90724139993e5ae3802495606ace | refs/heads/master | 2021-10-07T10:19:45.000958 | 2013-09-10T08:06:04 | 2013-09-10T08:06:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,595 | py | from Products.CMFCore.utils import getToolByName
from Products.LinguaPlone.browser.setup import SetupView
def setupLinguaFolders(site, logger):
sw = SetupView(site, site.REQUEST)
sw.folders = {}
pl = getToolByName(site, "portal_languages")
sw.languages = pl.getSupportedLanguages()
if len(sw.languages) == 1:
logger.error('Only one supported language configured.')
sw.defaultLanguage = pl.getDefaultLanguage()
available = pl.getAvailableLanguages()
for language in sw.languages:
info = available[language]
sw.setUpLanguage(language, info.get('native', info.get('name')))
sw.linkTranslations()
sw.removePortalDefaultPage()
# if sw.previousDefaultPageId:
# sw.resetDefaultPage()
sw.setupLanguageSwitcher()
def setupVarious(context):
# Ordinarily, GenericSetup handlers check for the existence of XML files.
# Here, we are not parsing an XML file, but we use this text file as a
# flag to check that we actually meant for this import step to be run.
# The file is found in profiles/default.
logger = context.getLogger('policy.seo')
if context.readDataFile('policy.seo_various.txt') is None:
return
site = context.getSite()
for folder_name in ['news', 'events', 'Members']:
if getattr(site, folder_name, None):
folder = getattr(site, folder_name)
folder.setExcludeFromNav(True)
folder.reindexObject()
if not getattr(site, 'fr', None):
setupLinguaFolders(site, logger)
setup_tool = context.getSetupTool()
| [
"smoussiaux@cirb.irisnet.be"
] | smoussiaux@cirb.irisnet.be |
0e3b8f66a09dd70b50624d3d8540c51d8cff8306 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /198/198.house-robber.250714223.Accepted.leetcode.py | 77a36f9e1995b5ec98eb4f8b6f5813fb91b049c2 | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | class Solution:
def rob(self, nums):
if not nums:
return 0
current, prev = nums[0], 0
for num in nums[1:]:
prev, current = current, max(prev + num, current)
return current
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
3a90a5230704003aeb2d89708a4b2496c79fb525 | 99f83ebcdf04ace0c3a44b43d9891a43a87eddc0 | /mysite/settings.py | eeb8b1df635bece170335d62d7d6af8ac2f99cfe | [] | no_license | Kadyrgali/my-first-blog | 0db4161924f8a71732dd48b162fdfbf2db5a2ad4 | f0363b236adc961e5b3bd02615bb4c2e713d3523 | refs/heads/master | 2023-01-05T14:01:48.991803 | 2020-11-05T10:03:35 | 2020-11-05T10:03:35 | 309,664,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,191 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.17.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^y5%z7wd*$fuuh11e=!m9az%^u@1=fif0fc*cdvsrl-!ga2_1k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Asia/Almaty'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"galishka15@gmail.com"
] | galishka15@gmail.com |
e8e014088b652af59a2a9cf399e4e10cf8b4779e | 1d9da7d9375baa2d9812df881e53f32d2f7634c1 | /MFW/tools/src/python/DB_Base_pb2.py | e892a78b146ecf9271464c6810c889d947664d39 | [] | no_license | wlcaption/MFrameWork | 7f91a21dd94f6762c7d892b26c321f8042fa4475 | c232e4ad742b59f9d95a4f70290c74d59151eceb | refs/heads/master | 2021-05-16T10:23:57.038228 | 2017-09-23T08:00:50 | 2017-09-23T08:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 65,432 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: DB_Base.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='DB_Base.proto',
package='PDB_Base',
syntax='proto3',
serialized_pb=_b('\n\rDB_Base.proto\x12\x08PDB_Base\"\xff\x01\n\x0f\x44\x42\x43hallengeMode\x12\r\n\x05ulUid\x18\x01 \x01(\x04\x12\x18\n\x10uiChallengeCount\x18\x02 \x01(\r\x12\x18\n\x10ulChallengeTimes\x18\x03 \x01(\x04\x12\x19\n\x11ulChallengePoints\x18\x04 \x01(\x04\x12\x1b\n\x13uiTreasureCardCount\x18\x05 \x01(\r\x12\x1c\n\x14ulChallengeStartTime\x18\x06 \x01(\x04\x12\x12\n\nuiRoomType\x18\x07 \x01(\r\x12\x10\n\x08uiRoomId\x18\x08 \x01(\r\x12\x16\n\x0euiTreasureCard\x18\t \x01(\r\x12\x15\n\ruiSinglePoint\x18\n \x01(\r\"\x95\x01\n\x19\x44\x42\x43hallengeModeRankRecord\x12\r\n\x05ulUid\x18\x01 \x01(\x04\x12\x12\n\nuiRoomType\x18\x02 \x01(\r\x12\x10\n\x08uiRoomid\x18\x03 \x01(\r\x12\x11\n\tuiRanking\x18\x04 \x01(\r\x12\x10\n\x08ulPoints\x18\x05 \x01(\x04\x12\x0e\n\x06ulTime\x18\x06 \x01(\x04\x12\x0e\n\x06ulGold\x18\x07 \x01(\x04\"\x86\x01\n\x15\x44\x42\x43hallengeModeRecord\x12\r\n\x05ulUid\x18\x01 \x01(\x04\x12\x19\n\x11ulChallengePoints\x18\x02 \x01(\x04\x12\x1d\n\x15ulChallengeRecordTime\x18\x03 \x01(\x04\x12\x12\n\nuiRoomType\x18\x04 \x01(\r\x12\x10\n\x08uiRoomId\x18\x05 \x01(\r\"\x81\x02\n\x14\x44\x42\x43hallengeMode_copy\x12\r\n\x05ulUid\x18\x01 \x01(\x04\x12\x18\n\x10uiChallengeCount\x18\x02 \x01(\r\x12\x17\n\x0fulChallengeTime\x18\x03 \x01(\x04\x12\x19\n\x11uiChallengePoints\x18\x04 \x01(\r\x12\x1b\n\x13uiTreasureCardCount\x18\x05 \x01(\r\x12\x1c\n\x14ulChallengeStartTime\x18\x06 \x01(\x04\x12\x12\n\nuiRoomType\x18\x07 \x01(\r\x12\x10\n\x08uiRoomId\x18\x08 \x01(\r\x12\x13\n\x0buiSubFlower\x18\t \x01(\r\x12\x16\n\x0euiTreasureCard\x18\n \x01(\r\"J\n\x11\x44\x42\x43lubCreateMatch\x12\x0c\n\x04ulId\x18\x01 \x01(\x04\x12\x11\n\tsRoomInfo\x18\x02 \x01(\x0c\x12\x14\n\x0culCreateTime\x18\x03 \x01(\x04\"\xd4\x01\n\nDBGameClub\x12\x0c\n\x04ulId\x18\x01 \x01(\x04\x12\x14\n\x0c\x63harNickName\x18\x02 \x01(\t\x12\x11\n\tulCaptain\x18\x03 \x01(\x04\x12\x13\n\x0buiLogoIndex\x18\x04 \x01(\r\x12\x13\n\x0buiMaxMember\x18\x05 \x01(\r\x12\x13\n\x0bsApplicants\x18\x06 
\x01(\x0c\x12\x0f\n\x07sNotice\x18\x07 \x01(\x0c\x12\x10\n\x08uiStatus\x18\x08 \x01(\r\x12\x14\n\x0csDynamicInfo\x18\t \x01(\x0c\x12\x17\n\x0fulCaptainReward\x18\n \x01(\x04\"|\n\rDBGameLogInfo\x12\x0c\n\x04ulId\x18\x01 \x01(\x04\x12\r\n\x05ulUid\x18\x02 \x01(\x04\x12\x12\n\nuiRoomType\x18\x03 \x01(\r\x12\x10\n\x08uiRoomId\x18\x04 \x01(\r\x12\x0e\n\x06ulTime\x18\x05 \x01(\x04\x12\x18\n\x10ulCurrencyChange\x18\x06 \x01(\x04\"A\n\x10\x44\x42PlayerClubInfo\x12\r\n\x05ulUid\x18\x01 \x01(\x04\x12\x0e\n\x06sText1\x18\x02 \x01(\x0c\x12\x0e\n\x06sText2\x18\x03 \x01(\x0c\"2\n\x11\x44\x42PlayerEmailInfo\x12\r\n\x05ulUid\x18\x01 \x01(\x04\x12\x0e\n\x06sEmail\x18\x02 \x01(\x0c\"0\n\x0e\x44\x42PlayerSignIn\x12\r\n\x05ulUid\x18\x01 \x01(\x04\x12\x0f\n\x07sSignIn\x18\x02 \x01(\x0c\"W\n\x10\x44\x42PlayerTaskInfo\x12\r\n\x05ulUid\x18\x01 \x01(\x04\x12\r\n\x05sTask\x18\x02 \x01(\x0c\x12\x11\n\tsRankTask\x18\x03 \x01(\x0c\x12\x12\n\nsNewerGift\x18\x04 \x01(\x0c\"\xe7\x02\n\x11\x44\x42SystemEmailInfo\x12\x0c\n\x04uiId\x18\x01 \x01(\r\x12\x0e\n\x06uiType\x18\x02 \x01(\r\x12\x14\n\x0cuiNotifyType\x18\x03 \x01(\r\x12\x11\n\tcharTitle\x18\x04 \x01(\t\x12\x10\n\x08\x63harText\x18\x05 \x01(\t\x12\x0e\n\x06ulTime\x18\x06 \x01(\x04\x12\x15\n\ruiRewardType1\x18\x07 \x01(\r\x12\x16\n\x0euiRewardCount1\x18\x08 \x01(\r\x12\x15\n\ruiRewardType2\x18\t \x01(\r\x12\x16\n\x0euiRewardCount2\x18\n \x01(\r\x12\x15\n\ruiRewardType3\x18\x0b \x01(\r\x12\x16\n\x0euiRewardCount3\x18\x0c \x01(\r\x12\x15\n\ruiRewardType4\x18\r \x01(\r\x12\x16\n\x0euiRewardCount4\x18\x0e \x01(\r\x12\x15\n\ruiRewardType5\x18\x0f \x01(\r\x12\x16\n\x0euiRewardCount5\x18\x10 \x01(\r\"A\n\x0f\x44\x42SystemMsgInfo\x12\x0c\n\x04uiId\x18\x01 \x01(\r\x12\x10\n\x08sMsgBody\x18\x02 \x01(\x0c\x12\x0e\n\x06ulTime\x18\x03 \x01(\x04\"<\n\x15\x44\x42UserClubMatchRecode\x12\r\n\x05ulUid\x18\x01 \x01(\x04\x12\x14\n\x0csMatchRecode\x18\x02 \x01(\x0c\">\n\x17\x44\x42UserCustomMatchRecode\x12\r\n\x05ulUid\x18\x01 
\x01(\x04\x12\x14\n\x0csMatchRecode\x18\x02 \x01(\x0c\"\x99\x01\n\x19\x44\x42UserEmailRechargeRecord\x12\x0c\n\x04ulId\x18\x01 \x01(\x04\x12\x11\n\tulSendUid\x18\x02 \x01(\x04\x12\x11\n\tulRecvUid\x18\x03 \x01(\x04\x12\x18\n\x10\x63harRechargeTime\x18\x04 \x01(\t\x12\x19\n\x11ulProductAddCount\x18\x05 \x01(\x04\x12\x13\n\x0bulEmailType\x18\x06 \x01(\x04\"\xa1\x03\n\nDBUserInfo\x12\r\n\x05ulUid\x18\x01 \x01(\x04\x12\x14\n\x0c\x63harNickName\x18\x02 \x01(\t\x12\r\n\x05uiSex\x18\x03 \x01(\r\x12\x11\n\tulGoldNum\x18\x04 \x01(\x04\x12\x14\n\x0culDiamondNum\x18\x05 \x01(\x04\x12\x0f\n\x07uiRobot\x18\x06 \x01(\r\x12\x17\n\x0fuiMatchWinCount\x18\x07 \x01(\r\x12\x18\n\x10uiMatchLoseCount\x18\x08 \x01(\r\x12\x19\n\x11ulCustomRoomPoint\x18\t \x01(\x04\x12\x13\n\x0bsHeadImgurl\x18\n \x01(\x0c\x12\x12\n\nuiIdentity\x18\x0b \x01(\r\x12\x0e\n\x06ulClub\x18\x0c \x01(\x04\x12\x17\n\x0fuiAccountStatus\x18\r \x01(\r\x12!\n\x19ulAccountStatusChangeTime\x18\x0e \x01(\x04\x12\x16\n\x0esContributions\x18\x0f \x01(\x0c\x12 \n\x18ulContributionChangeTime\x18\x10 \x01(\x04\x12\x13\n\x0bsRankExtend\x18\x11 \x01(\x0c\x12\x13\n\x0bulLoginTime\x18\x12 \x01(\x04\"\x8c\x02\n\x14\x44\x42UserRechargeRecord\x12\x12\n\nulServerId\x18\x01 \x01(\x04\x12\r\n\x05ulUid\x18\x02 \x01(\x04\x12\x13\n\x0buiProductId\x18\x03 \x01(\r\x12\x15\n\ruiProductType\x18\x04 \x01(\r\x12\x16\n\x0euiProductPrice\x18\x05 \x01(\r\x12\x19\n\x11ulProductAddCount\x18\x06 \x01(\x04\x12\x18\n\x10\x63harRechargeTime\x18\x07 \x01(\t\x12\x1a\n\x12ulRechargeShowTime\x18\x08 \x01(\x04\x12\x13\n\x0buiIsSandbox\x18\t \x01(\r\x12\x13\n\x0buiIsCaptain\x18\n \x01(\r\x12\x12\n\nuiPlatform\x18\x0b \x01(\r\"\xa8\x01\n\x17\x44\x42UserWebRechargeRecord\x12\x0c\n\x04uiId\x18\x01 \x01(\r\x12\r\n\x05ulUid\x18\x02 \x01(\x04\x12\x12\n\nuiPlatType\x18\x03 \x01(\r\x12\x18\n\x10\x63harRechargeTime\x18\x04 \x01(\t\x12\x19\n\x11ulProductAddCount\x18\x05 \x01(\x04\x12\r\n\x05ulRMB\x18\x06 \x01(\x04\x12\x18\n\x10\x63harThirdOrderId\x18\x07 
\x01(\tb\x06proto3')
)
_DBCHALLENGEMODE = _descriptor.Descriptor(
name='DBChallengeMode',
full_name='PDB_Base.DBChallengeMode',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ulUid', full_name='PDB_Base.DBChallengeMode.ulUid', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiChallengeCount', full_name='PDB_Base.DBChallengeMode.uiChallengeCount', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulChallengeTimes', full_name='PDB_Base.DBChallengeMode.ulChallengeTimes', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulChallengePoints', full_name='PDB_Base.DBChallengeMode.ulChallengePoints', index=3,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiTreasureCardCount', full_name='PDB_Base.DBChallengeMode.uiTreasureCardCount', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulChallengeStartTime', full_name='PDB_Base.DBChallengeMode.ulChallengeStartTime', index=5,
number=6, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRoomType', full_name='PDB_Base.DBChallengeMode.uiRoomType', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRoomId', full_name='PDB_Base.DBChallengeMode.uiRoomId', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiTreasureCard', full_name='PDB_Base.DBChallengeMode.uiTreasureCard', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiSinglePoint', full_name='PDB_Base.DBChallengeMode.uiSinglePoint', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=28,
serialized_end=283,
)
_DBCHALLENGEMODERANKRECORD = _descriptor.Descriptor(
name='DBChallengeModeRankRecord',
full_name='PDB_Base.DBChallengeModeRankRecord',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ulUid', full_name='PDB_Base.DBChallengeModeRankRecord.ulUid', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRoomType', full_name='PDB_Base.DBChallengeModeRankRecord.uiRoomType', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRoomid', full_name='PDB_Base.DBChallengeModeRankRecord.uiRoomid', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRanking', full_name='PDB_Base.DBChallengeModeRankRecord.uiRanking', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulPoints', full_name='PDB_Base.DBChallengeModeRankRecord.ulPoints', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulTime', full_name='PDB_Base.DBChallengeModeRankRecord.ulTime', index=5,
number=6, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulGold', full_name='PDB_Base.DBChallengeModeRankRecord.ulGold', index=6,
number=7, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=286,
serialized_end=435,
)
_DBCHALLENGEMODERECORD = _descriptor.Descriptor(
name='DBChallengeModeRecord',
full_name='PDB_Base.DBChallengeModeRecord',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ulUid', full_name='PDB_Base.DBChallengeModeRecord.ulUid', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulChallengePoints', full_name='PDB_Base.DBChallengeModeRecord.ulChallengePoints', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulChallengeRecordTime', full_name='PDB_Base.DBChallengeModeRecord.ulChallengeRecordTime', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRoomType', full_name='PDB_Base.DBChallengeModeRecord.uiRoomType', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRoomId', full_name='PDB_Base.DBChallengeModeRecord.uiRoomId', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=438,
serialized_end=572,
)
_DBCHALLENGEMODE_COPY = _descriptor.Descriptor(
name='DBChallengeMode_copy',
full_name='PDB_Base.DBChallengeMode_copy',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ulUid', full_name='PDB_Base.DBChallengeMode_copy.ulUid', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiChallengeCount', full_name='PDB_Base.DBChallengeMode_copy.uiChallengeCount', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulChallengeTime', full_name='PDB_Base.DBChallengeMode_copy.ulChallengeTime', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiChallengePoints', full_name='PDB_Base.DBChallengeMode_copy.uiChallengePoints', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiTreasureCardCount', full_name='PDB_Base.DBChallengeMode_copy.uiTreasureCardCount', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulChallengeStartTime', full_name='PDB_Base.DBChallengeMode_copy.ulChallengeStartTime', index=5,
number=6, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRoomType', full_name='PDB_Base.DBChallengeMode_copy.uiRoomType', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRoomId', full_name='PDB_Base.DBChallengeMode_copy.uiRoomId', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiSubFlower', full_name='PDB_Base.DBChallengeMode_copy.uiSubFlower', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiTreasureCard', full_name='PDB_Base.DBChallengeMode_copy.uiTreasureCard', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=575,
serialized_end=832,
)
_DBCLUBCREATEMATCH = _descriptor.Descriptor(
name='DBClubCreateMatch',
full_name='PDB_Base.DBClubCreateMatch',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ulId', full_name='PDB_Base.DBClubCreateMatch.ulId', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sRoomInfo', full_name='PDB_Base.DBClubCreateMatch.sRoomInfo', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulCreateTime', full_name='PDB_Base.DBClubCreateMatch.ulCreateTime', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=834,
serialized_end=908,
)
_DBGAMECLUB = _descriptor.Descriptor(
name='DBGameClub',
full_name='PDB_Base.DBGameClub',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ulId', full_name='PDB_Base.DBGameClub.ulId', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='charNickName', full_name='PDB_Base.DBGameClub.charNickName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulCaptain', full_name='PDB_Base.DBGameClub.ulCaptain', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiLogoIndex', full_name='PDB_Base.DBGameClub.uiLogoIndex', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiMaxMember', full_name='PDB_Base.DBGameClub.uiMaxMember', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sApplicants', full_name='PDB_Base.DBGameClub.sApplicants', index=5,
number=6, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sNotice', full_name='PDB_Base.DBGameClub.sNotice', index=6,
number=7, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiStatus', full_name='PDB_Base.DBGameClub.uiStatus', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sDynamicInfo', full_name='PDB_Base.DBGameClub.sDynamicInfo', index=8,
number=9, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulCaptainReward', full_name='PDB_Base.DBGameClub.ulCaptainReward', index=9,
number=10, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=911,
serialized_end=1123,
)
_DBGAMELOGINFO = _descriptor.Descriptor(
name='DBGameLogInfo',
full_name='PDB_Base.DBGameLogInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ulId', full_name='PDB_Base.DBGameLogInfo.ulId', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulUid', full_name='PDB_Base.DBGameLogInfo.ulUid', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRoomType', full_name='PDB_Base.DBGameLogInfo.uiRoomType', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRoomId', full_name='PDB_Base.DBGameLogInfo.uiRoomId', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulTime', full_name='PDB_Base.DBGameLogInfo.ulTime', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulCurrencyChange', full_name='PDB_Base.DBGameLogInfo.ulCurrencyChange', index=5,
number=6, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1125,
serialized_end=1249,
)
_DBPLAYERCLUBINFO = _descriptor.Descriptor(
name='DBPlayerClubInfo',
full_name='PDB_Base.DBPlayerClubInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ulUid', full_name='PDB_Base.DBPlayerClubInfo.ulUid', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sText1', full_name='PDB_Base.DBPlayerClubInfo.sText1', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sText2', full_name='PDB_Base.DBPlayerClubInfo.sText2', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1251,
serialized_end=1316,
)
_DBPLAYEREMAILINFO = _descriptor.Descriptor(
name='DBPlayerEmailInfo',
full_name='PDB_Base.DBPlayerEmailInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ulUid', full_name='PDB_Base.DBPlayerEmailInfo.ulUid', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sEmail', full_name='PDB_Base.DBPlayerEmailInfo.sEmail', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1318,
serialized_end=1368,
)
_DBPLAYERSIGNIN = _descriptor.Descriptor(
name='DBPlayerSignIn',
full_name='PDB_Base.DBPlayerSignIn',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ulUid', full_name='PDB_Base.DBPlayerSignIn.ulUid', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sSignIn', full_name='PDB_Base.DBPlayerSignIn.sSignIn', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1370,
serialized_end=1418,
)
_DBPLAYERTASKINFO = _descriptor.Descriptor(
name='DBPlayerTaskInfo',
full_name='PDB_Base.DBPlayerTaskInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ulUid', full_name='PDB_Base.DBPlayerTaskInfo.ulUid', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sTask', full_name='PDB_Base.DBPlayerTaskInfo.sTask', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sRankTask', full_name='PDB_Base.DBPlayerTaskInfo.sRankTask', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sNewerGift', full_name='PDB_Base.DBPlayerTaskInfo.sNewerGift', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1420,
serialized_end=1507,
)
_DBSYSTEMEMAILINFO = _descriptor.Descriptor(
name='DBSystemEmailInfo',
full_name='PDB_Base.DBSystemEmailInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uiId', full_name='PDB_Base.DBSystemEmailInfo.uiId', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiType', full_name='PDB_Base.DBSystemEmailInfo.uiType', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiNotifyType', full_name='PDB_Base.DBSystemEmailInfo.uiNotifyType', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='charTitle', full_name='PDB_Base.DBSystemEmailInfo.charTitle', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='charText', full_name='PDB_Base.DBSystemEmailInfo.charText', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulTime', full_name='PDB_Base.DBSystemEmailInfo.ulTime', index=5,
number=6, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRewardType1', full_name='PDB_Base.DBSystemEmailInfo.uiRewardType1', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRewardCount1', full_name='PDB_Base.DBSystemEmailInfo.uiRewardCount1', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRewardType2', full_name='PDB_Base.DBSystemEmailInfo.uiRewardType2', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRewardCount2', full_name='PDB_Base.DBSystemEmailInfo.uiRewardCount2', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRewardType3', full_name='PDB_Base.DBSystemEmailInfo.uiRewardType3', index=10,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRewardCount3', full_name='PDB_Base.DBSystemEmailInfo.uiRewardCount3', index=11,
number=12, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRewardType4', full_name='PDB_Base.DBSystemEmailInfo.uiRewardType4', index=12,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRewardCount4', full_name='PDB_Base.DBSystemEmailInfo.uiRewardCount4', index=13,
number=14, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRewardType5', full_name='PDB_Base.DBSystemEmailInfo.uiRewardType5', index=14,
number=15, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRewardCount5', full_name='PDB_Base.DBSystemEmailInfo.uiRewardCount5', index=15,
number=16, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1510,
serialized_end=1869,
)
_DBSYSTEMMSGINFO = _descriptor.Descriptor(
name='DBSystemMsgInfo',
full_name='PDB_Base.DBSystemMsgInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uiId', full_name='PDB_Base.DBSystemMsgInfo.uiId', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sMsgBody', full_name='PDB_Base.DBSystemMsgInfo.sMsgBody', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulTime', full_name='PDB_Base.DBSystemMsgInfo.ulTime', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1871,
serialized_end=1936,
)
_DBUSERCLUBMATCHRECODE = _descriptor.Descriptor(
name='DBUserClubMatchRecode',
full_name='PDB_Base.DBUserClubMatchRecode',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ulUid', full_name='PDB_Base.DBUserClubMatchRecode.ulUid', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sMatchRecode', full_name='PDB_Base.DBUserClubMatchRecode.sMatchRecode', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1938,
serialized_end=1998,
)
_DBUSERCUSTOMMATCHRECODE = _descriptor.Descriptor(
name='DBUserCustomMatchRecode',
full_name='PDB_Base.DBUserCustomMatchRecode',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ulUid', full_name='PDB_Base.DBUserCustomMatchRecode.ulUid', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sMatchRecode', full_name='PDB_Base.DBUserCustomMatchRecode.sMatchRecode', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2000,
serialized_end=2062,
)
_DBUSEREMAILRECHARGERECORD = _descriptor.Descriptor(
name='DBUserEmailRechargeRecord',
full_name='PDB_Base.DBUserEmailRechargeRecord',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ulId', full_name='PDB_Base.DBUserEmailRechargeRecord.ulId', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulSendUid', full_name='PDB_Base.DBUserEmailRechargeRecord.ulSendUid', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulRecvUid', full_name='PDB_Base.DBUserEmailRechargeRecord.ulRecvUid', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='charRechargeTime', full_name='PDB_Base.DBUserEmailRechargeRecord.charRechargeTime', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulProductAddCount', full_name='PDB_Base.DBUserEmailRechargeRecord.ulProductAddCount', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulEmailType', full_name='PDB_Base.DBUserEmailRechargeRecord.ulEmailType', index=5,
number=6, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2065,
serialized_end=2218,
)
_DBUSERINFO = _descriptor.Descriptor(
name='DBUserInfo',
full_name='PDB_Base.DBUserInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ulUid', full_name='PDB_Base.DBUserInfo.ulUid', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='charNickName', full_name='PDB_Base.DBUserInfo.charNickName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiSex', full_name='PDB_Base.DBUserInfo.uiSex', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulGoldNum', full_name='PDB_Base.DBUserInfo.ulGoldNum', index=3,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulDiamondNum', full_name='PDB_Base.DBUserInfo.ulDiamondNum', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiRobot', full_name='PDB_Base.DBUserInfo.uiRobot', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiMatchWinCount', full_name='PDB_Base.DBUserInfo.uiMatchWinCount', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiMatchLoseCount', full_name='PDB_Base.DBUserInfo.uiMatchLoseCount', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulCustomRoomPoint', full_name='PDB_Base.DBUserInfo.ulCustomRoomPoint', index=8,
number=9, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sHeadImgurl', full_name='PDB_Base.DBUserInfo.sHeadImgurl', index=9,
number=10, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiIdentity', full_name='PDB_Base.DBUserInfo.uiIdentity', index=10,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulClub', full_name='PDB_Base.DBUserInfo.ulClub', index=11,
number=12, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiAccountStatus', full_name='PDB_Base.DBUserInfo.uiAccountStatus', index=12,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulAccountStatusChangeTime', full_name='PDB_Base.DBUserInfo.ulAccountStatusChangeTime', index=13,
number=14, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sContributions', full_name='PDB_Base.DBUserInfo.sContributions', index=14,
number=15, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulContributionChangeTime', full_name='PDB_Base.DBUserInfo.ulContributionChangeTime', index=15,
number=16, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sRankExtend', full_name='PDB_Base.DBUserInfo.sRankExtend', index=16,
number=17, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulLoginTime', full_name='PDB_Base.DBUserInfo.ulLoginTime', index=17,
number=18, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2221,
serialized_end=2638,
)
_DBUSERRECHARGERECORD = _descriptor.Descriptor(
name='DBUserRechargeRecord',
full_name='PDB_Base.DBUserRechargeRecord',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ulServerId', full_name='PDB_Base.DBUserRechargeRecord.ulServerId', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulUid', full_name='PDB_Base.DBUserRechargeRecord.ulUid', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiProductId', full_name='PDB_Base.DBUserRechargeRecord.uiProductId', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiProductType', full_name='PDB_Base.DBUserRechargeRecord.uiProductType', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiProductPrice', full_name='PDB_Base.DBUserRechargeRecord.uiProductPrice', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulProductAddCount', full_name='PDB_Base.DBUserRechargeRecord.ulProductAddCount', index=5,
number=6, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='charRechargeTime', full_name='PDB_Base.DBUserRechargeRecord.charRechargeTime', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulRechargeShowTime', full_name='PDB_Base.DBUserRechargeRecord.ulRechargeShowTime', index=7,
number=8, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiIsSandbox', full_name='PDB_Base.DBUserRechargeRecord.uiIsSandbox', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiIsCaptain', full_name='PDB_Base.DBUserRechargeRecord.uiIsCaptain', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiPlatform', full_name='PDB_Base.DBUserRechargeRecord.uiPlatform', index=10,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2641,
serialized_end=2909,
)
_DBUSERWEBRECHARGERECORD = _descriptor.Descriptor(
name='DBUserWebRechargeRecord',
full_name='PDB_Base.DBUserWebRechargeRecord',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uiId', full_name='PDB_Base.DBUserWebRechargeRecord.uiId', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulUid', full_name='PDB_Base.DBUserWebRechargeRecord.ulUid', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uiPlatType', full_name='PDB_Base.DBUserWebRechargeRecord.uiPlatType', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='charRechargeTime', full_name='PDB_Base.DBUserWebRechargeRecord.charRechargeTime', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulProductAddCount', full_name='PDB_Base.DBUserWebRechargeRecord.ulProductAddCount', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ulRMB', full_name='PDB_Base.DBUserWebRechargeRecord.ulRMB', index=5,
number=6, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='charThirdOrderId', full_name='PDB_Base.DBUserWebRechargeRecord.charThirdOrderId', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2912,
serialized_end=3080,
)
DESCRIPTOR.message_types_by_name['DBChallengeMode'] = _DBCHALLENGEMODE
DESCRIPTOR.message_types_by_name['DBChallengeModeRankRecord'] = _DBCHALLENGEMODERANKRECORD
DESCRIPTOR.message_types_by_name['DBChallengeModeRecord'] = _DBCHALLENGEMODERECORD
DESCRIPTOR.message_types_by_name['DBChallengeMode_copy'] = _DBCHALLENGEMODE_COPY
DESCRIPTOR.message_types_by_name['DBClubCreateMatch'] = _DBCLUBCREATEMATCH
DESCRIPTOR.message_types_by_name['DBGameClub'] = _DBGAMECLUB
DESCRIPTOR.message_types_by_name['DBGameLogInfo'] = _DBGAMELOGINFO
DESCRIPTOR.message_types_by_name['DBPlayerClubInfo'] = _DBPLAYERCLUBINFO
DESCRIPTOR.message_types_by_name['DBPlayerEmailInfo'] = _DBPLAYEREMAILINFO
DESCRIPTOR.message_types_by_name['DBPlayerSignIn'] = _DBPLAYERSIGNIN
DESCRIPTOR.message_types_by_name['DBPlayerTaskInfo'] = _DBPLAYERTASKINFO
DESCRIPTOR.message_types_by_name['DBSystemEmailInfo'] = _DBSYSTEMEMAILINFO
DESCRIPTOR.message_types_by_name['DBSystemMsgInfo'] = _DBSYSTEMMSGINFO
DESCRIPTOR.message_types_by_name['DBUserClubMatchRecode'] = _DBUSERCLUBMATCHRECODE
DESCRIPTOR.message_types_by_name['DBUserCustomMatchRecode'] = _DBUSERCUSTOMMATCHRECODE
DESCRIPTOR.message_types_by_name['DBUserEmailRechargeRecord'] = _DBUSEREMAILRECHARGERECORD
DESCRIPTOR.message_types_by_name['DBUserInfo'] = _DBUSERINFO
DESCRIPTOR.message_types_by_name['DBUserRechargeRecord'] = _DBUSERRECHARGERECORD
DESCRIPTOR.message_types_by_name['DBUserWebRechargeRecord'] = _DBUSERWEBRECHARGERECORD
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DBChallengeMode = _reflection.GeneratedProtocolMessageType('DBChallengeMode', (_message.Message,), dict(
DESCRIPTOR = _DBCHALLENGEMODE,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBChallengeMode)
))
_sym_db.RegisterMessage(DBChallengeMode)
DBChallengeModeRankRecord = _reflection.GeneratedProtocolMessageType('DBChallengeModeRankRecord', (_message.Message,), dict(
DESCRIPTOR = _DBCHALLENGEMODERANKRECORD,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBChallengeModeRankRecord)
))
_sym_db.RegisterMessage(DBChallengeModeRankRecord)
DBChallengeModeRecord = _reflection.GeneratedProtocolMessageType('DBChallengeModeRecord', (_message.Message,), dict(
DESCRIPTOR = _DBCHALLENGEMODERECORD,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBChallengeModeRecord)
))
_sym_db.RegisterMessage(DBChallengeModeRecord)
DBChallengeMode_copy = _reflection.GeneratedProtocolMessageType('DBChallengeMode_copy', (_message.Message,), dict(
DESCRIPTOR = _DBCHALLENGEMODE_COPY,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBChallengeMode_copy)
))
_sym_db.RegisterMessage(DBChallengeMode_copy)
DBClubCreateMatch = _reflection.GeneratedProtocolMessageType('DBClubCreateMatch', (_message.Message,), dict(
DESCRIPTOR = _DBCLUBCREATEMATCH,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBClubCreateMatch)
))
_sym_db.RegisterMessage(DBClubCreateMatch)
DBGameClub = _reflection.GeneratedProtocolMessageType('DBGameClub', (_message.Message,), dict(
DESCRIPTOR = _DBGAMECLUB,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBGameClub)
))
_sym_db.RegisterMessage(DBGameClub)
DBGameLogInfo = _reflection.GeneratedProtocolMessageType('DBGameLogInfo', (_message.Message,), dict(
DESCRIPTOR = _DBGAMELOGINFO,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBGameLogInfo)
))
_sym_db.RegisterMessage(DBGameLogInfo)
DBPlayerClubInfo = _reflection.GeneratedProtocolMessageType('DBPlayerClubInfo', (_message.Message,), dict(
DESCRIPTOR = _DBPLAYERCLUBINFO,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBPlayerClubInfo)
))
_sym_db.RegisterMessage(DBPlayerClubInfo)
DBPlayerEmailInfo = _reflection.GeneratedProtocolMessageType('DBPlayerEmailInfo', (_message.Message,), dict(
DESCRIPTOR = _DBPLAYEREMAILINFO,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBPlayerEmailInfo)
))
_sym_db.RegisterMessage(DBPlayerEmailInfo)
DBPlayerSignIn = _reflection.GeneratedProtocolMessageType('DBPlayerSignIn', (_message.Message,), dict(
DESCRIPTOR = _DBPLAYERSIGNIN,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBPlayerSignIn)
))
_sym_db.RegisterMessage(DBPlayerSignIn)
DBPlayerTaskInfo = _reflection.GeneratedProtocolMessageType('DBPlayerTaskInfo', (_message.Message,), dict(
DESCRIPTOR = _DBPLAYERTASKINFO,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBPlayerTaskInfo)
))
_sym_db.RegisterMessage(DBPlayerTaskInfo)
DBSystemEmailInfo = _reflection.GeneratedProtocolMessageType('DBSystemEmailInfo', (_message.Message,), dict(
DESCRIPTOR = _DBSYSTEMEMAILINFO,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBSystemEmailInfo)
))
_sym_db.RegisterMessage(DBSystemEmailInfo)
DBSystemMsgInfo = _reflection.GeneratedProtocolMessageType('DBSystemMsgInfo', (_message.Message,), dict(
DESCRIPTOR = _DBSYSTEMMSGINFO,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBSystemMsgInfo)
))
_sym_db.RegisterMessage(DBSystemMsgInfo)
DBUserClubMatchRecode = _reflection.GeneratedProtocolMessageType('DBUserClubMatchRecode', (_message.Message,), dict(
DESCRIPTOR = _DBUSERCLUBMATCHRECODE,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBUserClubMatchRecode)
))
_sym_db.RegisterMessage(DBUserClubMatchRecode)
DBUserCustomMatchRecode = _reflection.GeneratedProtocolMessageType('DBUserCustomMatchRecode', (_message.Message,), dict(
DESCRIPTOR = _DBUSERCUSTOMMATCHRECODE,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBUserCustomMatchRecode)
))
_sym_db.RegisterMessage(DBUserCustomMatchRecode)
DBUserEmailRechargeRecord = _reflection.GeneratedProtocolMessageType('DBUserEmailRechargeRecord', (_message.Message,), dict(
DESCRIPTOR = _DBUSEREMAILRECHARGERECORD,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBUserEmailRechargeRecord)
))
_sym_db.RegisterMessage(DBUserEmailRechargeRecord)
DBUserInfo = _reflection.GeneratedProtocolMessageType('DBUserInfo', (_message.Message,), dict(
DESCRIPTOR = _DBUSERINFO,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBUserInfo)
))
_sym_db.RegisterMessage(DBUserInfo)
DBUserRechargeRecord = _reflection.GeneratedProtocolMessageType('DBUserRechargeRecord', (_message.Message,), dict(
DESCRIPTOR = _DBUSERRECHARGERECORD,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBUserRechargeRecord)
))
_sym_db.RegisterMessage(DBUserRechargeRecord)
DBUserWebRechargeRecord = _reflection.GeneratedProtocolMessageType('DBUserWebRechargeRecord', (_message.Message,), dict(
DESCRIPTOR = _DBUSERWEBRECHARGERECORD,
__module__ = 'DB_Base_pb2'
# @@protoc_insertion_point(class_scope:PDB_Base.DBUserWebRechargeRecord)
))
_sym_db.RegisterMessage(DBUserWebRechargeRecord)
# @@protoc_insertion_point(module_scope)
| [
"captainl1993@126.com"
] | captainl1993@126.com |
27ed5b0a4dc4008cf89eac733cd2a6199926ee55 | f249d2536ac5d0320c353b897ae864843bcd1452 | /cma/constraints_handler.py | bc547485f0149199cce7b9dd5b05ccb2a481607e | [
"BSD-3-Clause"
] | permissive | shikhar-1/RL-policy-improvement | 4c488b9a530931b303c0664d121baf5dd3668276 | c244c21658134eae1806fc4e4734cf33de7f0e00 | refs/heads/master | 2023-01-04T06:30:35.854106 | 2020-11-04T00:08:31 | 2020-11-04T00:08:31 | 265,949,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,396 | py | # -*- coding: utf-8 -*-
"""A collection of boundary and (in future) constraints handling classes.
"""
from __future__ import absolute_import, division, print_function #, unicode_literals
# __package__ = 'cma'
import numpy as np
from .utilities.utils import rglen
# from .utilities.math import Mh
from .transformations import BoxConstraintsLinQuadTransformation
from .utilities.python3for2 import range
del absolute_import, division, print_function #, unicode_literals
class BoundaryHandlerBase(object):
"""quick hack versatile base class"""
def __init__(self, bounds):
"""bounds are not copied, but possibly modified and
put into a normalized form: ``bounds`` can be ``None``
or ``[lb, ub]`` where ``lb`` and ``ub`` are
either None or a vector (which can have ``None`` entries).
Generally, the last entry is recycled to compute bounds
for any dimension.
"""
if bounds in [None, (), []]:
self.bounds = None
else:
if not isinstance(bounds, (tuple, list)) or len(bounds) != 2:
raise ValueError(
"bounds must be None, empty, or a list of length 2"
" where each element may be a scalar, list, array,"
" or None; type(bounds) was: %s" % str(type(bounds)))
l = [None, None] # figure out lengths
for i in [0, 1]:
try:
l[i] = len(bounds[i])
except TypeError:
bounds[i] = [bounds[i]]
l[i] = 1
if all([bounds[i][j] is None or not np.isfinite(bounds[i][j])
for j in rglen(bounds[i])]):
bounds[i] = None
if bounds[i] is not None and any([bounds[i][j] == (-1)**i * np.inf
for j in rglen(bounds[i])]):
raise ValueError('lower/upper is +inf/-inf and ' +
'therefore no finite feasible solution is available')
self.bounds = bounds
def __call__(self, solutions, *args, **kwargs):
"""return penalty or list of penalties, by default zero(s).
This interface seems too specifically tailored to the derived
BoundPenalty class, it should maybe change.
"""
if np.isscalar(solutions[0]):
return 0.0
else:
return len(solutions) * [0.0]
def update(self, *args, **kwargs):
"""end-iteration callback of boundary handler (abstract/empty)"""
return self
def repair(self, x, copy_if_changed=True):
"""projects infeasible values on the domain bound, might be
overwritten by derived class """
copy = copy_if_changed
if self.bounds is None:
return x
for ib in [0, 1]:
if self.bounds[ib] is None:
continue
for i in rglen(x):
idx = min([i, len(self.bounds[ib]) - 1])
if self.bounds[ib][idx] is not None and \
(-1)**ib * x[i] < (-1)**ib * self.bounds[ib][idx]:
if copy:
x = np.array(x, copy=True)
copy = False
x[i] = self.bounds[ib][idx]
def inverse(self, y, copy_if_changed=True):
"""inverse of repair if it exists, at least it should hold
``repair == repair o inverse o repair``"""
return y
def get_bounds(self, which, dimension):
"""``get_bounds('lower', 8)`` returns the lower bounds in 8-D"""
if which in ['lower', 0, '0']:
return self._get_bounds(0, dimension)
elif which in ['upper', 1, '1']:
return self._get_bounds(1, dimension)
else:
raise ValueError("argument which must be 'lower' or 'upper'")
def _get_bounds(self, ib, dimension):
"""ib == 0/1 means lower/upper bound, return a vector of length
`dimension` """
sign_ = 2 * ib - 1
assert sign_**2 == 1
if self.bounds is None or self.bounds[ib] is None:
return np.array(dimension * [sign_ * np.Inf])
res = []
for i in range(dimension):
res.append(self.bounds[ib][min([i, len(self.bounds[ib]) - 1])])
if res[-1] is None:
res[-1] = sign_ * np.Inf
return np.array(res)
def has_bounds(self):
"""return `True` if any variable is bounded"""
bounds = self.bounds
if bounds is None or all(b is None for b in bounds):
return False
for ib, bound in enumerate(bounds):
if bound is not None:
sign_ = 2 * ib - 1
for bound_i in bound:
if bound_i is not None and sign_ * bound_i < np.inf:
return True
return False
def is_in_bounds(self, x):
"""not yet tested"""
if self.bounds is None:
return True
for ib in [0, 1]:
if self.bounds[ib] is None:
continue
for i in rglen(x):
idx = min([i, len(self.bounds[ib]) - 1])
if self.bounds[ib][idx] is not None and \
(-1)**ib * x[i] < (-1)**ib * self.bounds[ib][idx]:
return False
return True
def to_dim_times_two(self, bounds):
"""return boundaries in format ``[[lb0, ub0], [lb1, ub1], ...]``,
as used by ``BoxConstraints...`` class.
"""
if not bounds:
b = [[None, None]]
else:
l = [None, None] # figure out lenths
for i in [0, 1]:
try:
l[i] = len(bounds[i])
except TypeError:
bounds[i] = [bounds[i]]
l[i] = 1
if l[0] != l[1] and 1 not in l and None not in (
bounds[0][-1], bounds[1][-1]): # disallow different lengths
raise ValueError(
"lower and upper bounds must have the same length\n"
"or length one or `None` as last element (the last"
" element is always recycled).\n"
"Lengths were %s"
% str(l))
b = [] # bounds in different format
try:
for i in range(max(l)):
b.append([bounds[0][min((i, l[0] - 1))],
bounds[1][min((i, l[1] - 1))]])
except (TypeError, IndexError):
print("boundaries must be provided in the form " +
"[scalar_of_vector, scalar_or_vector]")
raise
return b
class BoundNone(BoundaryHandlerBase):
"""no boundaries"""
def __init__(self, bounds=None):
if bounds is not None:
raise ValueError()
# BoundaryHandlerBase.__init__(self, None)
super(BoundNone, self).__init__(None)
def is_in_bounds(self, x):
return True
class BoundTransform(BoundaryHandlerBase):
"""Handle boundaries by a smooth, piecewise linear and quadratic
transformation into the feasible domain.
>>> import numpy as np
>>> import cma
>>> from cma.constraints_handler import BoundTransform
>>> from cma import fitness_transformations as ft
>>> veq = cma.utilities.math.Mh.vequals_approximately
>>> b = BoundTransform([None, 1])
>>> assert b.bounds == [[None], [1]]
>>> assert veq(b.repair([0, 1, 1.2]), np.array([ 0., 0.975, 0.975]))
>>> assert b.is_in_bounds([0, 0.5, 1])
>>> assert veq(b.transform([0, 1, 2]), [ 0. , 0.975, 0.2 ])
>>> bounded_sphere = ft.ComposedFunction([
... cma.ff.sphere,
... BoundTransform([[], 5 * [-1] + [np.inf]]).transform
... ])
>>> o1 = cma.fmin(bounded_sphere, 6 * [-2], 0.5) # doctest: +ELLIPSIS
(4_w,9)-aCMA-ES (mu_w=2.8,w_1=49%) in dimension 6 (seed=...
>>> o2 = cma.fmin(cma.ff.sphere, 6 * [-2], 0.5, options={
... 'BoundaryHandler': cma.s.ch.BoundTransform,
... 'bounds': [[], 5 * [-1] + [np.inf]] }) # doctest: +ELLIPSIS
(4_w,9)-aCMA-ES (mu_w=2.8,w_1=49%) in dimension 6 (seed=...
>>> assert o1[1] < 5 + 1e-8 and o2[1] < 5 + 1e-8
>>> b = BoundTransform([-np.random.rand(120), np.random.rand(120)])
>>> for i in range(0, 100, 9):
... x = (-i-1) * np.random.rand(120) + i * np.random.randn(120)
... x_to_b = b.repair(x)
... x2 = b.inverse(x_to_b)
... x2_to_b = b.repair(x2)
... x3 = b.inverse(x2_to_b)
... x3_to_b = b.repair(x3)
... assert veq(x_to_b, x2_to_b)
... assert veq(x2, x3)
... assert veq(x2_to_b, x3_to_b)
Details: this class uses ``class BoxConstraintsLinQuadTransformation``
"""
def __init__(self, bounds=None):
"""Argument bounds can be `None` or ``bounds[0]`` and ``bounds[1]``
are lower and upper domain boundaries, each is either `None` or
a scalar or a list or array of appropriate size.
"""
# BoundaryHandlerBase.__init__(self, bounds)
super(BoundTransform, self).__init__(bounds)
self.bounds_tf = BoxConstraintsLinQuadTransformation(self.to_dim_times_two(bounds))
def repair(self, x, copy_if_changed=True):
"""transforms ``x`` into the bounded domain.
"""
copy = copy_if_changed
if self.bounds is None or (self.bounds[0] is None and
self.bounds[1] is None):
return x
return np.asarray(self.bounds_tf(x, copy))
def transform(self, x):
return self.repair(x)
def inverse(self, x, copy_if_changed=True):
"""inverse transform of ``x`` from the bounded domain.
"""
if self.bounds is None or (self.bounds[0] is None and
self.bounds[1] is None):
return x
return np.asarray(self.bounds_tf.inverse(x, copy_if_changed)) # this doesn't exist
class BoundPenalty(BoundaryHandlerBase):
"""Compute a bound penalty and update coordinate-wise penalty weights.
An instance must be updated each iteration using the `update` method.
Details:
- The penalty computes like ``sum(w[i] * (x[i]-xfeas[i])**2)``,
where ``xfeas`` is the closest feasible (in-bounds) solution from
``x``. The weight ``w[i]`` should be updated during each iteration
using the update method.
Example how this boundary handler is used with `cma.fmin` via the
options (`CMAOptions`) of the class `cma.CMAEvolutionStrategy`:
>>> import cma
>>> res = cma.fmin(cma.ff.elli, 6 * [1], 1,
... {'BoundaryHandler': cma.BoundPenalty,
... 'bounds': [-1, 1],
... 'fixed_variables': {0: 0.012, 2:0.234}
... }) # doctest: +ELLIPSIS
(4_w,8)-aCMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=...
>>> assert res[1] < 13.76
Reference: Hansen et al 2009, A Method for Handling Uncertainty...
IEEE TEC, with addendum, see
http://www.lri.fr/~hansen/TEC2009online.pdf
**todo**: implement a more generic interface, where this becomes a
fitness wrapper which adds the desired penalty and the `update`
method is used as callback argument for `fmin` like::
f = cma.BoundPenalty(cma.ff.elli, bounds=[-1, 1])
res = cma.fmin(f, 6 * [1], callback=f.update)
where callback functions should receive the same arguments as
`tell`, namely an `CMAEvolutionStrategy` instance, an array of the
current solutions and their respective f-values. Such change is
relatively involved. Consider also that bounds are related with the
geno- to phenotype transformation.
"""
def __init__(self, bounds=None):
"""Argument bounds can be `None` or ``bounds[0]`` and ``bounds[1]``
are lower and upper domain boundaries, each is either `None` or
a scalar or a `list` or `np.array` of appropriate size.
"""
# #
# bounds attribute reminds the domain boundary values
# BoundaryHandlerBase.__init__(self, bounds)
super(BoundPenalty, self).__init__(bounds)
self.gamma = 1 # a very crude assumption
self.weights_initialized = False # gamma becomes a vector after initialization
self.hist = [] # delta-f history
def repair(self, x, copy_if_changed=True):
"""sets out-of-bounds components of ``x`` on the bounds.
"""
# TODO (old data): CPU(N,lam,iter=20,200,100): 3.3s of 8s for two bounds, 1.8s of 6.5s for one bound
# remark: np.max([bounds[0], x]) is about 40 times slower than max((bounds[0], x))
copy = copy_if_changed
bounds = self.bounds
if bounds not in (None, [None, None], (None, None)): # solely for effiency
if copy:
x = np.array(x, copy=True)
if bounds[0] is not None:
if np.isscalar(bounds[0]):
for i in rglen(x):
x[i] = max((bounds[0], x[i]))
else:
for i in rglen(x):
j = min([i, len(bounds[0]) - 1])
if bounds[0][j] is not None:
x[i] = max((bounds[0][j], x[i]))
if bounds[1] is not None:
if np.isscalar(bounds[1]):
for i in rglen(x):
x[i] = min((bounds[1], x[i]))
else:
for i in rglen(x):
j = min((i, len(bounds[1]) - 1))
if bounds[1][j] is not None:
x[i] = min((bounds[1][j], x[i]))
return x
# ____________________________________________________________
#
def __call__(self, x, archive, gp):
"""returns the boundary violation penalty for `x`,
where `x` is a single solution or a list or np.array of solutions.
"""
if x in (None, (), []):
return x
if self.bounds in (None, [None, None], (None, None)):
return 0.0 if np.isscalar(x[0]) else [0.0] * len(x) # no penalty
x_is_single_vector = np.isscalar(x[0])
if x_is_single_vector:
x = [x]
# add fixed variables to self.gamma
try:
gamma = list(self.gamma) # fails if self.gamma is a scalar
for i in sorted(gp.fixed_values): # fails if fixed_values is None
gamma.insert(i, 0.0)
gamma = np.array(gamma, copy=False)
except TypeError:
gamma = self.gamma
pen = []
for xi in x:
# CAVE: this does not work with already repaired values!!
# CPU(N,lam,iter=20,200,100)?: 3s of 10s, np.array(xi): 1s
# remark: one deep copy can be prevented by xold = xi first
xpheno = gp.pheno(archive[xi]['geno'])
# necessary, because xi was repaired to be in bounds
xinbounds = self.repair(xpheno)
# could be omitted (with unpredictable effect in case of external repair)
fac = 1 # exp(0.1 * (log(self.scal) - np.mean(self.scal)))
pen.append(sum(gamma * ((xinbounds - xpheno) / fac)**2) / len(xi))
return pen[0] if x_is_single_vector else pen
# ____________________________________________________________
#
def feasible_ratio(self, solutions):
"""counts for each coordinate the number of feasible values in
``solutions`` and returns an `np.array` of length
``len(solutions[0])`` with the ratios.
"""
raise NotImplementedError
# ____________________________________________________________
#
def update(self, function_values, es):
"""updates the weights for computing a boundary penalty.
Arguments
=========
``function_values``:
all function values of recent population of solutions
``es``:
`CMAEvolutionStrategy` object instance, in particular
mean and variances and the methods from the attribute
`gp` of type `GenoPheno` are used.
"""
if self.bounds is None or (self.bounds[0] is None and
self.bounds[1] is None):
return self
N = es.N
# ## prepare
# compute varis = sigma**2 * C_ii
if 11 < 3: # old
varis = es.sigma**2 * np.array(N * [es.C] if np.isscalar(es.C) else (# scalar case
es.C if np.isscalar(es.C[0]) else # diagonal matrix case
[es.C[i][i] for i in range(N)])) # full matrix case
else:
varis = es.sigma**2 * es.sm.variances
# relative violation in geno-space
dmean = (es.mean - es.gp.geno(self.repair(es.gp.pheno(es.mean)))) / varis**0.5
# ## Store/update a history of delta fitness value
fvals = sorted(function_values)
l = 1 + len(fvals)
val = fvals[3 * l // 4] - fvals[l // 4] # exact interquartile range apart interpolation
val = val / np.mean(varis) # new: val is normalized with sigma of the same iteration
# insert val in history
if np.isfinite(val) and val > 0:
self.hist.insert(0, val)
elif val == np.inf and len(self.hist) > 1:
self.hist.insert(0, max(self.hist))
else:
pass # ignore 0 or nan values
if len(self.hist) > 20 + (3 * N) / es.popsize:
self.hist.pop()
# ## prepare
dfit = np.median(self.hist) # median interquartile range
damp = min(1, es.sp.weights.mueff / 10. / N)
# ## set/update weights
# Throw initialization error
if len(self.hist) == 0:
raise ValueError('wrongful initialization, no feasible solution sampled. ' +
'Reasons can be mistakenly set bounds (lower bound not smaller than upper bound) or a too large initial sigma0 or... ' +
'See description of argument func in help(cma.fmin) or an example handling infeasible solutions in help(cma.CMAEvolutionStrategy). ')
# initialize weights
if dmean.any() and (not self.weights_initialized or es.countiter == 2): # TODO
self.gamma = np.array(N * [2 * dfit]) ## BUGBUGzzzz: N should be phenotypic (bounds are in phenotype), but is genotypic
self.weights_initialized = True
# update weights gamma
if self.weights_initialized:
edist = np.array(abs(dmean) - 3 * max(1, N**0.5 / es.sp.weights.mueff))
if 1 < 3: # this is better, around a factor of two
# increase single weights possibly with a faster rate than they can decrease
# value unit of edst is std dev, 3==random walk of 9 steps
self.gamma *= np.exp((edist > 0) * np.tanh(edist / 3) / 2.)**damp
# decrease all weights up to the same level to avoid single extremely small weights
# use a constant factor for pseudo-keeping invariance
self.gamma[self.gamma > 5 * dfit] *= np.exp(-1. / 3)**damp
# self.gamma[idx] *= exp(5*dfit/self.gamma[idx] - 1)**(damp/3)
elif 1 < 3 and (edist > 0).any(): # previous method
# CAVE: min was max in TEC 2009
self.gamma[edist > 0] *= 1.1**min(1, es.sp.weights.mueff / 10. / N)
# max fails on cigtab(N=12,bounds=[0.1,None]):
# self.gamma[edist>0] *= 1.1**max(1, es.sp.weights.mueff/10./N) # this was a bug!?
# self.gamma *= exp((edist>0) * np.tanh(edist))**min(1, es.sp.weights.mueff/10./N)
else: # alternative version, but not better
solutions = es.pop # this has not been checked
r = self.feasible_ratio(solutions) # has to be the averaged over N iterations
self.gamma *= np.exp(np.max([N * [0], 0.3 - r], axis=0))**min(1, es.sp.weights.mueff / 10 / N)
es.more_to_write += list(self.gamma) if self.weights_initialized else N * [1.0]
# ## return penalty
# es.more_to_write = self.gamma if not np.isscalar(self.gamma) else N*[1]
return self # bound penalty values
| [
"shikharsharma@Shikhars-MacBook-Air.local"
] | shikharsharma@Shikhars-MacBook-Air.local |
ea573da5ba54463277dfcb0ea1a4a6b08b967228 | aa0ab3eaee3a04eb39f1819cb411ce9fa2062c14 | /scripts/driver_messaging/proofer_v2.py | 51c3cdca600723c989dfce0fde12a366af95b5b4 | [] | no_license | gilkra/tweet_proofer | 30fd99dd11306e805526044a155a9c34dffc0713 | 04ead63aeb2cb8f0e2a92cc39a731ba926d9b617 | refs/heads/master | 2021-01-12T06:19:58.967602 | 2016-12-25T21:11:27 | 2016-12-25T21:11:27 | 77,342,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,202 | py | import sys
import requests
from enchant.checker import SpellChecker
from enchant.tokenize import EmailFilter, URLFilter
from urllib2 import Request, urlopen, URLError
# Prompt the reviewer for the outbound driver message that will be proofed
# by the checks below. NOTE: raw_input is Python 2 only; this script targets
# Python 2 (see also urllib2 and the print statements used elsewhere).
message = raw_input("Please paste the message you'd like to send: ")
#looks for tricky legal terms or combo of terms
def legal_flag(message, wordlist):
    """Scan a driver-facing message for wording restricted by legal guidelines.

    Prints one reviewer-facing warning per issue found and returns True only
    when no legal warnings were raised (the guarantee/reward clarity question
    is advisory and does not affect the return value, matching prior behavior).

    message  -- the full outbound text; all matching is case-insensitive
    wordlist -- the message tokenized into words (kept for interface
                compatibility; matching is now done against the message itself)
    """
    message = message.lower()
    all_good = True
    # Superlatives legal does not allow next to safety/background-check claims.
    safety_terms = ['best available', 'industry leading', 'gold standard', 'safest', 'best-in-class']
    # Terms/phrases that imply employment or are otherwise restricted.
    legal_flag_terms = ["Uber driver", "Uber courier", "Courier" ,
    "Uber car" , "Uber vehicle","hire", "Hired", "application", "finish signing up",
    "complete your application", "easy application process","Finish your application", "Job",
    "career", "work", "entry-level", "Benefits", "Shift", "Supply", "Part-time", "Full-time", "Drive for Uber",
    "Discipline", "fired", "Warning", "Punish", "Penalty box", "Wage", "Salary", "Commission", "Bonus",
    "your background check is approved", "We need your social security number", "We need you on the road",
    "Uber customer", "Uber client", "Surge"]
    for term in [t.lower() for t in legal_flag_terms]:
        if term in message:
            all_good = False
            print('- The term/phrase "' + term + '" may not be permitted based on the legal guidelines')
    # Advisory only: mixing "guarantee" and "reward" is confusing but not flagged.
    if ('guarantee' in message or 'guarantees' in message) and ('rewards' in message or 'reward' in message):
        print("- You mentioned both 'guarantee' and 'reward' in your message. Are you sure the message is clear to the driver?")
    for phrase in safety_terms:
        if phrase in message and ('background check' in message or 'safety' in message):
            all_good = False
            print("- You mentioned '" + phrase + "' in a message about safety/background checks. This may not be permitted based on the legal guidelines")
    # Any guarantee/reward language must link to its terms via a t.uber link.
    # Bug fix: previously this compared individual words case-sensitively, so
    # "Guarantee" escaped detection and the multi-word phrase 'earnings boost'
    # could never match a single token; it could also print once per word.
    # Now match against the lowercased message and warn at most once.
    guarantee_terms = ['guarantee', 'guarantees', 'rewards', 'reward', 'earnings boost']
    if any(term in message for term in guarantee_terms) and 't.uber' not in message:
        all_good = False
        print('- Did you include a link to the terms of the guarantee? Terms should be linked with every guarantee')
    return all_good
#tests that t.uber.com url is set to public
def url_set_to_public(url):
all_good = True
response = requests.get(url)
if 'uber.onelogin' in response.url:
print '- Check your t.uber URL. Did you set it to "Public"?'
all_good = False
return all_good
#takes url and tests if valid
def is_valid_url(url):
req = Request(url)
try:
response = urlopen(req)
except URLError, e:
return False
else:
return True
#spellcheck
def spellcheck(message):
all_good = True
wordlist = message.split()
fixed_wordlist = []
for word_index in range(1,len(wordlist)):
if (wordlist[word_index][0].isupper() and
(wordlist[word_index-1][-1] == '.' or
wordlist[word_index-1][-1] == ':' or
wordlist[word_index-1][-1] == '!')) or wordlist[word_index][0].islower():
fixed_wordlist.append(wordlist[word_index])
fixed_wordlist = [x for x in fixed_wordlist if '.co' not in x]
new_message = ' '.join(fixed_wordlist)
d = SpellChecker("en_US", filters=[EmailFilter, URLFilter])
d.set_text(new_message)
for error in d:
all_good = False
print '- Spell check: ', error.word
return all_good
#master tester function
def split_them(message):
wordlist = message.split()
link_counter = 0
exclam_counter = 0
all_good = True
print "Ok, let's have a look here..."
print
for word_index in range(len(wordlist)-1):
next_word = wordlist[word_index+1]
if (wordlist[word_index][-1] == '.' or wordlist[word_index] == 'Uber:') and next_word[0].islower():
all_good = False
print '- The sentence starting with "'+next_word+'" should probably be capitalized'
for word in wordlist:
if '.c' in word.lower() or 't.uber' in word.lower():
for another_word in wordlist:
if "first_name" in another_word.lower() and 160 >= len(message) >= 140:
all_good = False
print '- Long first names may force your URL to go into a second text, making it unclickable. Try cutting a few characters'
if word == wordlist[-1]:
if word[-1] in [',', '.', ';','!']:
all_good = False
print "- No need for punctuation after the link"
link_counter += 1
if link_counter > 1:
all_good = False
print '- You have more than one link! Step up your game'
if 'http' in word:
if is_valid_url(word):
if url_set_to_public(word) == False:
all_good = False
else:
all_good = False
print "- Something's fishy with that URL"
else:
fixed_url = 'http://'+word
if is_valid_url(fixed_url):
if url_set_to_public(fixed_url) == False:
all_good = False
if not is_valid_url(fixed_url):
all_good = False
print "- Something's fishy with that URL"
if word[-1] is '!' and word[-2] is '!':
all_good = False
print "- You're exclaiming super hard right now! Check to make sure you don't have consecutive exclamation marks in your message."
if '!' in word:
exclam_counter += 1
if exclam_counter > 2:
all_good = False
print "- You have more than 2 excalamation marks. Try to find other ways to convey excitement."
if word.lower() in ['bonus', 'commission', 'warning']:
print '- Legal flag: reconsider your use of the word "'+word+'"'
if not 't.uber.com' in word and '.com' in word:
all_good = False
print "- Looks like you didn't shorten your URL. Does it make sense to make a t.uber.com address?"
if wordlist[0] != 'Uber:':
all_good = False
print '- You should start your message with "Uber:"!'
if 'drive for uber' in message.lower() or 'driving for uber' in message.lower():
all_good = False
print '- Legal flag: Partners drive WITH, not FOR Uber'
if 'work for uber' in message.lower() or 'working for uber' in message.lower():
all_good = False
print '- Legal flag: Partners work WITH, not FOR Uber'
if len(message) > 160:
all_good = False
print "- Your message is too long! You're "+str((len(message)-160))+" characters above the 160 character limit."
if spellcheck(message) == False:
all_good = False
if legal_flag(message, wordlist) == False:
all_good = False
if all_good == True:
print '*Looks good to me! Go crush it!*'
print
split_them(message)
| [
"kazimirovg@gmail.com"
] | kazimirovg@gmail.com |
725d730943bf5acde00f52587c2b9b59d8fcc412 | 7068cdc49e1a824bc9687398342fb0f560aa678e | /python/media.py | 63368e96a40153fb5fd60ac16a3c00044847ea93 | [] | no_license | pm0355/Movie-Trailer-Site | 95200d3f29642fae3c01bf86a017b01182e6ca41 | cbdf318b4c6679bf890718fd69b96798c892a993 | refs/heads/master | 2021-01-11T21:08:18.253595 | 2017-01-17T17:24:45 | 2017-01-17T17:24:45 | 79,252,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 14 20:44:21 2017
@author: matti
"""
import webbrowser
class Movie():
"""This class provides a wayto store movie related information"""
valid_ratings=["G","PG","PG-13","R"]
def __init__(self, movie_title, movie_storyline, poster_image, trailer_youtube):
self.title= movie_title
self.storyline= movie_storyline
self.poster_image_url= poster_image
self.trailer_youtube_url=trailer_youtube
def show_trailer(self):
webbrowser.open(self.trailer_youtube_url)
| [
"pm0355a@student.american.edu"
] | pm0355a@student.american.edu |
f5189e11e9fa689aadeb8e36b91851e2371ad9ba | 75b2eced70124bc6a1e71d835ee7a56c2edecd52 | /scintellometry/phasing/uvwcoords.py | 15db38c47c1f4a109874960748fcd0d09886497e | [] | no_license | danasimard/scintellometry | 5c15bb3a2f1b6fa283487c22e9a84453841c1a60 | 222731affb036f695d773c12b343a55400a9bfa1 | refs/heads/master | 2020-12-30T15:21:57.443721 | 2018-03-19T15:13:54 | 2018-03-19T15:13:54 | 91,131,738 | 0 | 0 | null | 2017-05-12T21:42:07 | 2017-05-12T21:42:07 | null | UTF-8 | Python | false | false | 7,041 | py | from __future__ import division, print_function
import numpy as np
from novas.compat import sidereal_time
from astropy.time import Time, TimeDelta
import astropy.units as u
from astropy.coordinates import ICRSCoordinates
from astropy.table import Table
from astropy.constants import c as SPEED_OF_LIGHT
SOURCE = ICRSCoordinates('03h32m59.368s +54d34m43.57s')
OUTFILE = 'outfile.mat'
# first time stamp of all. Maybe should be rounded to minute?
TIME_STAMP0 = '2013 06 29 03 53 00 0.660051'
MAX_NO_IN_SEQ_FILE = 4331
N_BLOCK = MAX_NO_IN_SEQ_FILE - 1
DT_SAMPLE = TimeDelta(0., (3/(200*u.MHz)).to(u.s).value, format='sec')
DT_BLOCK = 2.**24*DT_SAMPLE
TEL_LONGITUDE = 74*u.deg+02*u.arcmin+59.07*u.arcsec
TEL_LATITUDE = 19*u.deg+05*u.arcmin+47.46*u.arcsec
NPOD = 30 # Number of baselines (only used as sanity check)
ANTENNA_FILE = '/home/mhvk/packages/scintellometry/scintellometry/phasing/' \
'antsys.hdr'
OUR_ANTENNA_ORDER = 'CWES' # and by number inside each group
NON_EXISTING_ANTENNAS = ('C07', 'S05') # to remove from antenna file
USE_UT1 = False
if USE_UT1:
IERS_A_FILE = '/home/mhvk/packages/astropy/finals2000A.all'
from astropy.utils.iers import IERS_A
iers_a = IERS_A.open(IERS_A_FILE)
IST_UTC = TimeDelta(0., 5.5/24., format='jd')
def timestamp_to_Time(line):
"""Convert a timestamp item to a astropy Time instance.
Store telescope lon, lat as well for full precision in possible
TDB conversion (not used so far)
"""
tl = line.split()
seconds = float(tl[5])+float(tl[6])
return Time(tl[0] + '-' + tl[1] + '-' + tl[2] + ' ' +
tl[3] + ':' + tl[4] + ':{}'.format(seconds), scale='utc',
lat=TEL_LATITUDE, lon=TEL_LONGITUDE)
def UTC_to_gast(times):
"""Approximate conversion: ignoring UT1-UTC difference."""
gast = np.zeros(len(times))
for i,t in enumerate(times):
gast[i] = sidereal_time(t.utc.jd1, t.utc.jd2,
delta_t=(t.tt.mjd-t.utc.mjd)*24*3600)
return gast*(np.pi/12.)*u.rad
def UT1_to_gast(times):
"""Fairly precise conversion to GAST. Includes unmodelled parts of the
Earth rotation (in UT1), but not yet of polar wander."""
times.delta_ut1_utc = iers_a.ut1_utc(times)
gast = np.zeros(len(times))
for i,t in enumerate(times):
gast[i] = sidereal_time(t.ut1.jd1, t.ut1.jd2,
delta_t=(t.tt.mjd-t.ut1.mjd)*24*3600)
return gast*(np.pi/12.)*u.rad
def get_antenna_coords(filename):
"""Read antenna coordinates from GMRT .hdr file. First store them all
in a dictionary, indexed by the antenna name, remove non-existing
antennas, then get them in the order used in Ue-Li's phasing code,
and finally make it a Table, which is easier to access than a
dictionary. Probably could be done more directly.
"""
with open(filename, 'r') as fh:
antennas = {}
line = fh.readline()
while line != '':
if line[:3] == 'ANT':
al = line.split()
antennas[al[2]] = np.array([float(item) for item in al[3:8]])
line = fh.readline()
for bad in NON_EXISTING_ANTENNAS:
antennas.pop(bad)
antenna_names = order_antenna_names(antennas)
# store all antenna's in a Table
ant_tab = Table()
ant_tab['ant'] = antenna_names
ant_tab['xyz'] = [antennas[ant][:3] for ant in ant_tab['ant']]
ant_tab['delay'] = [antennas[ant][3:] for ant in ant_tab['ant']]
return ant_tab
def order_antenna_names(antennas, order=OUR_ANTENNA_ORDER):
"""Get antenna in the correct order, grouped by C, W, E, S, and
by number within each group.
"""
names = list(antennas)
def cmp_names(x, y):
value_x, value_y = [order.index(t[0])*100+int(t[1:]) for t in x, y]
return -1 if value_x < value_y else 1 if value_x > value_y else 0
names.sort(cmp_names)
return names
def get_uvw(ha, dec, antennas, ref_ant):
"""Get delays in UVW directions between pairs of antenna's for
given hour angle and declination of a source.
"""
h = ha.to(u.rad).value
d = dec.to(u.rad).value
dxyz = antennas['xyz'][ref_ant] - antennas['xyz']
# unit vectors in the U, V, W directions
xyz_u = np.array([-np.sin(d)*np.cos(h), np.sin(d)*np.sin(h), np.cos(d)])
xyz_v = np.array([np.sin(h), np.cos(h), 0.])
xyz_w = np.array([np.cos(d)*np.cos(h), -np.cos(d)*np.sin(h), np.sin(d)])
return np.vstack([(xyz_u*dxyz).sum(1),
(xyz_v*dxyz).sum(1),
(xyz_w*dxyz).sum(1)]).T
if __name__ == '__main__':
# start time in UTC
t0 = timestamp_to_Time(TIME_STAMP0) - IST_UTC
# set of times encomassing the whole scan
times = t0 + DT_BLOCK*np.arange(N_BLOCK)
# precess source coordinate to mid-observation time
tmid = times[len(times)//2]
source = SOURCE.fk5.precess_to(tmid)
# calculate Greenwich Apparent Sidereal Time
if USE_UT1:
gast = UT1_to_gast(times)
else:
gast = UTC_to_gast(times)
# for possible testing
# for t, g in zip(times, gast):
# print("{0:14.8f} {1:11.8f}".format(t.mjd-40000.,
# g.to(u.rad).value*np.pi/12.))
# with Sidereal time, we can calculate the hour hangle
# (annoyingly, which source.ra is in units of angle, cannot subtract
# other angles; this should get better in future versions of astropy)
# Note: HA defined incorrectly before (from c code?)
ha = gast + TEL_LONGITUDE - source.ra.radians * u.rad
# print(times,gast.to(u.deg).value/15.,ha.to(u.deg).value/15. % 24.)
# calculate parallactic angle for possible use in polarimetry
chi = np.arctan2(-np.cos(TEL_LATITUDE.to(u.rad).value) *
np.sin(ha.to(u.rad).value),
np.sin(TEL_LATITUDE.to(u.rad).value) *
np.cos(source.dec.radians) -
np.cos(TEL_LATITUDE.to(u.rad).value) *
np.sin(source.dec.radians) *
np.cos(ha.to(u.rad).value)) * u.rad
# print(times,gast.to(u.deg).value/15.,ha.to(u.deg).value/15. % 24.,
# chi.to(u.deg))
# antennas and their coordinates are will be ordered by OUR_ANTENNA_ORDER
antennas = get_antenna_coords(ANTENNA_FILE)
# sanity check
assert NPOD == len(antennas)
# write out delays for all time stamps, looping over baselines
ref_index = 0 # note, this is not the GMRT default, of 'C02' => index 2
with open(OUTFILE, 'w') as fo:
for h, c in zip(ha, chi):
# get UVW coordinates for this HA
uvw = get_uvw(h, source.dec.radians * u.rad, antennas, ref_index)
# print them by pair
for j in range(len(uvw)):
uvw_us = (uvw[j]*u.m/SPEED_OF_LIGHT).to(u.us).value
fo.write("{:02d} {:02d} {:f} {:f} {:f} {:f}\n".format(
ref_index, j, uvw_us[0], uvw_us[1], uvw_us[2],
c.to(u.rad).value))
| [
"mhvk@astro.utoronto.ca"
] | mhvk@astro.utoronto.ca |
00aded10afb3608226b9a8ecb28391823389c79a | a6e19982bd69fadaea78efc2df4eb2b25261f468 | /src/Python3/Q113017/exsample.py | bb2e0b36202ab900823ddbd2d1aa3ef2c63d18c8 | [
"MIT"
] | permissive | umyuu/Sample | dcb30ca3ee19e4c49a6c9a6a0ff29357222383b5 | 66e8cd725b682db4c9bf93fb80786eea8cbad19d | refs/heads/master | 2021-01-22T21:22:40.228920 | 2018-07-26T17:32:03 | 2018-07-26T17:32:03 | 85,419,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | # -*- coding: utf8 -*-
import linecache
def split_word_list(line: str) -> list:
word_list = []
word = []
for c in line:
if c == ' ':
print(word)
word_list.append(word)
word = []
continue
word.append(c)
else:
print(word)
word_list.append(word)
return word_list
def main() -> None:
line_no = 1
file_name = r'sample.txt'
target_line = linecache.getline(file_name, line_no)
target_line = 'This is an apple'
word_list = split_word_list(target_line)
print(word_list)
if __name__ == '__main__':
main()
| [
"124dtiaka@gmail.com"
] | 124dtiaka@gmail.com |
896a8d47ade076e584af2548a90dcf1635b3e8c6 | 622d7c9b21cbb0b807a1a16559b6d8b53329e17a | /app/helper_functions/sim_functions.py | f4718a9f60d97046b980717f7c33980ff4893a15 | [] | no_license | spencercweiss/CS4300_Flask_template | 542ac4a220385fddce2926405e4b346fb911a8c4 | 8dd1a0e6b95bc9ed2b249ad993223f42d23f9f29 | refs/heads/master | 2020-03-20T22:03:27.850954 | 2018-04-30T19:49:45 | 2018-04-30T19:49:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,136 | py | import re
import json
import time
import math
from nltk.tokenize import TreebankWordTokenizer
import numpy as np
tokenizer = TreebankWordTokenizer()
def tokenize_transcript(transcripts):
for idx, song in enumerate(transcripts):
lyrics = song["lyrics"]
lyrics = lyrics.replace("\\n", " ").replace("[Hook", " ").replace("[Verse", " ").replace("b", " ", 1)
transcripts[idx]["lyrics"] = re.findall(r"[a-z]+", lyrics.lower())
return transcripts
def build_inverted_index(songs):
indexdict = {}
for i,x in enumerate(songs):
counter_dict = {}
for word in x["lyrics"]:
if word in counter_dict:
counter_dict[word] +=1
else:
counter_dict[word] =1
for term,freq in counter_dict.items():
if term not in indexdict:
indexdict[term] = []
indexdict[term].append((i,counter_dict[term]))
return indexdict
def compute_idf(inv_idx, n_songs, min_df=10, max_df_ratio=0.95):
idf = {}
for i in inv_idx:
if((len(inv_idx[i]) < min_df) or (float(len(inv_idx[i]))/n_songs > max_df_ratio)):
continue
else:
#calculate log base 2
idf[i] = math.log(float(n_songs)/(1 + len(inv_idx[i]))) / math.log(2)
return idf
def computer_doc_norms(inv_idx, idf, n_songs):
norms = np.zeros(n_songs)
for term in idf:
for doc_id, tf in inv_idx[term]:
norms[doc_id] += (tf*idf[term])**2
return np.sqrt(norms)
def song_search(query, index, idf, doc_norms):
querytokens = tokenizer.tokenize(query)
uniquetokens = np.unique(querytokens)
temp = np.zeros(len(doc_norms))
qnorm = 0
for word in uniquetokens:
if word in idf:
qnorm = qnorm + (idf[word]*querytokens.count(word))**2
else:
continue
for word in uniquetokens:
if word not in index:
continue
else:
qtf = querytokens.count(word)
for doc, tf in index[word]:
if word in idf:
temp[doc] = temp[doc] + ((tf * qtf)*(idf[word]**2))
qnorm = math.sqrt(qnorm)
temp = np.divide(temp, qnorm)
results = []
for idx, dnorm in enumerate(doc_norms):
if(dnorm != 0):
results.append(((temp[idx]/dnorm), idx))
else:
results.append(((temp[idx]), idx))
return sorted(results, key=lambda x: x[0], reverse=True)
| [
"mszacillo@dhcp-rhodes-1602.redrover.cornell.edu"
] | mszacillo@dhcp-rhodes-1602.redrover.cornell.edu |
f5c550cbbe76f58a8bc5df6d5eeb4e64f6a27e77 | fe178d9e00714ecbce591b94ad6d6bff4328d24f | /minggu-04/praktik/src/ObjectsClass.py | c0cca7465b9b467db006e515a08372bd28c36715 | [] | no_license | gitaperdani/bigdata | 2d7742646009775a1b2429545ac9f1d5d7d25c59 | b521c73109ce6f33e4d2e92c1fff67a0e6270dda | refs/heads/master | 2021-07-16T14:54:18.836030 | 2019-01-07T17:05:56 | 2019-01-07T17:05:56 | 147,477,369 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | class MyClass:
"""A simple example class"""
i = 12345
def f(self):
return 'hello world'
x = MyClass()
def __init__(self):
self.data = []
x = MyClass()
| [
"gitaperdani08@gmail.com"
] | gitaperdani08@gmail.com |
b974256e2fe1fc6c4ea41eea116808e4657046aa | 9969704627d7557a15e469f7eb095ad7897bcf35 | /utils.py | 675941403fee7497591f9295aae63a05d09d0d75 | [] | no_license | jr-xing/strainmatLabeler | b75f164975575f810d345c41462a0c073248188b | 9c5d7d0bc4aa877d474c8008fa0a506a4bb9ed86 | refs/heads/main | 2023-05-05T11:43:22.000363 | 2021-05-28T14:53:00 | 2021-05-28T14:53:00 | 320,592,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,576 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 18:00:57 2020
@author: remus
"""
import numpy as np
import scipy
import scipy.io as sio
def SVDDenoise(mat, rank=3):
u, s, vh = np.linalg.svd(mat, full_matrices=False)
s[rank:] = 0
return u@np.diag(s)@vh
def loadStrainMat(filename):
datamat = sio.loadmat(filename, struct_as_record=False, squeeze_me = True)
EccDatum, tos = None, None
if 'TransmuralStrainInfo' in datamat.keys():
# EccDatum = SVDDenoise(np.flip(datamat['TransmuralStrainInfo'].Ecc.mid.T, axis=0))
# EccDatum = np.flip(datamat['TransmuralStrainInfo'].Ecc.mid.T, axis=0)
EccDatum = datamat['TransmuralStrainInfo'].Ecc.mid.T
# if 'strainMatFullResolution' in datamat.keys():
# strainMetFullResolution = datamat['strainMatFullResolution']
# else:
# strainMetFullResolution = None
try:
strainMatFullResolution = datamat['StrainInfo'].CCmid
# strainMatFullResolution = SVDDenoise(np.flipud(datamat['StrainInfo'].CCmid))
except:
strainMatFullResolution = None
if 'xs' in datamat.keys():
tos = datamat['xs'][::-1]
tos18_Jerry = None
tos126_Jerry = None
elif 'TOSAnalysis' in datamat.keys():
try:
tos = datamat['TOSAnalysis'].TOS[::-1]
except:
tos = None
try:
tos18_Jerry = datamat['TOSAnalysis'].TOS18_Jerry[::-1]
tos126_Jerry = datamat['TOSAnalysis'].TOSfullRes_Jerry[::-1]
except:
tos18_Jerry = None
tos126_Jerry = None
else:
tos = None
tos18_Jerry = None,
tos126_Jerry = None
try:
tos_interp_mid = datamat['TOSAnalysis'].TOSInterploated[datamat['AnalysisInfo'].fv.layerid==3][::-1]
except:
tos_interp_mid = None
# for datum in dataFull:
# datum[config['data']['outputType']] = datum['TOSInterploated'][:,datum['AnalysisFv'].layerid==3]
# if 'TOSInterploated' in datamat.keys():
# tos_interp = datamat['TOSInterploated'][::-1]
# else:
# tos_interp = None
# return EccDatum, tos, strainMetFullResolution, tos_interp_mid, datamat
return {'strainMat': EccDatum, 'TOS': tos, 'TOS18_Jerry': tos18_Jerry, 'TOS126_Jerry': tos126_Jerry,
'strainMatFullResolution': strainMatFullResolution, 'TOSInterpolatedMid': tos_interp_mid, 'datamat': datamat}
def saveTOS2Mat(tos:np.ndarray, filename:str):
sio.savemat(filename, {'xs': tos})
# def saveTOS2Mat(data:np.ndarray, filename:str, tos_only = True):
# if tos_only:
# pass
# else:
# pass
# sio.savemat(filename, {'xs': tos})
from PyQt5 import QtWidgets
def getScreenSize(displayNr = -1):
# https://stackoverflow.com/questions/35887237/current-screen-size-in-python3-with-pyqt5
sizeObject = QtWidgets.QDesktopWidget().screenGeometry(displayNr)
return sizeObject.height(), sizeObject.width()
def _rect_inter_inner(x1, x2):
n1 = x1.shape[0]-1
n2 = x2.shape[0]-1
X1 = np.c_[x1[:-1], x1[1:]]
X2 = np.c_[x2[:-1], x2[1:]]
S1 = np.tile(X1.min(axis=1), (n2, 1)).T
S2 = np.tile(X2.max(axis=1), (n1, 1))
S3 = np.tile(X1.max(axis=1), (n2, 1)).T
S4 = np.tile(X2.min(axis=1), (n1, 1))
return S1, S2, S3, S4
def _rectangle_intersection_(x1, y1, x2, y2):
S1, S2, S3, S4 = _rect_inter_inner(x1, x2)
S5, S6, S7, S8 = _rect_inter_inner(y1, y2)
C1 = np.less_equal(S1, S2)
C2 = np.greater_equal(S3, S4)
C3 = np.less_equal(S5, S6)
C4 = np.greater_equal(S7, S8)
ii, jj = np.nonzero(C1 & C2 & C3 & C4)
return ii, jj
def intersections(x1, y1, x2, y2):
# https://github.com/sukhbinder/intersection
"""
INTERSECTIONS Intersections of curves.
Computes the (x,y) locations where two curves intersect. The curves
can be broken with NaNs or have vertical segments.
usage:
x,y=intersection(x1,y1,x2,y2)
Example:
a, b = 1, 2
phi = np.linspace(3, 10, 100)
x1 = a*phi - b*np.sin(phi)
y1 = a - b*np.cos(phi)
x2=phi
y2=np.sin(phi)+2
x,y,i,j=intersections(x1,y1,x2,y2)
plt.plot(x1,y1,c='r')
plt.plot(x2,y2,c='g')
plt.plot(x,y,'*k')
plt.show()
"""
x1 = np.asarray(x1)
x2 = np.asarray(x2)
y1 = np.asarray(y1)
y2 = np.asarray(y2)
ii, jj = _rectangle_intersection_(x1, y1, x2, y2)
n = len(ii)
dxy1 = np.diff(np.c_[x1, y1], axis=0)
dxy2 = np.diff(np.c_[x2, y2], axis=0)
T = np.zeros((4, n))
AA = np.zeros((4, 4, n))
AA[0:2, 2, :] = -1
AA[2:4, 3, :] = -1
AA[0::2, 0, :] = dxy1[ii, :].T
AA[1::2, 1, :] = dxy2[jj, :].T
BB = np.zeros((4, n))
BB[0, :] = -x1[ii].ravel()
BB[1, :] = -x2[jj].ravel()
BB[2, :] = -y1[ii].ravel()
BB[3, :] = -y2[jj].ravel()
for i in range(n):
try:
T[:, i] = np.linalg.solve(AA[:, :, i], BB[:, i])
except:
T[:, i] = np.Inf
in_range = (T[0, :] >= 0) & (T[1, :] >= 0) & (
T[0, :] <= 1) & (T[1, :] <= 1)
xy0 = T[2:, in_range]
xy0 = xy0.T
iout = ii[in_range] + T[0, in_range].T
jout = jj[in_range] + T[1, in_range].T
return xy0[:, 0], xy0[:, 1], iout, jout
# https://stackoverflow.com/questions/20924085/python-conversion-between-coordinates
def cart2pol(x, y):
# rho = np.sqrt(x**2 + y**2)
# phi = np.arctan2(y, x)
# return(rho, phi)
# myhypot = @(a,b)sqrt(abs(a).^2+abs(b).^2);
hypot = lambda x,y: np.sqrt(np.abs(x)**2 + np.abs(y)**2)
th = np.arctan2(y,x);
r = hypot(x,y);
return th, r
# def pol2cart(rho, phi):
def pol2cart(th, r):
# x = rho * np.cos(phi)
# y = rho * np.sin(phi)
# return(x, y)
x = r*np.cos(th);
y = r*np.sin(th);
return x, y
def spl2patchSA(datamat):
maxseg = 132
Ccell = datamat['ROIInfo'].RestingContour
origin = datamat['AnalysisInfo'].PositionA
posB = datamat['AnalysisInfo'].PositionB
flag_clockwise = datamat['AnalysisInfo'].Clockwise
Nseg = 18
# total number of theta samples per segment
Nperseg = int(np.floor(maxseg/Nseg))
N = int(Nperseg*Nseg)
# full enclosing contour
C = Ccell.copy()
for cidx in range(len(C)):
C[cidx] = np.concatenate([C[cidx], np.nan*np.ones((1,2))])
C = np.concatenate([c for c in C])
# initial angle
# atan2 -> arctan2
theta0 = np.arctan2(posB[1]-origin[1],posB[0]-origin[0])
# angular range
if flag_clockwise:
theta = np.linspace(0,2*np.pi,N+1).reshape([1,-1])
else:
theta = np.linspace(2*np.pi,0,N+1).reshape([1,-1])
theta = theta[:,:-1] + theta0
# radial range
tmp,r = cart2pol(C[:,0]-origin[0],C[:,1]-origin[1])
mxrad = np.ceil(max(r))
rad = np.array([0, 2*mxrad])
# spokes
THETA,RAD = np.meshgrid(theta,rad)
THETA,RAD = THETA.T,RAD.T
X,Y = pol2cart(THETA,RAD)
xspoke = X.T+origin[0]
xspoke = np.concatenate([xspoke, np.nan*np.ones((1, xspoke.shape[1]))])
yspoke = Y.T+origin[1]
yspoke = np.concatenate([yspoke, np.nan*np.ones((1, xspoke.shape[1]))])
# find intersections
x_eppt,y_eppt,_,_ = intersections(xspoke.flatten(order='F'),
yspoke.flatten(order='F'),
Ccell[0][:,0], Ccell[0][:,1])
# record points
eppts = np.concatenate((x_eppt[:,None], y_eppt[:,None]), axis=1)
# find intersections
x_enpt,y_enpt,_,_ = intersections(xspoke.flatten(order='F'),
yspoke.flatten(order='F'),
Ccell[1][:,0], Ccell[1][:,1])
# record points
enpts = np.concatenate((x_enpt[:,None], y_enpt[:,None]), axis=1)
# Correct if wrong
# Not sure what happened, but seems eppts sometimes duplicate the first point and (127,2)
if enpts.shape[0] < eppts.shape[0]:
eppts = eppts[1:, :]
# def remove_dupicate(data):
# # data: (N, D) e.g. (126,2)
# unq, count = np.unique(data, axis=0, return_counts=True)
# return unq[count == 1]
# if enpts.shape[0] != eppts.shape[0]:
# enpts = remove_dupicate(enpts)
# eppts = remove_dupicate(eppts)
# number of lines
Nline = 6
# vertices
X = np.nan*np.ones((N, Nline))
Y = np.nan*np.ones((N, Nline))
w = np.linspace(0,1,Nline)
# for k = 1:Nline
for k in range(Nline):
X[:,k] = w[k]*enpts[:,0] + (1-w[k])*eppts[:,0]
Y[:,k] = w[k]*enpts[:,1] + (1-w[k])*eppts[:,1]
v = np.concatenate((X.flatten(order='F')[:, None], Y.flatten(order='F')[:,None]), axis=1)
# 4-point faces
f = np.zeros(((Nline-1)*N,4)).astype(int)
tmp1 = np.arange(N)[:, None]
tmp2 = np.append(np.arange(1,N), 0)[:, None]
tmp = np.hstack((tmp1, tmp2))
for k in range(Nline-1):
rows = k*N + np.arange(N)
f[rows,:] = np.hstack((tmp, np.fliplr(tmp)+N)) + k*N
Nface = f.shape[0]
# ids
ids = np.repeat(np.arange(Nseg),Nperseg,0) + 1 # +1 to match the index format of MATLAB
ids = np.repeat(ids[:, None], Nline - 1, 1)
sectorid = ids.flatten(order='F')
layerid = np.repeat(np.arange(Nline-1), N) + 1
# face locations (average of vertices)
# pface = NaN(Nface,2);
pface = np.nan*np.ones((Nface,2))
for k in [0, 1]:
vk = v[:,k]
pface[:,k] = np.mean(vk[f],1)
# orientation (pointed towards center)
ori,rad = cart2pol(origin[0]-pface[:,0], origin[1]-pface[:,1])
# gather output data
fv = {'vertices': v,
'faces': f + 1, # +1 to match the index format of MATLAB
'sectorid': sectorid,
'layerid': layerid,
'orientation': ori
}
return fv
def rectfv2rectfv(fv1, vals1, fv2):
Nfaces1 = fv1['faces'].shape[0]
Nfaces2 = fv2['faces'].shape[0]
centers1 = np.zeros((Nfaces1, 2))
centers2 = np.zeros((Nfaces2, 2))
for faceIdx in range(Nfaces1):
centers1[faceIdx,:] = np.mean(fv1['vertices'][fv1['faces'][faceIdx,:]-1,:], axis=0)
for faceIdx in range(Nfaces2):
centers2[faceIdx,:] = np.mean(fv2['vertices'][fv2['faces'][faceIdx,:]-1,:], axis=0)
# centers2GridX, centers2GridY = np.meshgrid(centers2[:,0],centers2[:,1])
# mask = (xi > 0.5) & (xi < 0.6) & (yi > 0.5) & (yi < 0.6)
# vals2 = griddata((centers1[:,0],centers1[:,1]),vals1,(centers2GridX,centers2GridY),method='nearest')
vals2 = scipy.interpolate.griddata(centers1,vals1,centers2,method='linear')
# interp = scipy.interpolate.LinearNDInterpolator(centers1, vals1)
return vals2
def getStrainMatFull(datamat, fv = None):
if fv is None:
fv = spl2patchSA(datamat)
NFrames = datamat['ImageInfo'].Xunwrap.shape[-1]
NFacesPerLayer = np.sum(fv['layerid'] == 1)
strainMatFull = np.zeros((NFacesPerLayer, NFrames))
for frameIdx in range(NFrames):
CCinNewFv = rectfv2rectfv({'faces': datamat['StrainInfo'].Faces, 'vertices': datamat['StrainInfo'].Vertices}, datamat['StrainInfo'].CC[:,frameIdx], fv)
strainMatFull[:,frameIdx] = CCinNewFv[fv['layerid']==3]
return strainMatFull | [
"noreply@github.com"
] | jr-xing.noreply@github.com |
03ea8e0088c1d57d0e783b8d300902ba66149c7c | 17c67f44ad263fb0f4e6aac3be1444a84727732b | /src/openfermion/circuits/slater_determinants_test.py | dd37547e0bf7596e5fe3675cbca7db71d62e577f | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | xabomon/OpenFermion | 389db087fb32432220977fb2f31e4c349f31c13a | 8028082805a8e48d9fd179e7616e7df8a256693c | refs/heads/master | 2021-07-05T02:46:42.955939 | 2020-08-03T13:13:20 | 2020-08-03T13:13:20 | 132,436,626 | 1 | 0 | Apache-2.0 | 2019-06-27T08:53:40 | 2018-05-07T09:17:01 | Python | UTF-8 | Python | false | false | 11,986 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for slater_determinants.py."""
import unittest
import pytest
import numpy
from openfermion.linalg.sparse_tools import (
jw_sparse_givens_rotation, jw_sparse_particle_hole_transformation_last_mode,
get_sparse_operator, get_ground_state, jw_configuration_state)
from openfermion.testing.testing_utils import random_quadratic_hamiltonian
from openfermion.circuits.slater_determinants import (
gaussian_state_preparation_circuit, jw_get_gaussian_state,
jw_slater_determinant)
class JWSlaterDeterminantTest(unittest.TestCase):

    def test_hadamard_transform(self):
        r"""Check the two one-particle superposition states.

        Builds 1 / sqrt(2) (a^\dagger_0 \pm a^\dagger_1) |vac> from a
        1x2 Slater determinant matrix and verifies the resulting
        amplitudes in the Jordan-Wigner computational basis.
        """
        inv_sqrt2 = 1. / numpy.sqrt(2.)

        # Symmetric combination: equal amplitudes on |01> and |10>,
        # nothing on |00> or |11>.
        state = jw_slater_determinant(numpy.array([[1., 1.]]) * inv_sqrt2)
        self.assertAlmostEqual(state[1], state[2])
        self.assertAlmostEqual(abs(state[1]), inv_sqrt2)
        self.assertAlmostEqual(abs(state[0]), 0.)
        self.assertAlmostEqual(abs(state[3]), 0.)

        # Antisymmetric combination: same magnitudes, opposite signs.
        state = jw_slater_determinant(numpy.array([[1., -1.]]) * inv_sqrt2)
        self.assertAlmostEqual(state[1], -state[2])
        self.assertAlmostEqual(abs(state[1]), inv_sqrt2)
        self.assertAlmostEqual(abs(state[0]), 0.)
        self.assertAlmostEqual(abs(state[3]), 0.)
class GaussianStatePreparationCircuitTest(unittest.TestCase):

    def setUp(self):
        # System sizes (number of qubits) swept by every test below.
        self.n_qubits_range = range(3, 6)

    def test_ground_state_particle_conserving(self):
        """Circuit-prepared state matches the true ground state when the
        Hamiltonian conserves particle number."""
        for n_qubits in self.n_qubits_range:
            # Random particle-number-conserving quadratic Hamiltonian.
            hamiltonian = random_quadratic_hamiltonian(n_qubits, True, True)

            # Reference ground energy from the sparse matrix representation.
            sparse_ham = get_sparse_operator(hamiltonian)
            energy, _ = get_ground_state(sparse_ham)

            # Build the preparation circuit and its starting occupation.
            description, initial_orbitals = (
                gaussian_state_preparation_circuit(hamiltonian))

            # Start from the corresponding computational-basis state.
            state = jw_configuration_state(initial_orbitals, n_qubits)

            # Apply every layer of Givens rotations. A particle-hole
            # transformation must never appear in the conserving case.
            for layer in description:
                for operation in layer:
                    self.assertTrue(operation != 'pht')
                    p, q, theta, phi = operation
                    rotation = jw_sparse_givens_rotation(
                        p, q, theta, phi, n_qubits)
                    state = rotation.dot(state)

            # The prepared state must be annihilated by (H - E0).
            residual = sparse_ham * state - energy * state
            self.assertAlmostEqual(numpy.amax(numpy.abs(residual)), 0)

    def test_ground_state_particle_nonconserving(self):
        """Circuit-prepared state matches the true ground state when the
        Hamiltonian does not conserve particle number."""
        for n_qubits in self.n_qubits_range:
            # Random quadratic Hamiltonian without particle conservation.
            hamiltonian = random_quadratic_hamiltonian(n_qubits, False, True)

            # Reference ground energy from the sparse matrix representation.
            sparse_ham = get_sparse_operator(hamiltonian)
            energy, _ = get_ground_state(sparse_ham)

            # Build the preparation circuit and its starting occupation.
            description, initial_orbitals = (
                gaussian_state_preparation_circuit(hamiltonian))

            # Start from the corresponding computational-basis state.
            state = jw_configuration_state(initial_orbitals, n_qubits)

            # Here the circuit may interleave particle-hole transformations
            # on the last mode with the Givens rotations.
            pht = jw_sparse_particle_hole_transformation_last_mode(n_qubits)
            for layer in description:
                for operation in layer:
                    if operation == 'pht':
                        state = pht.dot(state)
                    else:
                        p, q, theta, phi = operation
                        rotation = jw_sparse_givens_rotation(
                            p, q, theta, phi, n_qubits)
                        state = rotation.dot(state)

            # The prepared state must be annihilated by (H - E0).
            residual = sparse_ham * state - energy * state
            self.assertAlmostEqual(numpy.amax(numpy.abs(residual)), 0)

    def test_bad_input(self):
        """A non-Hamiltonian argument raises ValueError."""
        with self.assertRaises(ValueError):
            gaussian_state_preparation_circuit('a')
class JWGetGaussianStateTest(unittest.TestCase):
    def setUp(self):
        # System sizes (number of qubits) swept by each test in this class.
        self.n_qubits_range = range(2, 10)
def test_ground_state_particle_conserving(self):
"""Test getting the ground state of a Hamiltonian that conserves
particle number."""
for n_qubits in self.n_qubits_range:
# Initialize a particle-number-conserving Hamiltonian
quadratic_hamiltonian = random_quadratic_hamiltonian(n_qubits, True)
# Compute the true ground state
sparse_operator = get_sparse_operator(quadratic_hamiltonian)
ground_energy, _ = get_ground_state(sparse_operator)
# Compute the ground state using the circuit
circuit_energy, circuit_state = jw_get_gaussian_state(
quadratic_hamiltonian)
# Check that the energies match
self.assertAlmostEqual(ground_energy, circuit_energy)
# Check that the state obtained using the circuit is a ground state
difference = (sparse_operator * circuit_state -
ground_energy * circuit_state)
discrepancy = numpy.amax(numpy.abs(difference))
self.assertAlmostEqual(discrepancy, 0)
def test_ground_state_particle_nonconserving(self):
"""Test getting the ground state of a Hamiltonian that does not
conserve particle number."""
for n_qubits in self.n_qubits_range:
# Initialize a non-particle-number-conserving Hamiltonian
quadratic_hamiltonian = random_quadratic_hamiltonian(
n_qubits, False)
# Compute the true ground state
sparse_operator = get_sparse_operator(quadratic_hamiltonian)
ground_energy, _ = get_ground_state(sparse_operator)
# Compute the ground state using the circuit
circuit_energy, circuit_state = (
jw_get_gaussian_state(quadratic_hamiltonian))
# Check that the energies match
self.assertAlmostEqual(ground_energy, circuit_energy)
# Check that the state obtained using the circuit is a ground state
difference = (sparse_operator * circuit_state -
ground_energy * circuit_state)
discrepancy = numpy.amax(numpy.abs(difference))
self.assertAlmostEqual(discrepancy, 0)
def test_excited_state_particle_conserving(self):
"""Test getting an excited state of a Hamiltonian that conserves
particle number."""
for n_qubits in self.n_qubits_range:
# Initialize a particle-number-conserving Hamiltonian
quadratic_hamiltonian = random_quadratic_hamiltonian(n_qubits, True)
# Pick some orbitals to occupy
num_occupied_orbitals = numpy.random.randint(1, n_qubits + 1)
occupied_orbitals = numpy.random.choice(range(n_qubits),
num_occupied_orbitals,
False)
# Compute the Gaussian state
circuit_energy, gaussian_state = jw_get_gaussian_state(
quadratic_hamiltonian, occupied_orbitals)
# Compute the true energy
orbital_energies, constant = (
quadratic_hamiltonian.orbital_energies())
energy = numpy.sum(orbital_energies[occupied_orbitals]) + constant
# Check that the energies match
self.assertAlmostEqual(energy, circuit_energy)
# Check that the state obtained using the circuit is an eigenstate
# with the correct eigenvalue
sparse_operator = get_sparse_operator(quadratic_hamiltonian)
difference = (sparse_operator * gaussian_state -
energy * gaussian_state)
discrepancy = numpy.amax(numpy.abs(difference))
self.assertAlmostEqual(discrepancy, 0)
def test_excited_state_particle_nonconserving(self):
"""Test getting an excited state of a Hamiltonian that conserves
particle number."""
for n_qubits in self.n_qubits_range:
# Initialize a non-particle-number-conserving Hamiltonian
quadratic_hamiltonian = random_quadratic_hamiltonian(
n_qubits, False)
# Pick some orbitals to occupy
num_occupied_orbitals = numpy.random.randint(1, n_qubits + 1)
occupied_orbitals = numpy.random.choice(range(n_qubits),
num_occupied_orbitals,
False)
# Compute the Gaussian state
circuit_energy, gaussian_state = jw_get_gaussian_state(
quadratic_hamiltonian, occupied_orbitals)
# Compute the true energy
orbital_energies, constant = (
quadratic_hamiltonian.orbital_energies())
energy = numpy.sum(orbital_energies[occupied_orbitals]) + constant
# Check that the energies match
self.assertAlmostEqual(energy, circuit_energy)
# Check that the state obtained using the circuit is an eigenstate
# with the correct eigenvalue
sparse_operator = get_sparse_operator(quadratic_hamiltonian)
difference = (sparse_operator * gaussian_state -
energy * gaussian_state)
discrepancy = numpy.amax(numpy.abs(difference))
self.assertAlmostEqual(discrepancy, 0)
def test_bad_input(self):
"""Test bad input."""
with self.assertRaises(ValueError):
jw_get_gaussian_state('a')
def test_not_implemented_spinr_reduced():
    """Tests that currently un-implemented functionality is caught.

    Requesting a spin sector for a non-particle-conserving Hamiltonian
    must raise ``NotImplementedError`` with the documented message.
    """
    msg = "Specifying spin sector for non-particle-conserving "
    msg += "Hamiltonians is not yet supported."
    for n_qubits in [2, 4, 6]:
        # Initialize a non-particle-number-conserving Hamiltonian
        # (second argument False).
        quadratic_hamiltonian = random_quadratic_hamiltonian(
            n_qubits, False, True)
        # Obtain the circuit
        with pytest.raises(NotImplementedError) as excinfo:
            _ = gaussian_state_preparation_circuit(quadratic_hamiltonian,
                                                   spin_sector=1)
        # `msg` was previously built but never checked; assert it now so
        # a different (unrelated) NotImplementedError cannot pass.
        assert msg in str(excinfo.value)
| [
"noreply@github.com"
] | xabomon.noreply@github.com |
6c9f5f657dd080bca72a1fb1160fee3016dc69cc | e0c1a7195b23b74d0a487c3e01f0da3a84b2143a | /algorithms/stack/evaluate_postfix.py | df331b4d35499c66366f10cc6ddd663de4854bda | [
"MIT"
] | permissive | nisaruj/algorithms | 0fb1d19d55ea4d5f28240f6ea4934e3670ab63e1 | 1e03cd259c2d7ada113eb99843dcada9f20adf54 | refs/heads/master | 2021-01-25T11:57:32.417615 | 2018-10-02T13:38:00 | 2018-10-02T13:38:00 | 123,449,977 | 0 | 0 | MIT | 2018-03-01T15:01:16 | 2018-03-01T15:01:15 | null | UTF-8 | Python | false | false | 914 | py | """
Given a postfix expression, a function eval_postfix
takes a string of postfix expression and evaluates it.
Note that numbers and operators are separated by whitespace.
For example:
eval_postfix('5 14 - 3 /') should return -3.0.
eval_postfix('-1.3 3 + 2 *') should return 3.4.
"""
def eval_postfix(expression):
    """Evaluate a whitespace-separated postfix expression.

    Parameters
    ----------
    expression : str
        Tokens (numbers and the operators ``+ - * /``) separated by
        whitespace, e.g. ``'5 14 - 3 /'``.

    Returns
    -------
    float
        The value of the expression.

    Raises
    ------
    ValueError
        If a token is neither a number nor a known operator.  (The
        original version silently popped two operands for unknown
        tokens and pushed nothing, corrupting the result.)
    IndexError
        If the expression is malformed (too few operands).
    """
    operations = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
    }
    stack = []
    for token in expression.split():
        try:
            stack.append(float(token))
        except ValueError:
            if token not in operations:
                raise ValueError('unknown operator: %r' % token)
            right = stack.pop()
            left = stack.pop()
            stack.append(operations[token](left, right))
    return stack.pop()
# Demo runs: expected output is -3.0 and 3.4 (within float rounding).
print(eval_postfix('5 14 - 3 /'))
print(eval_postfix('-1.3 3 + 2 *'))
"nisaruj@hotmail.com"
] | nisaruj@hotmail.com |
d8953a19ca131b166a738b58ec8f4d8559674832 | fe0c17b9bf357b4ae41ef744132d1263e44156a9 | /db-web.py | c98142f694ffd3b88d64b94bc19315fc647eaf3f | [
"Apache-2.0"
] | permissive | jwcroppe/python-db-web | dfaa6f094f179fed36c50e58c49338d8a3b15b7d | b6356b4fdd4ced6e25a6efc41fe2426d1b73c2cc | refs/heads/master | 2023-05-11T01:47:53.387280 | 2022-07-21T18:08:49 | 2022-07-21T18:08:49 | 210,747,666 | 0 | 0 | Apache-2.0 | 2023-05-01T21:16:13 | 2019-09-25T03:28:29 | Python | UTF-8 | Python | false | false | 2,837 | py | # A simple Flask-based application that opens an SSH tunnel to a remote
# server over which MySQL (version 3.23) connections are made. The application
# queries for some users and displays some basic information about them.
#
# The following environment variables can be defined to customize the runtime:
# SSH_REMOTE_SERVER: remote endpoint address for the SSH tunnel
# SSH_REMOTE_PORT: remote endpoint port for the SSH tunnel (default: 22)
# SSH_REMOTE_USER_NAME: user name on the remote SSH server (default: root)
# SSH_REMOTE_PASSWORD: password for the user on the remote SSH server (default: s3cur3Pa5sw0rd)
# SSH_TUNNEL_LOCAL_PORT: local port to be used for the SSH tunnel (default: 3306)
# FLASK_HOST: host name on the local server for the Flask server (default: 0.0.0.0)
# FLASK_PORT: port on the local server for the Flask server (default: 5000)
# ENTITY_NAME: name of the event to display when the page is rendered (default: IBM Systems Tech U Attendees)
from configdb import connection_kwargs
from flask import Flask, render_template
from sshtunnel import SSHTunnelForwarder
import MySQLdb
import os
app = Flask(__name__)

# SSH tunnel opened at import time: the local port (SSH_TUNNEL_LOCAL_PORT,
# default 3306) forwards to MySQL on localhost:3306 of the remote host.
server = SSHTunnelForwarder(
    (os.environ.get("SSH_REMOTE_SERVER"),
     int(os.environ.get("SSH_REMOTE_PORT", "22"))),
    ssh_username=os.environ.get("SSH_REMOTE_USER_NAME", "root"),
    # NOTE(review): hardcoded default password in source — move to a
    # secret store or require the env var instead.
    ssh_password=os.environ.get("SSH_REMOTE_PASSWORD", "s3cur3Pa5sw0rd"),
    set_keepalive=5.0,
    remote_bind_address=("localhost", 3306),
    local_bind_address=("127.0.0.1",
                        int(os.environ.get("SSH_TUNNEL_LOCAL_PORT", "3306")))
)
# Display name for the rendered page.
entity_name = os.environ.get("ENTITY_NAME", "IBM Systems Tech U Attendees")
# Tunnel stays open for the process lifetime; it is never explicitly closed.
server.start()
class Database:
    """Thin wrapper around a MySQLdb connection made through the SSH tunnel.

    Connection parameters come from ``configdb.connection_kwargs``.
    """

    driver = MySQLdb
    connect_args = ()
    connect_kw_args = connection_kwargs({})

    def _connect(self):
        # A fresh connection per call; callers must close it.
        return self.driver.connect(*self.connect_args, **self.connect_kw_args)

    def list_employees(self):
        """Return up to 50 employees as a list of dicts.

        Each dict has keys ``first_name``, ``last_name`` and ``gender``.
        The original implementation returned a lazy, single-use ``map``
        object; results are now materialized as a list so they can be
        iterated (or measured) more than once.
        """
        con = None
        result = None
        try:
            con = self._connect()
            cur = con.cursor()
            cur.execute(
                "SELECT first_name, last_name, gender FROM employees LIMIT 50")
            result = [
                {'first_name': row[0], 'last_name': row[1], 'gender': row[2]}
                for row in cur.fetchall()
            ]
        finally:
            # Always release the connection, even if the query raised.
            if con is not None:
                con.close()
        return result
@app.route('/')
def employees():
    """Render the employee listing page from a fresh database query."""
    records = Database().list_employees()
    return render_template("employees.html", result=records,
                           entity_name=entity_name,
                           content_type="application/json")
if __name__ == "__main__":
app.run(host=os.environ.get("FLASK_HOST", "0.0.0.0"),
port=int(os.environ.get("FLASK_PORT", 5000)))
| [
"noreply@github.com"
] | jwcroppe.noreply@github.com |
a96342a33ea96d93f2cbea77cd5ccae3e127d279 | 19a1924e398d009d1f70665d3d0bda8bce0b67b2 | /feature-TFIDF-word-combine.py | 93d5ee4c0acfd85f6aee5c44b79045aaf02f8f0f | [] | no_license | syruphanabi/Readmission-prediction | 0a5ac8ab4875c452a3f928c34f16f9a3e3551e7d | c650dd13d51cfa4391b61ce2b717713a2b2a0211 | refs/heads/master | 2020-03-13T03:15:15.225011 | 2018-04-25T06:33:08 | 2018-04-25T06:33:08 | 130,940,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | # -*- coding: utf-8 -*-
"""
@author: Shenghua Xiang
"""
from sklearn.datasets import load_svmlight_file
import scipy.sparse as sp
import numpy as np
import utils
# This script concatenates TF-IDF unigram features with word2vec features
# (2000 columns each) into one matrix and saves it in svmlight format.
word2vec = load_svmlight_file('features/word2vec2000.text',n_features=2000)
word2vec_mat = word2vec[0].todense()
word2vec_label = word2vec[1]
uni = load_svmlight_file('features/Uni2000.whole',n_features =2000 )
uni_mat = uni[0].todense()
# Column-wise concatenation; rows keep the labels from the word2vec file
# (assumes both files list samples in the same order — TODO confirm).
c = np.concatenate((uni_mat,word2vec_mat), axis=1)
utils.save_svmlight(c,word2vec_label,'Combined')
| [
"syrup@lawn-128-61-24-32.lawn.gatech.edu"
] | syrup@lawn-128-61-24-32.lawn.gatech.edu |
26beec3a279f35330716835f9ed0aa2b9088c394 | d76e726403e8fad407c80cfaa3d0be49fdae3099 | /SRM 755/OneHandSort.py | 86a0d21ed54eff33093ecbfa4c593bf82ffff4d4 | [] | no_license | mukeshtiwari/Topcoder | e81e4d3865a3fd43662d5e2bdf6f85c4d1aeea12 | 7846ac0fb780c9fad36ba4e6c81088b03b4575b1 | refs/heads/master | 2020-04-05T20:16:14.643529 | 2019-07-01T05:13:34 | 2019-07-01T05:13:34 | 17,953,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,123 | py | # -*- coding: utf-8 -*-
import math,string,itertools,fractions,heapq,collections,re,array,bisect
class OneHandSort:
    def sortShelf(self, target):
        # TODO: solution stub — always returns an empty tuple.
        return ()
# CUT begin
# TEST CODE FOR PYTHON {{{
import sys, time, math
def tc_equal(expected, received):
    """Compare a received answer against the expected one.

    The received value is first coerced to the expected value's type.
    Sequences are compared element-wise, floats with a relative/absolute
    tolerance of 1e-9, everything else with ``==``.  Any failure to
    coerce or compare counts as "not equal".
    """
    try:
        kind = type(expected)
        coerced = kind(received)
        if kind in (list, tuple):
            if len(expected) != len(coerced):
                return False
            return all(tc_equal(a, b) for a, b in zip(expected, coerced))
        if kind is float:
            if math.isnan(expected) or math.isnan(coerced):
                return False
            tolerance = 1e-9 * max(1.0, abs(expected))
            return abs(coerced - expected) <= tolerance
        return expected == coerced
    except:  # noqa: E722 — deliberately swallow any coercion/comparison error
        return False
def pretty_str(x):
    """Format a value for test output: strings quoted, tuples rendered
    recursively as ``(a,b,...)``, everything else via ``str``."""
    kind = type(x)
    if kind is str:
        return '"%s"' % x
    if kind is tuple:
        parts = (pretty_str(item) for item in x)
        return '(%s)' % ','.join(parts)
    return str(x)
def do_test(target, __expected):
    """Run a single testcase against ``OneHandSort.sortShelf``.

    Prints PASSED/FAILED/RUNTIME ERROR and returns 1 on pass, 0 otherwise.
    """
    started = time.time()
    solver = OneHandSort()
    error_text = None
    try:
        __result = solver.sortShelf(target)
    except:  # noqa: E722 — report any solver crash as a runtime error
        import traceback
        error_text = traceback.format_exc()
    elapsed = time.time() - started  # in sec
    out = sys.stdout.write
    if error_text is not None:
        out("RUNTIME ERROR: \n")
        out(error_text + "\n")
        return 0
    if not tc_equal(__expected, __result):
        out("FAILED! " + ("(%.3f seconds)" % elapsed) + "\n")
        out(" Expected: " + pretty_str(__expected) + "\n")
        out(" Received: " + pretty_str(__result) + "\n")
        return 0
    out("PASSED! " + ("(%.3f seconds)" % elapsed) + "\n")
    return 1
def run_tests():
    """Read testcases from ``OneHandSort.sample``, run them, and print a
    TopCoder-style pass count and time-decayed score.

    Command-line arguments are testcase indices to SKIP (they are added
    to ``case_set`` and matching cases are ``continue``-d over).
    """
    sys.stdout.write("OneHandSort (250 Points)\n\n")
    passed = cases = 0
    case_set = set()
    for arg in sys.argv[1:]:
        case_set.add(int(arg))
    with open("OneHandSort.sample", "r") as f:
        # Sample file format: a "--" separator line, then the input tuple
        # (count followed by that many ints), a blank line, then the
        # expected answer tuple in the same count-then-values layout.
        while True:
            label = f.readline()
            if not label.startswith("--"): break
            target = []
            for i in range(0, int(f.readline())):
                target.append(int(f.readline().rstrip()))
            target = tuple(target)
            f.readline()
            __answer = []
            for i in range(0, int(f.readline())):
                __answer.append(int(f.readline().rstrip()))
            __answer = tuple(__answer)
            cases += 1
            if len(case_set) > 0 and (cases - 1) in case_set: continue
            sys.stdout.write(" Testcase #%d ... " % (cases - 1))
            passed += do_test(target, __answer)
    sys.stdout.write("\nPassed : %d / %d cases\n" % (passed, cases))
    # TopCoder scoring formula: points decay with minutes elapsed since
    # the hard-coded contest-open timestamp (1555840536).
    T = time.time() - 1555840536
    PT, TT = (T / 60.0, 75.0)
    points = 250 * (0.3 + (0.7 * TT * TT) / (10.0 * PT * PT + TT * TT))
    sys.stdout.write("Time : %d minutes %d secs\n" % (int(T/60), T%60))
    sys.stdout.write("Score : %.2f points\n" % points)
# }}}
# CUT end
| [
"mukeshtiwari.iiitm@gmail.com"
] | mukeshtiwari.iiitm@gmail.com |
1ba1fb67c7437894538f3e6952f374498b9a113a | d5f7e099f220a6fab4dd29c13e4d0af2af6cf96a | /reply_keyboard.py | 7788412515947cddd4355ec4b7af3e49ce0e0c38 | [] | no_license | googolmogol/schedulePybot | a235ac7f0da8e2ee1a5beb48af3c1191aadab34b | c4799a0bd0b3d14077be3ec390e41d060e44111c | refs/heads/master | 2023-03-12T21:38:15.631183 | 2021-02-28T15:27:30 | 2021-02-28T15:27:30 | 340,902,862 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,881 | py | import telebot
changed_week = ''  # which week the user chose (set elsewhere)


def get_mark(resize, onetime):
    """Create a ReplyKeyboardMarkup with the given resize/one-time flags."""
    return telebot.types.ReplyKeyboardMarkup(resize, onetime)
def inline_button(text, url):
    """Build an inline keyboard containing a single URL button."""
    keyboard = telebot.types.InlineKeyboardMarkup()
    keyboard.add(telebot.types.InlineKeyboardButton(text=text, url=url))
    return keyboard
class Keyboard:
    """Builds and sends the bot's reply keyboards (Ukrainian button labels).

    Methods import from ``data_processing`` lazily (inside the method
    bodies), presumably to avoid a circular import — TODO confirm.
    """

    def __init__(self, bot):
        # telebot.TeleBot instance used for all sends.
        self.bot = bot

    # just send photo of schedule
    def show_schedule(self, message):
        """Send the static schedule image to the chat."""
        # NOTE(review): the file handle is never closed explicitly.
        img = open('restfiles/schedule.jpg', 'rb')
        self.bot.send_photo(message.chat.id, img, "<strong>Ваш розклад</strong>", parse_mode="HTML")

    def send_msg_to_bot(self, message, text, markup):
        """Send an HTML-formatted message with the given reply markup."""
        self.bot.send_message(message.chat.id, text, parse_mode="HTML", reply_markup=markup)

    @staticmethod
    def main_menu(error, hide):
        """Return the main-menu markup; full menu when `error` is truthy,
        otherwise only a 'back to main menu' button."""
        markup = get_mark(True, hide)
        if error:
            markup.add('Показати розклад', 'Редагувати розклад')
        else:
            markup.add('Головне меню')
        return markup

    def edit_schedule(self, message):
        """Show the schedule-editing menu (add/edit/delete lesson)."""
        mar = get_mark(True, False)
        mar.add('Додати пару', 'Редагувати пару')
        mar.add('Видалити пару', 'Головне меню')
        text = "<strong>Оберіть, що необхідно редагувати:</strong>"
        self.send_msg_to_bot(message, text, mar)

    # changing and deleting lesson
    def edit_lesson(self, message):
        """Ask which week (even/odd/both) the edit applies to."""
        markup = get_mark(True, False)
        markup.add('Парний', 'Непарний', 'Обидва')
        markup.add('Назад в меню редагування розкладу', 'Головне меню')
        text = "<strong>Оберіть тиждень:</strong>"
        self.send_msg_to_bot(message, text, markup)

    def choosing_day(self, message):
        """Ask for a day of the week; prompt wording depends on whether
        the current flow is 'add lesson' (per data_processing state)."""
        markup = get_mark(True, True)
        markup.add('Понеділок', 'Вівторок', 'Середа')
        markup.add('Четвер', "П'ятниця", 'Субота')
        markup.add('Неділя')
        from data_processing import status_user
        from data_processing import user_step_add
        if status_user(user_step_add, "action"):
            text = '<strong>Оберіть день, на який необхідно додати пару:</strong>'
            markup.add('Назад в меню редагування розкладу', 'Головне меню')
        else:
            text = '<strong>Оберіть день, коли відбувається пара:</strong>'
            markup.add('Назад до вибору тижня', 'Головне меню')
        self.send_msg_to_bot(message, text, markup)

    def choosing_lesson(self, message):
        """Show the list of lessons for the chosen week/day as buttons."""
        self.bot.send_message(message.chat.id, "<strong>Вантажу...почекайте, нічого не клацайте!!!</strong>",
                              parse_mode="HTML")
        from data_processing import get_lesson_to_change
        from data_processing import user_step_edit
        btn_list, text = get_lesson_to_change(user_step_edit["week"], message.text)
        markup = few_btn_row(btn_list, False)
        self.send_msg_to_bot(message, text, markup)

    def choosing_lesson_num(self, message):
        """After a lesson is picked, offer edit fields or delete confirmation
        depending on the current action in user_step_edit."""
        from data_processing import get_text_choosing_lesson_num
        text = get_text_choosing_lesson_num(message)
        markup = get_mark(True, False)
        from data_processing import user_step_edit
        if user_step_edit['action'] == 'Редагувати пару':
            markup.add('Назву пари', 'Викладача', 'Час', 'Посилання', 'Тиждень')
            markup.add('Назад до вибору дня', 'Головне меню')
        if user_step_edit['action'] == 'Видалити пару':
            markup.add('Видалити цю пару')
            markup.add('Назад до вибору дня', 'Головне меню')
        self.send_msg_to_bot(message, text, markup)

    def back_choosing_lesson(self, message):
        """Re-show the field-selection keyboard for editing a lesson."""
        markup = get_mark(True, False)
        markup.add('Назву пари', 'Викладача', 'Час', 'Посилання', 'Назад до вибору параметра для редагування',
                   'Головне меню')
        text = "<strong>Оберіть, що необхідно редагувати:</strong>"
        self.send_msg_to_bot(message, text, markup)

    def choosing_item_to_change(self, message):
        """Prompt for the new value of the field being edited."""
        markup = get_mark(True, False)
        markup.add('Зберегти')
        markup.add('Назад до вибору дня', 'Головне меню')
        from data_processing import user_step_edit
        text = "<strong>Введіть " + user_step_edit["item_to_change"].lower() + \
               ' та натисність кнопку "Зберегти"</strong>'
        self.send_msg_to_bot(message, text, markup)

    def enter_lesson_values(self, message, text, last):
        """Send the next add-lesson prompt; on the last step also show the
        save/back keyboard."""
        if last:
            markup = get_mark(True, False)
            markup.add('Зберегти додану пару')
            markup.add('Назад в меню редагування', 'Головне меню')
            self.send_msg_to_bot(message, text, markup)
        else:
            self.bot.send_message(message.chat.id, text, parse_mode="HTML")

    def save_changed_value(self, message):
        """Ask the user to confirm saving the edited value."""
        markup = get_mark(True, False)
        markup.add('Зберегти', 'Назад до вибору дня')
        text = '<strong>Натисність кнопку "Зберегти"</strong>'
        self.send_msg_to_bot(message, text, markup)

    def save_edit_lesson(self, message):
        """Acknowledge a save/delete; wording depends on the user's answer
        and the current action."""
        markup = get_mark(True, False)
        markup.add('Назад до вибору дня', 'Головне меню')
        from data_processing import user_step_edit
        text = "<strong>Зберіг\nЩо далі, шеф?</strong>"
        if message.text == "Так" and user_step_edit["action"] == 'Видалити пару':
            text = "<strong>Видалив\nСподіваюсь, що пари дійсно немає</strong>"
        elif message.text == "Ні" and user_step_edit["action"] == 'Видалити пару':
            text = "<strong>Охрана атмєна</strong>"
        self.send_msg_to_bot(message, text, markup)

    def sure_delete(self, message):
        """Ask for yes/no confirmation before deleting the chosen lesson."""
        markup = get_mark(True, False)
        markup.add('Так', 'Ні')
        from data_processing import lesson_to_change1
        from data_processing import user_step_edit
        # Index 2 of the lesson record is presumably the lesson title —
        # TODO confirm against data_processing.
        text = 'Ви впевнені, що хочете видалити пару "' + lesson_to_change1[int(user_step_edit['lesson_num']) - 1][2] + \
               '"?'
        self.send_msg_to_bot(message, text, markup)
def few_btn_row(btn_list, hide):
    """Build a reply keyboard from a flat button list.

    All but the last two buttons are laid out in rows of up to three;
    the final two buttons (typically navigation) get a row of their own.

    The previous implementation branched four ways on ``len % 3`` and on
    small lengths, duplicating the same slicing logic; a single chunked
    loop is equivalent for every length (and, like before, raises
    IndexError when fewer than two buttons are supplied).
    """
    markup = get_mark(True, hide)
    leading = btn_list[:-2]
    # Rows of up to three buttons each.
    for start in range(0, len(leading), 3):
        markup.add(*leading[start:start + 3])
    # Final navigation row.
    markup.add(btn_list[-2], btn_list[-1])
    return markup
| [
"googolmogolua@gmail.com"
] | googolmogolua@gmail.com |
cbc3ee7ce35a446a8fa1d07a177d6b85ce9435c6 | c69f0416ef237932dc4d79aa4d9cf352408988b1 | /base/signals.py | 6c7b842f9e273a86a499bbb1c056c9524bf60feb | [] | no_license | rohiem/django-react-ecommerce | 683093d69a49a565bcb8ac51541240acb5486fdc | 129f8594cf7582e2170f3ebeab0e5ea12fa5fa9d | refs/heads/main | 2023-08-31T18:14:55.572442 | 2021-09-17T16:08:04 | 2021-09-17T16:08:04 | 407,599,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | from django.db.models.signals import pre_save
from django.contrib.auth.models import User
def updateuser(sender,instance, **kwargs):
if instance.email !="":
instance.username=instance.email
pre_save.connect(updateuser,sender=User) | [
"rehimovich@gmail.com"
] | rehimovich@gmail.com |
56c2e1b4e01fad685d69aa5dac801fe3007f1539 | a743c4f840139720c7ffdea4ac89f07515c0d593 | /flask_server/run.py | 7013ffb04ab046939327d1039a4908bfc6d943f4 | [] | no_license | bhargavkuchipudi0/web_scraper | 7ae7decfe21012ad6bca95fd7efaf1ebf1d955fb | 0874a69496774a7bdd56b6f36d5e2ebac777aa0d | refs/heads/master | 2022-12-10T18:39:31.953587 | 2019-08-30T22:59:03 | 2019-08-30T22:59:03 | 195,532,546 | 0 | 0 | null | 2022-07-06T20:11:21 | 2019-07-06T11:35:15 | Python | UTF-8 | Python | false | false | 1,080 | py | from flask import Flask
from task import task
from crontab import CronTab
import os
cron_tab = CronTab(user=True)
directory_path = os.getcwd()
# Build the cron command "python3 /<project-root>/scrapers/cron.py".
# split('/')[1:-1] drops the leading empty segment and the last path
# component, so the join yields the path WITHOUT its leading slash.
path_to_cron_file = directory_path.split('/')[1: -1]
# BUGFIX: the old concatenation ('python3' + '/' + ...) produced
# "python3/home/.../cron.py" — no space between interpreter and script
# and no absolute path, so the cron job could never execute.
path_to_cron_file = 'python3 /' + '/'.join(path_to_cron_file) + '/scrapers/cron.py'
# To remove all the cron jobs
def remove_all_cron_jobs():
    """Delete every job in the user's crontab and persist the change."""
    cron_tab.remove_all()
    cron_tab.write()
# To start a cron job if not started
def start_cron_job():
    """Register the scraper cron job (comment '1234') once per minute,
    unless the crontab already contains any job."""
    if len(cron_tab) >= 1:
        print('cron job already started with comment %s.' % ('1234'))
        return
    job = cron_tab.new(command=path_to_cron_file, comment='1234')
    job.minute.every(1)
    job.enable()
    cron_tab.write()
    cron_tab.render()
    print('New cron job created with comment %s' % ('1234'))
app = Flask(__name__)
app.register_blueprint(task)


@app.route('/')
def sys_ip():
    """Health-check endpoint."""
    return 'server running on port 3500'
if __name__ == '__main__':
    print('Server started on port 3500')
    # start_cron_job()
    # Reloader disabled so the (commented) cron setup would only run once.
    app.run(port=3500, debug=True, use_reloader=False)
| [
"bhargavkuchipudi0@gmail.com"
] | bhargavkuchipudi0@gmail.com |
32961560e0850643339eb47d2339e14ce2208375 | bd83377e720c503dcce530546a0edc8d11d5f2da | /LaserRender.py | 99ff884268258ce37fe35d1df6f6c560e99261a6 | [
"MIT"
] | permissive | martonmiklos/meerk40t | 8f17543bb5fd0a650628e3e6515027e4ed4bbe81 | e843d0493e6eb3d35eecf6e3d3b422c8b13f0c2a | refs/heads/master | 2020-12-08T15:37:52.820924 | 2019-12-30T12:16:07 | 2019-12-30T12:16:07 | 233,019,839 | 0 | 0 | MIT | 2020-01-10T10:07:54 | 2020-01-10T10:07:53 | null | UTF-8 | Python | false | false | 11,138 | py | import wx
from PIL import Image
from LaserNode import *
from ZMatrix import ZMatrix
"""
Laser Render provides GUI relevant methods of displaying the given project nodes.
"""
# TODO: Raw typically uses path, but could just use a 1 bit image to visualize it.
def swizzlecolor(c):
    """Swap a color's byte order into wx's BBGGRR layout.

    Accepts a Color, a raw int (converted to Color first), or None
    (passed through unchanged).
    """
    if c is None:
        return None
    color = Color(c) if isinstance(c, int) else c
    return (color.blue << 16) | (color.green << 8) | color.red
class LaserRender:
    """Draws project nodes (paths, images, text) onto wx device contexts
    and rasterizes path groups into PIL images."""

    def __init__(self, project):
        self.project = project
        self.cache = None
        # Reusable wx drawing objects, shared across draw calls.
        self.pen = wx.Pen()
        self.brush = wx.Brush()
        self.color = wx.Colour()

    def render(self, dc, draw_mode):
        """Draw every image/path/text node of the project onto `dc`.

        Each node gets a `draw` method bound lazily on first use,
        chosen by the type of its underlying SVG element (falling back
        to path drawing for unknown types).
        """
        for element in self.project.elements.flat_elements(types=('image', 'path', 'text')):
            try:
                element.draw(element, dc, draw_mode)
            except AttributeError:
                if isinstance(element.element, Path):
                    element.draw = self.draw_path
                elif isinstance(element.element, SVGImage):
                    element.draw = self.draw_image
                elif isinstance(element.element, SVGText):
                    element.draw = self.draw_text
                else:
                    element.draw = self.draw_path
                element.draw(element, dc, draw_mode)

    def make_raster(self, group):
        """Rasterize the filled paths of `group` into an RGB PIL image
        sized to the group's scene bounds."""
        flat_elements = list(group.flat_elements(types='path'))
        bounds = group.scene_bounds
        if bounds is None:
            # Bounds are computed lazily; force a validation pass.
            self.project.validate()
            bounds = group.scene_bounds
        xmin, ymin, xmax, ymax = bounds
        width = int(xmax - xmin)
        height = int(ymax - ymin)
        bitmap = wx.Bitmap(width, height, 32)
        dc = wx.MemoryDC()
        dc.SelectObject(bitmap)
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        dc.Clear()
        gc = wx.GraphicsContext.Create(dc)
        for e in flat_elements:
            element = e.element
            matrix = element.transform
            fill_color = e.fill
            if fill_color is None:
                continue  # only filled paths contribute to the raster
            p = gc.CreatePath()
            parse = LaserCommandPathParser(p)
            # Temporarily shift the element into bitmap coordinates,
            # then restore its transform afterwards.
            matrix.post_translate(-xmin, -ymin)
            for event in e.generate():
                parse.command(event)
            matrix.post_translate(+xmin, +ymin)
            self.color.SetRGB(swizzlecolor(fill_color))
            self.brush.SetColour(self.color)
            gc.SetBrush(self.brush)
            gc.FillPath(p)
            del p
        img = bitmap.ConvertToImage()
        buf = img.GetData()
        image = Image.frombuffer("RGB", tuple(bitmap.GetSize()), bytes(buf), "raw", "RGB", 0, 1)
        gc.Destroy()
        del dc
        return image

    def draw_path(self, node, dc, draw_mode):
        """Default draw routine for the laser element.
        If the generate is defined this will draw the
        element as a series of lines, as defined by generate.

        The built wx path is cached on the node (`node.cache`) and
        reused on subsequent draws.  Bit 0 of `draw_mode` set
        suppresses fills.
        """
        try:
            matrix = node.element.transform
        except AttributeError:
            matrix = Matrix()
        drawfills = draw_mode & 1 == 0
        gc = wx.GraphicsContext.Create(dc)
        gc.SetTransform(wx.GraphicsContext.CreateMatrix(gc, ZMatrix(matrix)))
        c = swizzlecolor(node.stroke)
        if c is None:
            self.pen.SetColour(None)
        else:
            self.color.SetRGB(c)
            self.pen.SetColour(self.color)
        self.pen.SetWidth(node.stroke_width)
        gc.SetPen(self.pen)
        cache = None
        try:
            cache = node.cache
        except AttributeError:
            pass
        if cache is None:
            # First draw: build the wx path from the node's command stream.
            p = gc.CreatePath()
            parse = LaserCommandPathParser(p)
            for event in node.generate():
                parse.command(event)
            node.cache = p
        if drawfills and node.fill is not None:
            c = node.fill
            if c is not None and c != 'none':
                swizzle_color = swizzlecolor(c)
                self.color.SetRGB(swizzle_color)  # wx has BBGGRR
                self.brush.SetColour(self.color)
                gc.SetBrush(self.brush)
                gc.FillPath(node.cache)
        gc.StrokePath(node.cache)

    def draw_text(self, node, dc, draw_mode):
        """Draw an SVGText node at its transform's translation."""
        try:
            matrix = node.element.transform
        except AttributeError:
            matrix = Matrix()
        gc = wx.GraphicsContext.Create(dc)
        gc.SetTransform(wx.GraphicsContext.CreateMatrix(gc, ZMatrix(matrix)))
        if node.element.text is not None:
            dc.DrawText(node.element.text, matrix.value_trans_x(), matrix.value_trans_y())

    def draw_image(self, node, dc, draw_mode):
        """Draw an SVGImage node, caching a wx.Bitmap on the node.

        Large images are downscaled so their longest side is at most
        `node.max_allowed` (default 2048) pixels before conversion.
        """
        try:
            matrix = node.element.transform
        except AttributeError:
            matrix = Matrix()
        gc = wx.GraphicsContext.Create(dc)
        gc.SetTransform(wx.GraphicsContext.CreateMatrix(gc, ZMatrix(matrix)))
        cache = None
        try:
            cache = node.cache
        except AttributeError:
            pass
        if cache is None:
            try:
                max_allowed = node.max_allowed
            except AttributeError:
                max_allowed = 2048
            pil_data = node.element.image
            # Remember the original size; the bitmap is drawn back at
            # this size regardless of any downscaling below.
            node.c_width, node.c_height = pil_data.size
            width, height = pil_data.size
            dim = max(width, height)
            if dim > max_allowed or max_allowed == -1:
                width = int(round(width * max_allowed / float(dim)))
                height = int(round(height * max_allowed / float(dim)))
                pil_data = pil_data.copy().resize((width, height))
            else:
                pil_data = pil_data.copy()
            if pil_data.mode != "RGBA":
                pil_data = pil_data.convert('RGBA')
            pil_bytes = pil_data.tobytes()
            node.cache = wx.Bitmap.FromBufferRGBA(width, height, pil_bytes)
        gc.DrawBitmap(node.cache, 0, 0, node.c_width, node.c_height)
class LaserCommandPathParser:
    """This class converts a set of laser commands into a
    graphical representation of those commands."""

    def __init__(self, graphic_path):
        # wx graphics path that accumulates the rendered geometry.
        self.graphic_path = graphic_path
        self.on = False        # laser-on state; only affects COMMAND_MOVE
        self.relative = False  # when True, coordinates are offsets from (x, y)
        self.x = 0             # current position
        self.y = 0

    def command(self, event):
        """Apply one ``(command, values)`` event to the graphic path.

        State-only commands (speed, power, modes, lock, ...) are accepted
        and ignored since they have no visual representation.
        """
        command, values = event
        if command == COMMAND_LASER_OFF:
            self.on = False
        elif command == COMMAND_LASER_ON:
            self.on = True
        elif command == COMMAND_RAPID_MOVE:
            x, y = values
            if self.relative:
                x += self.x
                y += self.y
            self.graphic_path.MoveToPoint(x, y)
            self.x = x
            self.y = y
        elif command == COMMAND_SET_SPEED:
            pass
        elif command == COMMAND_SET_POWER:
            pass
        elif command == COMMAND_SET_STEP:
            pass
        elif command == COMMAND_SET_DIRECTION:
            pass
        elif command == COMMAND_MODE_COMPACT:
            pass
        elif command == COMMAND_MODE_DEFAULT:
            pass
        elif command == COMMAND_MODE_CONCAT:
            pass
        elif command == COMMAND_SET_ABSOLUTE:
            self.relative = False
        elif command == COMMAND_SET_INCREMENTAL:
            self.relative = True
        elif command == COMMAND_HSTEP:
            # Horizontal step: always relative in x, y unchanged.
            x = values
            y = self.y
            x += self.x
            self.graphic_path.MoveToPoint(x, y)
            self.x = x
            self.y = y
        elif command == COMMAND_VSTEP:
            # Vertical step: always relative in y, x unchanged.
            x = self.x
            y = values
            y += self.y
            self.graphic_path.MoveToPoint(x, y)
            self.x = x
            self.y = y
        elif command == COMMAND_HOME:
            self.graphic_path.MoveToPoint(0, 0)
            self.x = 0
            self.y = 0
        elif command == COMMAND_LOCK:
            pass
        elif command == COMMAND_UNLOCK:
            pass
        elif command == COMMAND_PLOT:
            # values is a path-like sequence of svgelements segments;
            # arcs are approximated by their cubic decomposition.
            plot = values
            for e in plot:
                if isinstance(e, Move):
                    self.graphic_path.MoveToPoint(e.end[0], e.end[1])
                elif isinstance(e, Line):
                    self.graphic_path.AddLineToPoint(e.end[0], e.end[1])
                elif isinstance(e, Close):
                    self.graphic_path.CloseSubpath()
                elif isinstance(e, QuadraticBezier):
                    self.graphic_path.AddQuadCurveToPoint(e.control[0], e.control[1],
                                                          e.end[0], e.end[1])
                elif isinstance(e, CubicBezier):
                    self.graphic_path.AddCurveToPoint(e.control1[0], e.control1[1],
                                                      e.control2[0], e.control2[1],
                                                      e.end[0], e.end[1])
                elif isinstance(e, Arc):
                    for curve in e.as_cubic_curves():
                        self.graphic_path.AddCurveToPoint(curve.control1[0], curve.control1[1],
                                                          curve.control2[0], curve.control2[1],
                                                          curve.end[0], curve.end[1])
        elif command == COMMAND_SHIFT:
            x, y = values
            if self.relative:
                x += self.x
                y += self.y
            self.graphic_path.MoveToPoint(x, y)
            self.x = x
            self.y = y
        elif command == COMMAND_MOVE:
            x, y = values
            if self.relative:
                x += self.x
                y += self.y
            # NOTE(review): jumps when the laser is ON and draws a line
            # when it is OFF — looks inverted; confirm against the
            # driver's MOVE semantics before changing.
            if self.on:
                self.graphic_path.MoveToPoint(x, y)
            else:
                self.graphic_path.AddLineToPoint(x, y)
            self.x = x
            self.y = y
        elif command == COMMAND_CUT:
            x, y = values
            if self.relative:
                x += self.x
                y += self.y
            self.graphic_path.AddLineToPoint(x, y)
            self.x = x
            self.y = y
        elif command == COMMAND_CUT_QUAD:
            cx, cy, x, y = values
            if self.relative:
                x += self.x
                y += self.y
                cx += self.x
                cy += self.y
            self.graphic_path.AddQuadCurveToPoint(cx, cy, x, y)
            self.x = x
            self.y = y
        elif command == COMMAND_CUT_CUBIC:
            c1x, c1y, c2x, c2y, x, y = values
            if self.relative:
                x += self.x
                y += self.y
                c1x += self.x
                c1y += self.y
                c2x += self.x
                c2y += self.y
            self.graphic_path.AddCurveToPoint(c1x, c1y, c2x, c2y, x, y)
            self.x = x
            self.y = y
| [
"noreply@github.com"
] | martonmiklos.noreply@github.com |
88c8f5d20e25b36ccdf75d727194ee2ae5aa8c26 | 72ba6d463974ee1fec22ae1da5897e69c3feb455 | /mlinsights/mlmodel/kmeans_l1.py | 3e812265ec4e4c90be90008a35c1a5a0b48c7689 | [
"MIT"
] | permissive | astrogilda/mlinsights | bb07df34e15d0959a61cc5e278c1a7e0b8de62db | 3af8defd3dc94889c1311911925f769573481a62 | refs/heads/master | 2022-09-24T11:33:31.474084 | 2020-06-05T10:14:46 | 2020-06-05T10:14:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,416 | py | # pylint: disable=C0302
"""
@file
@brief Implements k-means with norms L1 and L2.
"""
import warnings
import numpy
from scipy.sparse import issparse
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.cluster import KMeans
from sklearn.cluster._kmeans import (
_tolerance as _tolerance_skl,
_check_normalize_sample_weight,
_validate_center_shape
)
from sklearn.exceptions import ConvergenceWarning
from sklearn.metrics.pairwise import (
euclidean_distances,
manhattan_distances,
pairwise_distances_argmin_min
)
from sklearn.utils import check_random_state, check_array
from sklearn.utils.validation import _num_samples, check_is_fitted
from sklearn.utils.extmath import stable_cumsum
from ._kmeans_022 import (
_labels_inertia_skl,
_labels_inertia_precompute_dense,
)
def _k_init(norm, X, n_clusters, random_state, n_local_trials=None):
    """Init n_clusters seeds according to k-means++
    Parameters
    ----------
    norm : `l1` or `l2`
        manhattan or euclidean distance
    X : array or sparse matrix, shape (n_samples, n_features)
        The data to pick seeds for. To avoid memory copy, the input data
        should be double precision (dtype=numpy.float64).
    n_clusters : integer
        The number of seeds to choose
    random_state : int, RandomState instance
        The generator used to initialize the centers. Use an int to make the
        randomness deterministic.
        See :term:`Glossary <random_state>`.
    n_local_trials : integer, optional
        The number of seeding trials for each center (except the first),
        of which the one reducing inertia the most is greedily chosen.
        Set to None to make the number of trials depend logarithmically
        on the number of seeds (2+log(k)); this is the default.
    Notes
    -----
    Selects initial cluster centers for k-mean clustering in a smart way
    to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
    "k-means++: the advantages of careful seeding". ACM-SIAM symposium
    on Discrete algorithms. 2007
    Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
    which is the implementation used in the aforementioned paper.
    """
    n_samples, n_features = X.shape
    centers = numpy.empty((n_clusters, n_features), dtype=X.dtype)
    # Set the number of local seeding trials if none is given
    if n_local_trials is None:
        # This is what Arthur/Vassilvitskii tried, but did not report
        # specific results for other than mentioning in the conclusion
        # that it helped.
        n_local_trials = 2 + int(numpy.log(n_clusters))
    # Pick first center randomly
    center_id = random_state.randint(n_samples)
    if issparse(X):
        centers[0] = X[center_id].toarray()
    else:
        centers[0] = X[center_id]
    # Initialize list of closest distances and calculate current potential.
    # The norm only matters through the distance function: squared
    # euclidean for 'l2' (as in scikit-learn's k-means++), manhattan
    # for 'l1'.
    if norm.lower() == 'l2':
        dist_fct = lambda x, y: euclidean_distances(x, y, squared=True)
    elif norm.lower() == 'l1':
        dist_fct = lambda x, y: manhattan_distances(x, y)
    else:
        raise NotImplementedError(  # pragma no cover
            "norm must be 'l1' or 'l2' not '{}'.".format(norm))
    closest_dist_sq = dist_fct(centers[0, numpy.newaxis], X)
    current_pot = closest_dist_sq.sum()
    # Pick the remaining n_clusters-1 points
    for c in range(1, n_clusters):
        # Choose center candidates by sampling with probability proportional
        # to the squared distance to the closest existing center
        rand_vals = random_state.random_sample(n_local_trials) * current_pot
        candidate_ids = numpy.searchsorted(stable_cumsum(closest_dist_sq),
                                           rand_vals)
        # searchsorted can return an out-of-range index when rounding pushes
        # rand_vals past the last cumulated value; clip it back in place
        numpy.clip(candidate_ids, None, closest_dist_sq.size - 1,
                   out=candidate_ids)
        # Compute distances to center candidates
        distance_to_candidates = dist_fct(X[candidate_ids], X)
        # update closest distances squared and potential for each candidate
        numpy.minimum(closest_dist_sq, distance_to_candidates,
                      out=distance_to_candidates)
        candidates_pot = distance_to_candidates.sum(axis=1)
        # Decide which candidate is the best (lowest resulting potential)
        best_candidate = numpy.argmin(candidates_pot)
        current_pot = candidates_pot[best_candidate]
        closest_dist_sq = distance_to_candidates[best_candidate]
        best_candidate = candidate_ids[best_candidate]
        # Permanently add best center candidate found in local tries
        if issparse(X):
            centers[c] = X[best_candidate].toarray()
        else:
            centers[c] = X[best_candidate]
    return centers
def _init_centroids(norm, X, k, init, random_state=None,
                    init_size=None):
    """
    Computes the initial centroids.

    Parameters
    ----------
    norm : 'l1' or 'l2'
    X : array, shape (n_samples, n_features)
    k : int
        number of centroids
    init : {'k-means++', 'random' or ndarray or callable} optional
        Method for initialization
    random_state : int, RandomState instance or None (default)
        Determines random number generation for centroid initialization.
        Use an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`.
    init_size : int, optional
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy); needs
        to be larger than k.

    Returns
    -------
    centers : array, shape(k, n_features)
    """
    random_state = check_random_state(random_state)
    n_samples = X.shape[0]
    # Optionally work on a random subset of X to speed up initialization.
    if init_size is not None and init_size < n_samples:
        if init_size < k:  # pragma: no cover
            warnings.warn(
                "init_size=%d should be larger than k=%d. "
                "Setting it to 3*k" % (init_size, k),
                RuntimeWarning, stacklevel=2)
            init_size = 3 * k
        subset = random_state.randint(0, n_samples, init_size)
        X = X[subset]
        n_samples = X.shape[0]
    elif n_samples < k:
        raise ValueError(
            "n_samples=%d should be larger than k=%d" % (n_samples, k))
    init_is_name = isinstance(init, str)
    if init_is_name and init == 'k-means++':
        centers = _k_init(norm, X, k, random_state=random_state)
    elif init_is_name and init == 'random':
        chosen = random_state.permutation(n_samples)[:k]
        centers = X[chosen]
    elif hasattr(init, '__array__'):
        # ensure that the centers have the same dtype as X
        # this is a requirement of fused types of cython
        centers = numpy.array(init, dtype=X.dtype)
    elif callable(init):
        centers = numpy.asarray(
            init(norm, X, k, random_state=random_state), dtype=X.dtype)
    else:
        raise ValueError("the init parameter for the k-means should "
                         "be 'k-means++' or 'random' or an ndarray, "
                         "'%s' (type '%s') was passed." % (init, type(init)))
    if issparse(centers):
        centers = centers.toarray()
    _validate_center_shape(X, k, centers)
    return centers
def _centers_dense(X, sample_weight, labels, n_clusters, distances,
X_sort_index):
"""
M step of the K-means EM algorithm.
Computation of cluster centers / means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
sample_weight : array-like, shape (n_samples,)
The weights for each observation in X.
labels : array of integers, shape (n_samples)
Current label assignment
n_clusters : int
Number of desired clusters
distances : array-like, shape (n_samples)
Distance to closest cluster for each sample.
X_sort_index : array-like, shape (n_samples, n_features)
index of each feature in all features
Returns
-------
centers : array, shape (n_clusters, n_features)
The resulting centers
"""
dtype = X.dtype
n_features = X.shape[1]
n_samples = X.shape[0]
centers = numpy.zeros((n_clusters, n_features), dtype=dtype)
weight_in_cluster = numpy.zeros((n_clusters,), dtype=dtype)
for i in range(n_samples):
c = labels[i]
weight_in_cluster[c] += sample_weight[i]
empty_clusters = numpy.where(weight_in_cluster == 0)[0]
if len(empty_clusters) > 0: # pragma: no cover
# find points to reassign empty clusters to
far_from_centers = distances.argsort()[::-1]
for i, cluster_id in enumerate(empty_clusters):
far_index = far_from_centers[i]
new_center = X[far_index] * sample_weight[far_index]
centers[cluster_id] = new_center
weight_in_cluster[cluster_id] = sample_weight[far_index]
if sample_weight.min() == sample_weight.max():
# to optimize
for i in range(n_clusters):
sub = X[labels == i]
med = numpy.median(sub, axis=0)
centers[i, :] = med
else:
raise NotImplementedError( # pragma: no cover
"Non uniform weights are not implemented yet as "
"the cost would be very high. "
"See https://en.wikipedia.org/wiki/Weighted_median#Algorithm.")
return centers
def _kmeans_single_lloyd(norm, X, sample_weight, n_clusters, max_iter=300,
                         init='k-means++', verbose=False,
                         random_state=None, tol=1e-4,
                         precompute_distances=True):
    """
    A single run of k-means, assumes preparation completed prior.
    Parameters
    ----------
    norm : 'l1' or 'l2'
    X : array-like of floats, shape (n_samples, n_features)
        The observations to cluster.
    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.
    sample_weight : array-like, shape (n_samples,)
        The weights for each observation in X.
    max_iter : int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.
        Callers are expected to validate that it is strictly positive.
    init : {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
        If an ndarray is passed, it should be of shape (k, p) and gives
        the initial centers.
        If a callable is passed, it should take arguments X, k and
        and a random state and return an initialization.
    tol : float, optional
        The relative increment in the results before declaring convergence.
    verbose : boolean, optional
        Verbosity mode
    precompute_distances : boolean, default: True
        Precompute distances (faster but takes more memory).
    random_state : int, RandomState instance or None (default)
        Determines random number generation for centroid initialization. Use
        an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`.
    Returns
    -------
    centroid : float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.
    label : integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    n_iter : int
        Number of iterations run.
    """
    random_state = check_random_state(random_state)
    sample_weight = _check_normalize_sample_weight(sample_weight, X)
    best_labels, best_inertia, best_centers = None, None, None
    # init
    centers = _init_centroids(
        norm, X, n_clusters, init, random_state=random_state)
    if verbose:  # pragma no cover
        print("Initialization complete")
    # Allocate memory to store the distances for each sample to its
    # closer center for reallocation in case of ties
    distances = numpy.zeros(shape=(X.shape[0],), dtype=X.dtype)
    # column-wise sort order, computed once and forwarded to
    # _centers_dense on every M-step
    X_sort_index = numpy.argsort(X, axis=0)
    # iterations
    for i in range(max_iter):
        centers_old = centers.copy()
        # labels assignment is also called the E-step of EM
        labels, inertia = \
            _labels_inertia(norm, X, sample_weight, centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
        # computation of the means is also called the M-step of EM
        centers = _centers_dense(X, sample_weight, labels, n_clusters, distances,
                                 X_sort_index)
        if verbose:  # pragma no cover
            print("Iteration %2d, inertia %.3f" % (i, inertia))
        # keep the best configuration seen so far (inertia can oscillate)
        if best_inertia is None or inertia < best_inertia:
            best_labels = labels.copy()
            best_centers = centers.copy()
            best_inertia = inertia
        # convergence test on the L1 norm of the total center movement
        center_shift_total = numpy.sum(
            numpy.abs(centers_old - centers).ravel())
        if center_shift_total <= tol:
            if verbose:  # pragma no cover
                print("Converged at iteration %d: "
                      "center shift %r within tolerance %r"
                      % (i, center_shift_total, tol))
            break
    if center_shift_total > 0:
        # rerun E-step in case of non-convergence so that predicted labels
        # match cluster centers
        best_labels, best_inertia = \
            _labels_inertia(norm, X, sample_weight, best_centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
    # i is the index of the last executed iteration (loop runs at least
    # once because callers validate max_iter > 0)
    return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia(norm, X, sample_weight, centers,
                    precompute_distances=True, distances=None):
    """
    E step of the K-means EM algorithm: assigns every sample to its
    closest center and accumulates the inertia.  ``distances`` is
    filled in-place when provided.

    Parameters
    ----------
    norm : 'l1' or 'l2'
    X : float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
        The input samples to assign to the labels.
    sample_weight : array-like, shape (n_samples,)
        The weights for each observation in X.
    centers : float array, shape (k, n_features)
        The cluster centers.
    precompute_distances : boolean, default: True
        Precompute distances (faster but takes more memory).
    distances : existing distances

    Returns
    -------
    labels : int array of shape(n)
        The resulting assignment
    inertia : float
        Sum of squared distances of samples to their closest cluster center.
    """
    # 'l2' is delegated to scikit-learn's own implementation.
    if norm == 'l2':
        return _labels_inertia_skl(
            X, sample_weight=sample_weight, centers=centers,
            precompute_distances=precompute_distances,
            x_squared_norms=None)
    sample_weight = _check_normalize_sample_weight(sample_weight, X)
    # the 'l1' path only supports dense, precomputed distances
    if issparse(X):
        raise NotImplementedError(  # pragma no cover
            "Sparse matrix is not implemented for norm 'l1'.")
    if not precompute_distances:
        raise NotImplementedError(  # pragma no cover
            "precompute_distances is False, not implemented for norm 'l1'.")
    if distances is None:
        distances = numpy.zeros(shape=(0,), dtype=X.dtype)
    # distances will be changed in-place by the helper
    return _labels_inertia_precompute_dense(
        norm=norm, X=X, sample_weight=sample_weight,
        centers=centers, distances=distances)
def _tolerance(norm, X, tol):
"""Return a tolerance which is independent of the dataset"""
if norm == 'l2':
return _tolerance_skl(X, tol)
if norm == 'l1':
variances = numpy.sum(numpy.abs(X), axis=0) / X.shape[0]
return variances.sum()
raise NotImplementedError( # pragma no cover
"not implemented for norm '{}'.".format(norm))
class KMeansL1L2(KMeans):
    """
    K-Means clustering with either norm L1 or L2.
    See notebook :ref:`kmeansl1rst` for an example.
    Parameters
    ----------
    n_clusters : int, default=8
        The number of clusters to form as well as the number of
        centroids to generate.
    init : {'k-means++', 'random'} or ndarray of shape \
            (n_clusters, n_features), default='k-means++'
        Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
    n_init : int, default=10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    max_iter : int, default=300
        Maximum number of iterations of the k-means algorithm for a
        single run.
    tol : float, default=1e-4
        Relative tolerance with regards to inertia to declare convergence.
    precompute_distances : 'auto' or bool, default='auto'
        Precompute distances (faster but takes more memory).
        'auto' : do not precompute distances if n_samples * n_clusters > 12
        million. This corresponds to about 100MB overhead per job using
        double precision.
        True : always precompute distances.
        False : never precompute distances.
    verbose : int, default=0
        Verbosity mode.
    random_state : int, RandomState instance, default=None
        Determines random number generation for centroid initialization. Use
        an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`.
    copy_x : bool, default=True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True (default), then the original data is
        not modified, ensuring X is C-contiguous. If False, the original data
        is modified, and put back before the function returns, but small
        numerical differences may be introduced by subtracting and then adding
        the data mean, in this case it will also not ensure that data is
        C-contiguous which may cause a significant slowdown.
    n_jobs : int, default=None
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    algorithm : {"auto", "full", "elkan"}, default="auto"
        K-means algorithm to use. The classical EM-style algorithm is "full".
        The "elkan" variation is more efficient by using the triangle
        inequality, but currently doesn't support sparse data. "auto" chooses
        "elkan" for dense data and "full" for sparse data.
    norm : {"L1", "L2"}
        The norm *L2* is identical to :epkg:`KMeans`.
        Norm *L1* uses a complete different path.
    Attributes
    ----------
    cluster_centers_ : ndarray of shape (n_clusters, n_features)
        Coordinates of cluster centers. If the algorithm stops before fully
        converging (see ``tol`` and ``max_iter``), these will not be
        consistent with ``labels_``.
    labels_ : ndarray of shape (n_samples,)
        Labels of each point
    inertia_ : float
        Sum of squared distances of samples to their closest cluster center.
    n_iter_ : int
        Number of iterations run.
    """

    def __init__(self, n_clusters=8, init='k-means++', n_init=10,
                 max_iter=300, tol=1e-4, precompute_distances='auto',
                 verbose=0, random_state=None, copy_x=True,
                 n_jobs=None, algorithm='full', norm='L2'):
        KMeans.__init__(self, n_clusters=n_clusters, init=init, n_init=n_init,
                        max_iter=max_iter, tol=tol,
                        precompute_distances=precompute_distances,
                        verbose=verbose, random_state=random_state,
                        copy_x=copy_x, n_jobs=n_jobs, algorithm=algorithm)
        # the norm is stored lower-case so 'L1'/'l1' behave the same
        self.norm = norm.lower()
        # the 'l1' path only implements Lloyd's algorithm ('full')
        if self.norm == 'l1' and self.algorithm != 'full':
            raise NotImplementedError(  # pragma no cover
                "Only algorithm 'full' is implemented with norm 'l1'.")

    def fit(self, X, y=None, sample_weight=None):
        """
        Computes k-means clustering.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Training instances to cluster. It must be noted that the data
            will be converted to C ordering, which will cause a memory
            copy if the given data is not C-contiguous.
        y : Ignored
            Not used, present here for API consistency by convention.
        sample_weight : array-like, shape (n_samples,), optional
            The weights for each observation in X. If None, all observations
            are assigned equal weight (default: None).
        Returns
        -------
        self
            Fitted estimator.
        """
        # dispatch on the norm: 'l2' is exactly scikit-learn's KMeans,
        # 'l1' uses the dedicated implementation below
        if self.norm == 'l2':
            KMeans.fit(self, X=X, y=y, sample_weight=sample_weight)
        elif self.norm == 'l1':
            self._fit_l1(X=X, y=y, sample_weight=sample_weight)
        else:
            raise NotImplementedError(  # pragma no cover
                "Norm is not L1 or L2 but '{}'.".format(self.norm))
        return self

    def _fit_l1(self, X, y=None, sample_weight=None):
        """
        Computes k-means clustering with norm `'l1'`.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Training instances to cluster. It must be noted that the data
            will be converted to C ordering, which will cause a memory
            copy if the given data is not C-contiguous.
        y : Ignored
            Not used, present here for API consistency by convention.
        sample_weight : array-like, shape (n_samples,), optional
            The weights for each observation in X. If None, all observations
            are assigned equal weight (default: None).
        Returns
        -------
        self
            Fitted estimator.
        """
        random_state = check_random_state(self.random_state)
        n_init = self.n_init
        if n_init <= 0:
            raise ValueError(  # pragma no cover
                "Invalid number of initializations."
                " n_init=%d must be bigger than zero." % n_init)
        if self.max_iter <= 0:
            raise ValueError(  # pragma no cover
                'Number of iterations should be a positive number,'
                ' got %d instead' % self.max_iter
            )
        # avoid forcing order when copy_x=False
        order = "C" if self.copy_x else None
        X = check_array(X, accept_sparse='csr', dtype=[numpy.float64, numpy.float32],
                        order=order, copy=self.copy_x)
        # verify that the number of samples given is larger than k
        if _num_samples(X) < self.n_clusters:
            raise ValueError(  # pragma no cover
                "n_samples=%d should be >= n_clusters=%d" % (
                    _num_samples(X), self.n_clusters))
        # convert the relative user tolerance into an absolute threshold
        tol = _tolerance(self.norm, X, self.tol)
        # If the distances are precomputed every job will create a matrix of
        # shape (n_clusters, n_samples). To stop KMeans from eating up memory
        # we only activate this if the created matrix is guaranteed to be
        # under 100MB. 12 million entries consume a little under 100MB if they
        # are of type double.
        precompute_distances = self.precompute_distances
        if precompute_distances == 'auto':
            n_samples = X.shape[0]
            precompute_distances = (self.n_clusters * n_samples) < 12e6
        elif isinstance(precompute_distances, bool):  # pragma: no cover
            pass
        else:
            raise ValueError(  # pragma no cover
                "precompute_distances should be 'auto' or True/False"
                ", but a value of %r was passed" % precompute_distances)
        # Validate init array
        init = self.init
        if hasattr(init, '__array__'):  # pragma: no cover
            init = check_array(init, dtype=X.dtype.type, copy=True)
            _validate_center_shape(X, self.n_clusters, init)
            if n_init != 1:
                warnings.warn(
                    'Explicit initial center position passed: '
                    'performing only one init in k-means instead of n_init=%d'
                    % n_init, RuntimeWarning, stacklevel=2)
                n_init = 1
        best_labels, best_inertia, best_centers = None, None, None
        algorithm = self.algorithm
        if self.n_clusters == 1:
            # elkan doesn't make sense for a single cluster, full will produce
            # the right result.
            algorithm = "full"  # pragma: no cover
        if algorithm == "auto":
            algorithm = "full"  # pragma: no cover
        if algorithm == "full":
            kmeans_single = _kmeans_single_lloyd
        else:
            raise ValueError(  # pragma no cover
                "Algorithm must be 'auto', 'full' or 'elkan', got"
                " %s" % str(algorithm))
        # one independent seed per initialization
        seeds = random_state.randint(numpy.iinfo(numpy.int32).max, size=n_init)
        if effective_n_jobs(self.n_jobs) == 1:
            # For a single thread, less memory is needed if we just store one
            # set of the best results (as opposed to one set per run per
            # thread).
            for seed in seeds:
                # run a k-means once
                labels, inertia, centers, n_iter_ = kmeans_single(
                    self.norm, X, sample_weight, self.n_clusters,
                    max_iter=self.max_iter, init=init, verbose=self.verbose,
                    precompute_distances=precompute_distances, tol=tol,
                    random_state=seed)
                # determine if these results are the best so far
                if best_inertia is None or inertia < best_inertia:
                    best_labels = labels.copy()
                    best_centers = centers.copy()
                    best_inertia = inertia
                    best_n_iter = n_iter_
        else:
            # parallelisation of k-means runs
            results = Parallel(n_jobs=self.n_jobs, verbose=0)(
                delayed(kmeans_single)(
                    self.norm, X, sample_weight, self.n_clusters,
                    max_iter=self.max_iter, init=init,
                    verbose=self.verbose, tol=tol,
                    precompute_distances=precompute_distances,
                    # Change seed to ensure variety
                    random_state=seed
                )
                for seed in seeds)
            # Get results with the lowest inertia
            labels, inertia, centers, n_iters = zip(*results)
            best = numpy.argmin(inertia)
            best_labels = labels[best]
            best_inertia = inertia[best]
            best_centers = centers[best]
            best_n_iter = n_iters[best]
        # duplicated points can collapse clusters; warn instead of failing
        distinct_clusters = len(set(best_labels))
        if distinct_clusters < self.n_clusters:
            warnings.warn(  # pragma no cover
                "Number of distinct clusters ({}) found smaller than "
                "n_clusters ({}). Possibly due to duplicate points "
                "in X.".format(distinct_clusters, self.n_clusters),
                ConvergenceWarning, stacklevel=2)
        self.cluster_centers_ = best_centers
        self.labels_ = best_labels
        self.inertia_ = best_inertia
        self.n_iter_ = best_n_iter
        return self

    def transform(self, X):
        """
        Transforms *X* to a cluster-distance space.
        In the new space, each dimension is the distance to the cluster
        centers. Note that even if X is sparse, the array returned by
        `transform` will typically be dense.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            New data to transform.
        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        if self.norm == 'l2':
            return KMeans.transform(self, X)
        if self.norm == 'l1':
            return self._transform_l1(X)
        raise NotImplementedError(  # pragma no cover
            "Norm is not L1 or L2 but '{}'.".format(self.norm))

    def _transform_l1(self, X):
        """
        Returns the distance of each point in *X* to
        every fit clusters.
        """
        check_is_fitted(self)
        X = self._check_test_data(X)
        # manhattan (L1) distance to every fitted center
        return manhattan_distances(X, self.cluster_centers_)

    def predict(self, X, sample_weight=None):
        """
        Predicts the closest cluster each sample in X belongs to.
        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            New data to predict.
        sample_weight : array-like, shape (n_samples,), optional
            The weights for each observation in X. If None, all observations
            are assigned equal weight (default: None), unused here
        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        if self.norm == 'l2':
            return KMeans.predict(self, X)
        if self.norm == 'l1':
            return self._predict_l1(X, sample_weight=sample_weight)
        raise NotImplementedError(  # pragma no cover
            "Norm is not L1 or L2 but '{}'.".format(self.norm))

    def _predict_l1(self, X, sample_weight=None, return_distances=False):
        """
        Returns the distance of each point in *X* to
        every fit clusters.
        :param X: features
        :param sample_weight: (unused)
        :param return_distances: returns distances as well
        :return: labels or `labels, distances`
        """
        # closest center index (and optionally the distance) per sample,
        # measured with the manhattan metric
        labels, mindist = pairwise_distances_argmin_min(
            X=X, Y=self.cluster_centers_, metric='manhattan')
        labels = labels.astype(numpy.int32, copy=False)
        if return_distances:
            return labels, mindist
        return labels
| [
"xavier.dupre@gmail.com"
] | xavier.dupre@gmail.com |
5fd2cc951a434908089dd81418522ef4a9fab088 | 31b56ff4192e639e25e5a8dc19bc050e7cf76f4e | /app/common_func.py | 743157a755a72ff99dbe3ce2e5b6b4299c21c84e | [
"MIT"
] | permissive | githubtaotao/flask_antvirus | 33d2d32cd0e2a23f1d889c15750b8d56fcf909ca | c212de77dbdb7f7b5a461e2fca334805bbb9c069 | refs/heads/main | 2023-06-21T02:53:41.312226 | 2021-07-23T05:58:41 | 2021-07-23T05:58:41 | 388,641,996 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | # -*- coding: utf-8 -*-
# @Time : 2021/7/1 14:47
# @Author : Dotao
# @File : common_func.py
import os
import configparser
def load_path():
    """
    Return the absolute path of the ``antivirus.conf`` file that lives
    next to this module.

    :return: absolute path to ``antivirus.conf``
    """
    # directory containing this module; equivalent to the former
    # abspath(dirname(...) + sep + ".") dance, without string surgery
    conf_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(conf_dir, "antivirus.conf")


def load_conf(software):
    """
    Return the executable location configured for an antivirus product.

    :param software: section name in ``antivirus.conf``
    :return: value of the ``absoluteLocation`` option in that section
    :raises configparser.NoSectionError: if the section is missing
        (also the case when the conf file does not exist, since
        ``read`` silently ignores missing files)
    :raises configparser.NoOptionError: if the option is missing
    """
    cf = configparser.ConfigParser()
    # NOTE(review): encoding is left at the platform default on purpose;
    # confirm the actual encoding of antivirus.conf before pinning one.
    cf.read(load_path())
    return cf.get(software, "absoluteLocation")
| [
"mstaotao@gmail.com"
] | mstaotao@gmail.com |
76fa71b42e400f7e50931870441ed32bf8cb2f16 | 6e833ec2eeb905e74ed5c6602c951f028d4a7768 | /features/preprocessing.py | de0ae8d321a7d40cb6bf12235b233514f2c7401c | [
"Apache-2.0"
] | permissive | Fakhraddin/DeepOnKHATT-1 | 4a930c204a2c67774de68c3bfae7ddb1f8c6f504 | 0024c3d45d050901a1b0e7fb6491ffad0cdd5bf4 | refs/heads/main | 2023-03-21T17:04:03.511116 | 2021-03-18T22:53:22 | 2021-03-18T22:53:22 | 349,980,764 | 1 | 0 | Apache-2.0 | 2021-03-21T11:40:16 | 2021-03-21T11:40:16 | null | UTF-8 | Python | false | false | 7,337 | py |
import math
import numpy as np
from scipy.stats import linregress
from scipy.special import binom
"""
All normalization steps are based on the following two papers:
Liwicki, M. ; Bunke, H.: HMM-based on-line recognition of handwritten whiteboard notes.
In: Tenth International Workshop on Frontiers in Handwriting Recognition,
Suvisoft, 2006
Jaeger, S. ; Manke, S. ; Waibel, A.: Npen++: An On-Line Handwriting Recognition System.
In: 7th International Workshop on Frontiers in Handwriting Recognition,
2000, S.249–260
"""
def preprocess_handwriting(ink, args):
    """
    Applies the normalization steps named in ``args`` to the ink
    (an array of ``[x, y, penup]`` rows), always in this fixed order:
    "slope", "origin", "flip_h", "slant", "height", "resample",
    "smooth".  Names not in this list are silently ignored; in
    particular "delayed" is NOT handled here -- call
    :func:`remove_delayed_strokes` separately for that.
    The object that "ink" points to MAY BE CHANGED in place by some of
    the steps (e.g. "flip_h" and "height"); use the returned array.
    """
    if "slope" in args:
        ink = correct_slope(ink)
    if "origin" in args:
        ink = move_to_origin(ink)
    # Added: optional horizontal mirroring (e.g. for right-to-left scripts)
    if "flip_h" in args:
        ink = flip_horizontally(ink)
    if "slant" in args:
        ink = correct_slant(ink)
    if "height" in args:
        ink = normalize_height(ink)
    if "resample" in args:
        ink = resampling(ink)
    if "smooth" in args:
        ink = smoothing(ink)
    return ink
def flip_horizontally(ink):
    """
    Mirrors the ink along the vertical axis, in place: every x becomes
    max_x - x, so the ink stays in the same x-range but reversed.
    Returns the same (modified) array.
    """
    rightmost = ink[:, 0].max()
    ink[:, 0] = rightmost - ink[:, 0]
    return ink
def move_to_origin(ink):
    """
    Translates the ink so that the lower left corner of its bounding
    box lies on the origin afterwards.  Returns a new array; pen-up
    flags are untouched.
    """
    shift = [ink[:, 0].min(), ink[:, 1].min(), 0]
    return ink - shift
def flip_vertically(ink):
    """
    Mirrors the ink along the horizontal axis: every y coordinate is
    replaced by max_y - y, swapping top and bottom.  x coordinates and
    pen-up flags are untouched.  Returns a new array.
    """
    flipped = ink.copy()
    flipped[:, 1] = ink[:, 1].max() - ink[:, 1]
    return flipped
def correct_slope(ink):
    """
    Rotates the ink so that the linear regression line through all of
    its points becomes horizontal, then shifts the result so its
    bounding box starts at the origin.  Returns a new array.
    """
    slope = linregress(ink[:, :2])[0]
    theta = math.atan(-slope)
    cos_t = math.cos(theta)
    sin_t = math.sin(theta)
    x0 = min(ink[:, 0])
    y0 = min(ink[:, 1])
    # rotate every point by theta around (x0, y0)
    dx = ink[:, 0] - x0
    dy = ink[:, 1] - y0
    new_x = x0 + cos_t * dx - sin_t * dy
    new_y = y0 + sin_t * dx + cos_t * dy
    rotated = numpy.column_stack([new_x, new_y, ink[:, 2]])
    # move the rotated ink so that its new bounding box touches the axes
    return rotated - [min(new_x), min(new_y), 0]
def correct_slant(ink):
    """
    Removes the most dominant slant-angle from the ink.

    The angle of every pen-down segment is measured, collected in a
    histogram over [-90, 90] degrees (shifted to [0, 180] internally),
    weighted with a discrete binomial (approximately Gaussian)
    distribution and smoothed; the ink is then sheared horizontally by
    the most frequent angle.

    :param ink: array of [x, y, penup] rows
    :return: new array with the slant removed
    """
    last_point = ink[0]
    angles = []
    for cur_point in ink[1:]:
        # check for penup
        if last_point[2] == 1:
            # don't measure angles for "invisible" lines
            last_point = cur_point
            continue
        if (cur_point[0] - last_point[0]) == 0:
            angles.append(90)
        else:
            angle = math.atan((cur_point[1] - last_point[1]) / float(cur_point[0] - last_point[0])) * 180 / math.pi
            angles.append(int(angle))
        last_point = cur_point
    # we move angles from [-90,90] to [0, 180] for calculations
    angles = numpy.array(angles) + 90
    bins = numpy.bincount(angles, minlength=181)
    # weighting all angles with discrete standard gaussian distribution;
    # sum(weights) is a numpy scalar, so the division broadcasts the
    # Python list into an ndarray
    weights = [binom(181, k)/181.0 for k in range(1, 182)]
    weights /= sum(weights)
    bins = bins.astype(float) * weights
    # smoothing entries with neighbours, first and last entries remain
    # unchanged.  The loop covers the 179 interior bins only: starting at
    # i=0 would wrap around to bins[-1] and produce 183 entries, shifting
    # argmax (and therefore the detected slant) by one degree.
    gauss = lambda p, c, n: 0.25 * p + 0.5 * c + 0.25 * n
    smoothed = [bins[0]] + [gauss(bins[i-1], bins[i], bins[i+1]) for i in range(1, len(bins)-1)] + [bins[len(bins)-1]]
    # reverse interval shift
    slant = numpy.argmax(smoothed) - 90
    min_x = min(ink[:, 0])
    min_y = min(ink[:, 1])
    # horizontal shear: x' = x - tan(slant) * (y - min_y)
    rotate = lambda x, y: min_x + (x - min_x) - math.tan(slant * math.pi / 180) * (y - min_y)
    return numpy.array([[rotate(x, y), y, p] for [x, y, p] in ink])
def resampling(ink, step_size=10):
    """
    Replaces given ink by a recalculated sequence of equidistant points.

    :param ink: array of [x, y, penup] rows
    :param step_size: target distance between consecutive resampled
        points; the first and last original points are always kept
    :return: new array of resampled points
    """
    #print('resampling')
    t = []
    t.append(ink[0, :])
    i = 0
    length = 0
    current_length = 0
    old_length = 0
    curr, last = 0, None
    len_ink = ink.shape[0]
    while i < len_ink:
        current_length += step_size
        # advance along the polyline until the accumulated arc length
        # reaches the next multiple of step_size
        while length <= current_length and i < len_ink:
            i += 1
            if i < len_ink:
                last = curr
                curr = i
                old_length = length
                # NOTE(review): sqrt(dx**2) + sqrt(dy**2) equals
                # |dx| + |dy|, i.e. an L1 segment length, not the
                # Euclidean sqrt(dx**2 + dy**2) -- confirm this is the
                # intended distance measure.
                length += math.sqrt((ink[curr, 0] - ink[last, 0])**2) + math.sqrt((ink[curr, 1] - ink[last, 1])**2)
        if i < len_ink:
            # linear interpolation between the two points bracketing the
            # desired arc length; the penup flag of the earlier point wins
            c = (current_length - old_length) / float(length-old_length)
            x = ink[last, 0] + (ink[curr, 0] - ink[last, 0]) * c
            y = ink[last, 1] + (ink[curr, 1] - ink[last, 1]) * c
            p = ink[last, 2]
            t.append([x, y, p])
    t.append(ink[-1, :])
    #np.savetxt('resample.txt', np.array(t))
    return numpy.array(t)
def normalize_height(ink, new_height=120):
    """
    Scales the ink IN PLACE so that the height of its bounding box
    becomes ``new_height``; x is scaled by the same factor so the
    aspect ratio is preserved.  Degenerate ink whose points all share
    one y coordinate is returned unchanged, since scaling it would
    divide by zero.
    TODO: try to scale core height instead

    :param ink: array of [x, y, penup] rows, modified in place
    :param new_height: target bounding box height
    :return: the scaled ink (same object)
    """
    old_height = ink[:, 1].max() - ink[:, 1].min()
    if old_height == 0:
        # all points on one horizontal line: no meaningful scale factor
        return ink
    scale_factor = new_height / float(old_height)
    ink[:, :2] *= scale_factor
    return ink
def smoothing(ink):
    """
    Smooths the ink with a (0.25, 0.5, 0.25) sliding window: point p(t)
    is averaged with the un-smoothed p(t-1) and p(t+1).  The first and
    last points are dropped, pen-up flags are carried over unchanged
    from the original points, and the final point is forced to be a
    pen-up.  Returns a new array.
    """
    smoothed = 0.25 * ink[:-2] + 0.5 * ink[1:-1] + 0.25 * ink[2:]
    # the averaging above also blurred the penup column; restore it
    smoothed[:, 2] = ink[1:-1, 2]
    # the original last (penup) point was dropped, so the new last point
    # must carry the penup flag
    smoothed[-1, 2] = 1
    return smoothed
def remove_delayed_strokes(ink):
    """
    Removes points of delayed strokes (segments between two penups)
    from the ink. A stroke is removed when the right edge of its
    bounding box lies strictly to the left of the right edge of the
    last kept (non-delayed) stroke.

    :param ink: array of [x, y, penup] rows; must contain at least one
        penup so a first stroke exists
    :return: tuple ``(new_ink, delayed)`` with the kept points and the
        removed strokes
    """
    #print('delayed')
    stroke_endpoints = numpy.where(ink[:, 2] == 1)[0]
    # first stroke is by convention never delayed
    begin = stroke_endpoints[0] + 1
    new_ink = []
    new_ink.extend(ink[:begin, :])
    delayed = []
    # delayed strokes must begin and end left of the current orientation point
    orientation_point = ink[begin-1, :2]
    for end in stroke_endpoints[1:]:
        stroke = ink[begin:end+1, :]
        max_x = max(stroke[:, 0])
        begin = end + 1
        if max_x >= orientation_point[0]:
            new_ink.extend(stroke)
            # the last point of the kept stroke becomes the new reference
            orientation_point = ink[begin-1, :2]
        else:
            delayed.append(stroke)
    # NOTE(review): numpy.array(delayed) builds a ragged array when the
    # delayed strokes have different lengths; recent numpy versions raise
    # a ValueError for that unless dtype=object is given -- confirm the
    # expected numpy version / intended return shape.
    return numpy.array(new_ink), numpy.array(delayed)
| [
"fakhri100@gmail.com"
] | fakhri100@gmail.com |
20b893640de6b5e7a6b538268933029cf26bef41 | ca25949ff3971d7577f60834a6461dee6898408a | /PlaylistConversion/music_library.py | 9eadb0a09d4946a869f114278b04eda4fcaab923 | [] | no_license | sienatime/playlist_conversion | 0d2d44a98c9fec0220b6f3195babaaf8b1c9e606 | 7298e4f3933cdc0592069d58051854edd0267d3e | refs/heads/master | 2021-01-22T01:18:45.475319 | 2017-09-02T19:02:59 | 2017-09-02T19:02:59 | 102,216,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,517 | py | # sample data from google play music library (abbreviated):
# {
# 'id':'5924d75a-931c-30ed-8790-f7fce8943c85',
# 'nid':'Txsffypukmmeg3iwl3w5a5s3vzy',
# 'artistId':[
# 'Aod62yyj3u3xsjtooghh2glwsdi'
# ],
# 'title':'Haxprocess',
# 'artist':'Opeth',
# }
class MusicLibrary:
    """Index of Google Play Music tracks, searchable by artist and title."""

    def __init__(self, gm_songs):
        # gm_songs: iterable of Google Play track dicts with at least the
        # 'id', 'title' and 'artist' keys (see the sample above)
        self.artists, self.songs = self.make_library(gm_songs)

    def make_library(self, gm_songs):
        """Build and return the lookup tables (artists, songs).

        artists maps artist name -> Artist; songs maps title -> Song
        (on duplicate titles the last one processed wins).

        The original duplicated the song-creation logic in both branches
        of the new-vs-known-artist if/else; here only the fetch-or-create
        step is conditional.
        """
        artists = {}  # key: artist name, value: Artist
        songs = {}
        for gm_song in gm_songs:
            artist_name = gm_song['artist']
            artist = artists.get(artist_name)
            if artist is None:
                artist = Artist(artist_name)
                artists[artist_name] = artist
            song = Song(gm_song['id'], gm_song['title'])
            artist.add_song(song)
            songs[song.title] = song
        return artists, songs

    def find_song(self, artist_name, song_title):
        """Prefer an exact artist+title match; fall back to title-only lookup."""
        artist = self.artists.get(artist_name)
        if artist and artist.get_song(song_title):
            return artist.get_song(song_title)
        return self.songs.get(song_title)
class Artist:
    """An artist together with their songs, keyed by song title."""

    def __init__(self, name):
        self.name = name
        self.songs = {}  # title -> Song

    def add_song(self, song):
        # keep the first song registered under a given title
        already_known = self.songs.get(song.title)
        if not already_known:
            self.songs[song.title] = song

    def get_song(self, title):
        """Return the song with this title, or None if unknown."""
        return self.songs.get(title)
class Song:
    """A single track: its Google Play id plus display title."""

    def __init__(self, id, title):
        self.id, self.title = id, title
| [
"siena@indiegogo.com"
] | siena@indiegogo.com |
4bc05de0d77b4db4af4ca5c0c12874e7495fee27 | 14423796c9ef1ac17314b27f17e8f05e7079d666 | /producthunter/urls.py | 32d287cbeb417ae9614ec28c37c33b5a0413ab0b | [] | no_license | stiehlrobot/producthunter-project | 0a676d098b565e4535e84c77c5dc9767e12a852f | f4c16a49df8519d281b28c27778c5164fb916ce9 | refs/heads/master | 2020-12-19T19:50:59.783968 | 2020-01-25T13:41:29 | 2020-01-25T13:41:29 | 235,834,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | """producthunter URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from products import views
from django.conf import settings
from django.conf.urls.static import static
# URL routing table: Django admin, landing page, then the two app URLConfs.
# static(...) appends serving of MEDIA_URL uploads — typically only active
# with DEBUG on; confirm how media is served in deployment.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.home, name="home"),  # site landing page
    path('accounts/', include('accounts.urls')),
    path('products/', include('products.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"olli.ohls@gmail.com"
] | olli.ohls@gmail.com |
b029bb2e6b1204f2893534ddf7904567b9bbc0d5 | 915c98ce84ed155b5e4d855ffaa7cbc8e9a10712 | /Notebooks/scripts/utils.py | a26ad5203829a0055b4eefd7498c5dbd4593d672 | [
"MIT"
] | permissive | gallardorafael/EfficientMobileDL_Bacterial | e22131c1920a91ef32d86a11480bcf4d20ed16fa | 15550e9d094f65760f5c35de4c115dd1b9ad273d | refs/heads/main | 2023-03-24T06:26:31.971821 | 2021-03-27T04:06:03 | 2021-03-27T04:06:03 | 350,090,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,120 | py | from sklearn.metrics import f1_score, precision_score, recall_score, classification_report
import pandas as pd
import torch
class prediction:
def __init__(self, ground_truth, top5_classes, top5_probs):
self.ground_truth = ground_truth
self.top5_classes = top5_classes
self.top5_probs = top5_probs
def get_gt(self):
return self.ground_truth
def get_top5_classes(self):
return self.top5_classes
def get_top5_probs(self):
return self.top5_probs
def get_classes(probabilities, idx_to_class):
    """Return the top-5 (probabilities, class labels) for one sample.

    `probabilities` is a (1, num_classes) score tensor; the five largest
    scores are renormalised with softmax and mapped to human-readable
    labels via `idx_to_class`.
    """
    values, indices = probabilities.topk(5)
    values = torch.nn.functional.softmax(values, dim=1)
    # batch size is assumed to be 1: take the first (only) row
    prob_list = values.detach().type(torch.FloatTensor).numpy().tolist()[0]
    index_list = indices.detach().type(torch.FloatTensor).numpy().tolist()[0]
    # the indices became floats above; dict lookup still works since
    # e.g. 3.0 hashes the same as 3
    label_list = [idx_to_class[index] for index in index_list]
    return prob_list, label_list
def test_accuracy(model, test_loader):
    """Run the model over test_loader and collect per-sample predictions.

    Assumes the loader yields batches of size 1 (get_classes and the
    label extraction below only read element 0).  Returns a list of
    `prediction` objects, one per batch.

    Changes vs. the original: the dead local `accuracy = 0` was removed,
    and the loop-invariant idx_to_class mapping is built once instead of
    once per batch.
    """
    evaluation_results = []
    model.eval()
    if torch.cuda.is_available():
        model = model.cuda()
    # index -> human-readable class label; invariant across batches
    idx_to_class = {value: key for key, value in model.class_to_idx.items()}
    with torch.no_grad():
        for images, labels in iter(test_loader):
            if torch.cuda.is_available():
                images = images.cuda()
                labels = labels.cuda()
            output = model.forward(images)
            # model appears to output log-probabilities (exp'd here) —
            # presumably a log_softmax head; confirm against the model
            probabilities = torch.exp(output)
            probs, classes = get_classes(probabilities, idx_to_class)
            # translate this sample's numeric ground-truth label
            hr_label = labels.data.detach().type(torch.FloatTensor).numpy().tolist()[0]
            hr_label = idx_to_class[hr_label]
            evaluation_results.append(prediction(hr_label, classes, probs))
    print("Finished.")
    return evaluation_results
def results_pandas(model, test_loader):
    """Evaluate the model and return per-sample results as a DataFrame.

    Columns: 'Ground truth', 'Top 1 prediction', 'Certainty' (top-1
    probability), and 0/1 flags 'Top 1 Correct' / 'Top 5 Correct'.

    Changes vs. the original: the unused gt/top1/certainty lists were
    removed, and the repeated getter calls are cached per result.
    """
    results = test_accuracy(model, test_loader)
    results_dict = {'Ground truth': [],
                    'Top 1 prediction': [],
                    'Certainty': [],
                    'Top 1 Correct': [],
                    'Top 5 Correct': []}
    for result in results:
        gt = result.get_gt()
        top5 = result.get_top5_classes()
        results_dict['Ground truth'].append(gt)
        results_dict['Top 1 prediction'].append(top5[0])
        results_dict['Certainty'].append(result.get_top5_probs()[0])
        results_dict['Top 1 Correct'].append(1 if gt == top5[0] else 0)
        results_dict['Top 5 Correct'].append(1 if gt in top5 else 0)
    return pd.DataFrame(results_dict)
def get_scores(model, test_loader):
    """Evaluate the model and return
    [top1_accuracy, top5_accuracy, precision, recall, f1]
    where precision/recall/F1 are weighted averages over classes.
    """
    results_df = results_pandas(model, test_loader)
    y_true = results_df['Ground truth'].tolist()
    y_pred = results_df['Top 1 prediction'].tolist()
    # shared keyword arguments for the three sklearn metrics
    common = dict(y_true=y_true, y_pred=y_pred,
                  average='weighted', zero_division=0)
    weighted_f1 = f1_score(**common)
    weighted_precision = precision_score(**common)
    weighted_recall = recall_score(**common)
    top1_acc = results_df['Top 1 Correct'].mean()
    top5_acc = results_df['Top 5 Correct'].mean()
    return [top1_acc, top5_acc, weighted_precision, weighted_recall, weighted_f1]
| [
"rafael.gallardo@alumno.buap.mx"
] | rafael.gallardo@alumno.buap.mx |
6645c9aa5ca275f9558b5c98dafa3d4bb9492f6c | f5d8b76eaa04c9477a6f17773472dcdfd8b93f98 | /residue_order.py | 9d4cbf461f974f8416a5384ea4a5911a45d9e27c | [] | no_license | salockhart/pycrypto | 7c6704568cd822cc6cb5c5bdf2745ed0b7f5a141 | f009e08061e32c1231624de155b935f1acb64d45 | refs/heads/master | 2021-08-16T11:33:57.118142 | 2017-11-19T19:04:41 | 2017-11-19T19:04:41 | 111,323,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | import sys
g = int(sys.argv[1])
p = int(sys.argv[2])
for k in range(0, p):
if k != 0 and pow(g, k, p) == 1:
print "Order =", k
break | [
"salexlockhart@gmail.com"
] | salexlockhart@gmail.com |
13a23e2e0b445168886f0f9d892a6952c598cf96 | cd32f14f735ed44f66e7b28f411575c4721d4406 | /Wall.py | 5e896813384d910f5dec87b923f4f4d48539302f | [] | no_license | ragarg/python_game | f549ec2b0be292de83be52ef5f34435ca2e455ab | f7123477fe09f84512b4bcf25eeec15c8dcb9361 | refs/heads/master | 2021-05-10T22:38:25.682405 | 2018-02-21T17:38:29 | 2018-02-21T17:38:29 | 118,261,974 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | import Object
class Wall(Object.Object):
def __init__(self, x, y, image):
Object.Object.__init__(self, x, y, image)
self.size = (24, 24)
self.patency = 0
def GetSize(self):
return self.size
| [
"noreply@github.com"
] | ragarg.noreply@github.com |
921b7004ad7a12099d737586a481cb52324834da | 0ea89b50c65afa72bad56f352584d8e6e20e50cd | /python/list2_p16.py | b85b2e216da1260a4a2d7a8281bf59842afed90e | [] | no_license | GreatZaNaRak/python | 8ca62705c18f5ecb7d91573899805cc1fb015eb1 | 76d70ff399618911d85631cc06647a74bf33722f | refs/heads/master | 2021-08-08T03:58:54.302812 | 2021-07-07T13:52:08 | 2021-07-07T13:52:08 | 141,234,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | n,c = [int(e) for e in input().split()]
vec1 = []
vec2 = []
result = []
for i in range(n):
num1 = [int(e) for e in input().split()]
vec1.append(num1)
for i in range(n):
num2 = [int(e) for e in input().split()]
vec2.append(num2)
for i in range(n):
check = []
for j in range(len(vec1[i])):
check.append(vec1[i][j]+vec2[i][j])
result.append(check)
for i in result :
for j in i :
print(j,end= ' ')
print('',end='\n')
| [
"noreply@github.com"
] | GreatZaNaRak.noreply@github.com |
0e82345bfc2a4898cdc3ce00bb8f00de3a318603 | 8215d7b33a80eeef592ca44827269ae79b195563 | /project/LeonWu/cluster.py | 5c118c29f641d5a7a75182f6686df1e37e65358d | [] | no_license | SiyuanWuSFU/CMPT353-Computational-Data-Science | 74746886f053cbbbdb74022820eb39e834e258eb | 5e3eed325ee45115e7f38cc41592eb86beff3546 | refs/heads/main | 2023-08-15T03:30:31.735344 | 2021-09-08T05:32:34 | 2021-09-08T05:32:34 | 403,918,705 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | # The code is adapted from Zuo Yifan
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
def lat_lon_mean(l):
    """Return the (mean latitude, mean longitude) of a list of
    (lat, lon) pairs."""
    total_lat = sum(pair[0] for pair in l)
    total_lon = sum(pair[1] for pair in l)
    count = len(l)
    return (total_lat / count, total_lon / count)
def cluster_mean(file, num_cluster):
    """
    Cluster the (lat, lon) points of a CSV file into `num_cluster`
    KMeans clusters and return the per-cluster mean coordinates as a
    list of (lat, lon) tuples, ordered by cluster label.
    """
    frame = pd.read_csv(file, index_col=0)
    coords = np.stack([frame['lat'], frame['lon']], axis=1)
    labels = KMeans(n_clusters=num_cluster).fit_predict(coords)
    # group the original rows by their assigned cluster label
    # (labels is indexed with the dataframe index — assumes it is 0..n-1)
    grouped = {}
    for idx, row in frame.iterrows():
        grouped.setdefault(labels[idx], []).append((row['lat'], row['lon']))
    return [lat_lon_mean(grouped[cluster]) for cluster in range(num_cluster)]
| [
"swa173@sfu.ca"
] | swa173@sfu.ca |
60872802041012410a8387a6fe54382762e1707a | 0ff778d92c4c131b3dbb5e5ca60285080fd4cce6 | /parse_darknet.py | ef6632253c8eb1b2b7961630a3a3c228278806bf | [
"MIT",
"WTFPL"
] | permissive | victorlwchen/tensorflow-yolov3 | c2555e1f7ec468ed181ac8c9be90919d9fe7c3e7 | 9cbd3f94794e410cd1b7968c39c98d708e8b4919 | refs/heads/master | 2020-07-28T14:24:41.848376 | 2019-10-25T09:17:20 | 2019-10-25T09:17:20 | 209,438,491 | 0 | 0 | null | 2019-09-19T01:46:08 | 2019-09-19T01:46:08 | null | UTF-8 | Python | false | false | 2,693 | py | # convert labeled file
import os
from PIL import Image
# Input lists of image paths; each listed .jpg has a sibling .txt label
# file in darknet format ("class x_center y_center w h", normalized),
# as parsed by convert_all_files below.
train_file_path='/mnt/darknet_VOC/train.txt'
test_file_path='/mnt/darknet_VOC/2007_test.txt'
# Output files written by convert_all_files in the
# "img_path xmin,ymin,xmax,ymax,class ..." absolute-pixel format.
train_target_path = '/mnt/darknet_VOC/tensor_train.txt'
test_target_path = '/mnt/darknet_VOC/tensor_test.txt'
def new_class(num):
    """Remap a darknet class-id string to a compact new id.

    Returns "0"/"1"/"2" for ids '18'/'37'/'38'; any other id maps to None.
    """
    remap = {
        '18': "0",
        '37': "1",
        '38': "2",
    }
    return remap.get(num)
def convert_all_files(path, target):
    """Convert darknet-format labels to one-line-per-image annotations.

    Reads image paths from `path` (one per line); for each image, opens it
    with PIL to get its pixel size, parses the sibling .txt darknet label
    file (normalized "class x_center y_center w h" rows), converts each box
    to absolute-pixel corners, and writes lines of the form
    "img_path xmin,ymin,xmax,ymax,class ..." to `target`.
    Images that fail to open or parse are skipped (exception printed).
    """
    ary=[]       # output lines, one per image that yielded at least one box
    count=0      # number of images successfully opened (printed at the end)
    with open(path,'r') as fp:
        img_paths = fp.readlines()
        for img_path in img_paths:
            img_path = img_path.strip()
            # darknet convention: label file sits next to the image
            label_path=img_path.replace('.jpg','.txt')
            try:
                im = Image.open(img_path)
                width, height = im.size
                count+=1
                with open(label_path, 'r') as label:
                    lines = label.readlines()
                    new_line=''
                    objs=[]
                    for line in lines:
                        items=line.splitlines()[0].split(' ')
                        # de-normalize center/size to pixels
                        x_center=float(items[1])*width
                        y_center=float(items[2])*height
                        obj_w=float(items[3])*width
                        obj_h=float(items[4])*height
                        # center/size -> corner coordinates (as strings)
                        x_min=str(int(x_center-(obj_w/2)))
                        y_min=str(int(y_center-(obj_h/2)))
                        x_max=str(int(x_center+(obj_w/2)))
                        y_max=str(int(y_center+(obj_h/2)))
                        #class_id=new_class(items[0])
                        class_id=items[0]
                        # NOTE(review): these only nudge out-of-range corners
                        # by 1 pixel rather than clamping to [0, width/height];
                        # boxes more than 1px outside stay outside — confirm intent.
                        if (int(x_min) < 0):
                            x_min=str(int(x_min)+1)
                        if (int(y_min) < 0):
                            y_min=str(int(y_min)+1)
                        if (int(x_max) > width ):
                            x_max=str(int(x_max)-1)
                        if(int(y_max) > height):
                            y_max=str(int(y_max)-1)
                        # NOTE(review): dead while the new_class remap above is
                        # commented out — items[0] is never None
                        if class_id is None:
                            continue
                        objs.append(x_min+','+y_min+','+x_max+','+y_max+','+class_id)
                    if len(objs) != 0:
                        # image path followed by all boxes, space-separated
                        for obj in objs:
                            new_line+=' '+obj
                        ary.append(img_path+new_line)
            except Exception as e:
                # best-effort conversion: report and skip bad images/labels
                print('Exception '+str(e))
                pass
    print(count)
    with open(target, "w") as txt_file:
        for line in ary:
            txt_file.write(line + "\n")
# Run the conversion for both the training and the test annotation lists.
convert_all_files(train_file_path, train_target_path)
convert_all_files(test_file_path, test_target_path)
"victorlw_chen@asus.com"
] | victorlw_chen@asus.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.