| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |
330b9a16243835ebf2e6255abc9c539afae9e81c
|
9c3a23eddc4104d2b60252e6b7934983e174f319
|
/avalon/cli/__init__.py
|
5746e913881469c57ccf04f6d792b80eabc3bcae
|
[
"MIT"
] |
permissive
|
56quarters/avalonms
|
b5e6e7949eee7f06b99061895c8ca11acaa154a0
|
2291f8457ca86f2e963d518abbdce04af02d9054
|
refs/heads/master
| 2021-10-19T06:43:23.052941
| 2019-02-18T21:52:51
| 2019-02-18T21:52:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 945
|
py
|
# -*- coding: utf-8 -*-
#
# Avalon Music Server
#
# Copyright 2012-2015 TSH Labs <projects@tshlabs.org>
#
# Available under the MIT license. See LICENSE for details.
#
"""CLI entry points associated with the Avalon Music Server."""
from __future__ import absolute_import, print_function, unicode_literals
import signal
import sys
import avalon.compat
def install_sigint_handler():
"""Install a simple signal handler to quietly exit on SIGINT."""
def handler(signum, _):
print("Exiting on signal {0}...".format(signum), file=sys.stderr)
sys.exit(1)
signal.signal(signal.SIGINT, handler)
def input_to_text(s):
"""Convert the given byte string or text type to text using the
file system encoding of the current system.
:param basestring s: String or text type to convert
:return: The string as text
:rtype: unicode
"""
return avalon.compat.to_text(s, sys.getfilesystemencoding())
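# A minimal usage sketch of the two helpers above, assuming this module is
# importable as `avalon.cli` and that avalon.compat.to_text decodes bytes
# with the given encoding, as documented:
#
#     from avalon.cli import install_sigint_handler, input_to_text
#     install_sigint_handler()            # Ctrl-C now prints a message and exits with status 1
#     text = input_to_text(b"some-path")  # bytes decoded with sys.getfilesystemencoding()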
|
[
"nick@tshlabs.org"
] |
nick@tshlabs.org
|
43441c1fa1062719c7a1743f55dffdb8e2db487a
|
8b9108ecf99a88ebd9995fd91aadc9c0a1258ef0
|
/tests/test_type.py
|
e0a395087c0ccb7a8f814153d9e157d73b25e9de
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
nkoep/python-libjit
|
5129abe331b266433bfe0b4358a09197be92b914
|
6ca42dc44432cd54c61e6066d382b7f5a9d58f38
|
refs/heads/master
| 2021-01-01T19:20:33.277266
| 2014-06-18T15:42:10
| 2014-06-18T15:42:10
| 20,837,857
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,455
|
py
|
import unittest
import jit
class TestType(unittest.TestCase):
def test_constructors(self):
with self.assertRaises(TypeError):
jit.Type()
def test_create_struct(self):
struct = jit.Type.create_struct([jit.Type.INT, jit.Type.INT])
self.assertTrue(struct.is_struct())
def test_create_union(self):
union = jit.Type.create_union((jit.Type.INT, jit.Type.UINT))
self.assertTrue(union.is_union())
def test_create_signature(self):
signature = jit.Type.create_signature(
jit.ABI_FASTCALL, None, (jit.Type.INT,))
self.assertIsInstance(signature, jit.Type)
self.assertTrue(signature.is_signature())
def test_create_pointer(self):
pointer = jit.Type.INT.create_pointer()
self.assertIsInstance(pointer, jit.Type)
self.assertEqual(pointer.get_ref(), jit.Type.INT)
self.assertIsNone(jit.Type.INT.get_ref())
def test_create_tagged(self):
tagged_type = jit.Type.INT.create_tagged(jit.TYPETAG_NAME, data="data")
self.assertIsInstance(tagged_type, jit.Type)
self.assertEqual(tagged_type.get_tagged_type(), jit.Type.INT)
def test_set_names(self):
struct = jit.Type.create_struct((jit.Type.INT,))
name = "some_int"
self.assertTrue(struct.set_names([name]))
self.assertEqual(struct.get_name(0), name)
self.assertIsNone(struct.get_name(1))
# Names must be strings.
with self.assertRaises(TypeError):
jit.Type.VOID.set_names((1,))
def test_set_size_and_alignment(self):
fields = (jit.Type.INT, jit.Type.VOID_PTR)
struct = jit.Type.create_struct(fields)
size = alignment = -1
struct.set_size_and_alignment(size, alignment)
# FIXME: For some reason, get_size() returns the optimal size here
# whereas get_alignment() just returns -1.
self.assertIsInstance(struct.get_size(), long)
self.assertEqual(struct.get_alignment(), alignment)
def test_set_offset(self):
struct = jit.Type.create_struct([jit.Type.INT, jit.Type.VOID])
offset = 7
struct.set_offset(1, offset)
self.assertEqual(struct.get_offset(1), offset)
def test_get_kind(self):
self.assertEqual(jit.Type.INT.get_kind(), jit.TYPE_INT)
def test_num_fields(self):
fields = (jit.Type.INT, jit.Type.VOID)
struct = jit.Type.create_struct(fields)
num_fields = struct.num_fields()
self.assertIsInstance(num_fields, long)
self.assertEqual(num_fields, len(fields))
def test_get_field(self):
field = jit.Type.VOID
union = jit.Type.create_union([field])
self.assertEqual(union.get_field(0), field)
def test_find_name(self):
self.assertEqual(jit.Type.INT.find_name("name"), jit.INVALID_NAME)
fields = (jit.Type.INT, jit.Type.VOID)
struct = jit.Type.create_struct(fields)
names = ["field1", "field2"]
struct.set_names(names)
self.assertEqual(struct.find_name("field2"), names.index("field2"))
def test_num_params(self):
signature = jit.Type.create_signature(
jit.ABI_VARARG, jit.Type.VOID, (jit.Type.VOID,))
self.assertEqual(signature.num_params(), 1)
def test_get_return(self):
signature = jit.Type.create_signature(
jit.ABI_VARARG, jit.Type.VOID, (jit.Type.VOID,))
self.assertEqual(signature.get_return(), jit.Type.VOID)
# Primitive type is not a signature type.
self.assertIsNone(jit.Type.INT.get_return())
def test_get_param(self):
signature = jit.Type.create_signature(
jit.ABI_CDECL, jit.Type.INT, (jit.Type.INT,))
param = signature.get_param(0)
self.assertIsInstance(param, jit.Type)
self.assertEqual(param.get_kind(), jit.TYPE_INT)
self.assertIsNone(signature.get_param(2)) # Out-of-range
self.assertIsNone(jit.Type.INT.get_param(-1)) # Not a signature
def test_get_abi(self):
signature = jit.Type.create_signature(
jit.ABI_VARARG, jit.Type.INT, (jit.Type.INT,))
self.assertEqual(signature.get_abi(), jit.ABI_VARARG)
# Non-signature types always return jit.ABI_CDECL.
self.assertEqual(jit.Type.INT.get_abi(), jit.ABI_CDECL)
def test_tagged_type(self):
untagged_type = jit.Type.INT
# Untagged types should return None.
self.assertIsNone(untagged_type.get_tagged_type())
self.assertEqual(untagged_type.get_tagged_kind(), -1)
kind = jit.TYPETAG_CONST
type_ = jit.Type.INT.create_tagged(kind)
underlying = jit.Type.VOID
type_.set_tagged_type(underlying=underlying)
self.assertEqual(type_.get_tagged_type(), underlying)
self.assertEqual(type_.get_tagged_kind(), kind)
def test_set_get_tagged_data(self):
tagged_type = jit.Type.INT.create_tagged(jit.TYPETAG_NAME, data=None)
data = "data"
tagged_type.set_tagged_data(data=data)
self.assertEqual(tagged_type.get_tagged_data(), data)
def test_is_primitive(self):
self.assertTrue(jit.Type.VOID.is_primitive())
def test_best_alignment(self):
self.assertIsInstance(jit.Type.best_alignment(), long)
def test_normalize(self):
type_ = jit.Type.VOID
type_normalized = type_.normalize()
self.assertEqual(type_, type_normalized)
self.assertTrue(type_ is type_normalized)
def test_remove_tags(self):
tagged_type = jit.Type.INT.create_tagged(jit.TYPETAG_NAME, data="data")
collapsed_type = tagged_type.remove_tags()
self.assertNotEqual(tagged_type, collapsed_type)
self.assertEqual(collapsed_type, jit.Type.INT)
def test_promote_int(self):
types = [(jit.Type.SBYTE, jit.Type.INT),
(jit.Type.USHORT, jit.Type.UINT),
(jit.Type.VOID_PTR, jit.Type.VOID_PTR)]
for type_, type_int_promoted in types:
self.assertEqual(type_.promote_int(), type_int_promoted)
def test_return_via_pointer(self):
self.assertIsInstance(jit.Type.INT.return_via_pointer(), bool)
def test_has_tag(self):
self.assertIsInstance(jit.Type.INT.has_tag(jit.TYPETAG_CONST), bool)
tagged_type = jit.Type.INT.create_tagged(jit.TYPETAG_NAME, data="data")
self.assertTrue(tagged_type.has_tag(jit.TYPETAG_NAME))
|
[
"niklas.koep@gmail.com"
] |
niklas.koep@gmail.com
|
bfe3839ba8029e94971e49a4a32139d7695702fd
|
765fedd094cd56a6cefac498b88a06f91b7ae473
|
/lib/xlayers/super_transformer.py
|
f21ac54abf14290d245e7e84c075a81de6d44017
|
[
"MIT"
] |
permissive
|
xgmiao/AutoDL-Projects
|
ed74220b1b779adfc98c848c87510bc3982bde15
|
0dbbc286c9f56136291590136fffd513af881c36
|
refs/heads/main
| 2023-04-27T17:04:09.115305
| 2021-05-10T06:14:06
| 2021-05-10T06:14:06
| 366,212,937
| 1
| 0
|
MIT
| 2021-05-11T00:41:13
| 2021-05-11T00:41:12
| null |
UTF-8
|
Python
| false
| false
| 4,402
|
py
|
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
#####################################################
from __future__ import division
from __future__ import print_function
import math
from functools import partial
from typing import Optional, Callable
import torch
import torch.nn as nn
import torch.nn.functional as F
import spaces
from .super_module import IntSpaceType
from .super_module import BoolSpaceType
from .super_module import LayerOrder
from .super_module import SuperModule
from .super_linear import SuperMLPv2
from .super_norm import SuperLayerNorm1D
from .super_attention import SuperAttention
class SuperTransformerEncoderLayer(SuperModule):
"""TransformerEncoderLayer is made up of self-attn and feedforward network.
This is a super model for TransformerEncoderLayer that can support search for the transformer encoder layer.
Reference:
- Paper: Attention Is All You Need, NeurIPS 2017
- PyTorch Implementation: https://pytorch.org/docs/stable/_modules/torch/nn/modules/transformer.html#TransformerEncoderLayer
Details:
the original post-norm version: MHA -> residual -> norm -> MLP -> residual -> norm
the pre-norm version: norm -> MHA -> residual -> norm -> MLP -> residual
"""
def __init__(
self,
d_model: IntSpaceType,
num_heads: IntSpaceType,
qkv_bias: BoolSpaceType = False,
mlp_hidden_multiplier: IntSpaceType = 4,
drop: Optional[float] = None,
act_layer: Callable[[], nn.Module] = nn.GELU,
order: LayerOrder = LayerOrder.PreNorm,
):
super(SuperTransformerEncoderLayer, self).__init__()
mha = SuperAttention(
d_model,
d_model,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=drop,
proj_drop=drop,
)
mlp = SuperMLPv2(
d_model,
hidden_multiplier=mlp_hidden_multiplier,
out_features=d_model,
act_layer=act_layer,
drop=drop,
)
if order is LayerOrder.PreNorm:
self.norm1 = SuperLayerNorm1D(d_model)
self.mha = mha
self.drop1 = nn.Dropout(drop or 0.0)
self.norm2 = SuperLayerNorm1D(d_model)
self.mlp = mlp
self.drop2 = nn.Dropout(drop or 0.0)
elif order is LayerOrder.PostNorm:
self.mha = mha
self.drop1 = nn.Dropout(drop or 0.0)
self.norm1 = SuperLayerNorm1D(d_model)
self.mlp = mlp
self.drop2 = nn.Dropout(drop or 0.0)
self.norm2 = SuperLayerNorm1D(d_model)
else:
raise ValueError("Unknown order: {:}".format(order))
self._order = order
@property
def abstract_search_space(self):
root_node = spaces.VirtualNode(id(self))
xdict = dict(
mha=self.mha.abstract_search_space,
norm1=self.norm1.abstract_search_space,
mlp=self.mlp.abstract_search_space,
norm2=self.norm2.abstract_search_space,
)
for key, space in xdict.items():
if not spaces.is_determined(space):
root_node.append(key, space)
return root_node
def apply_candidate(self, abstract_child: spaces.VirtualNode):
super(SuperTransformerEncoderLayer, self).apply_candidate(abstract_child)
valid_keys = ["mha", "norm1", "mlp", "norm2"]
for key in valid_keys:
if key in abstract_child:
getattr(self, key).apply_candidate(abstract_child[key])
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
return self.forward_raw(input)
def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
if self._order is LayerOrder.PreNorm:
x = self.norm1(input)
x = x + self.drop1(self.mha(x))
x = self.norm2(x)
x = x + self.drop2(self.mlp(x))
elif self._order is LayerOrder.PostNorm:
# multi-head attention
            x = input + self.drop1(self.mha(input))  # residual adds the layer input
x = self.norm1(x)
# feed-forward layer
x = x + self.drop2(self.mlp(x))
x = self.norm2(x)
else:
raise ValueError("Unknown order: {:}".format(self._order))
return x
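# A minimal usage sketch, assuming plain ints are accepted wherever
# IntSpaceType is expected (the alias suggests int-or-search-space) and that
# inputs follow the usual [batch, seq_len, d_model] layout:
#
#     import torch
#     layer = SuperTransformerEncoderLayer(d_model=64, num_heads=4)
#     out = layer(torch.randn(2, 16, 64))  # output keeps the input shape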
|
[
"280835372@qq.com"
] |
280835372@qq.com
|
fb6470f29aaad7c213cae0d37ef84b08e9684089
|
07ee05b02ab05fc4e6088109bf6617bd71bbd335
|
/pycheribuild/projects/soaap.py
|
fb5019e47a254245c571f7c4908f270e6a2b1807
|
[
"BSD-2-Clause"
] |
permissive
|
dodsonmg/cheribuild
|
666b4203cca7096a04e2e47e6f5504b38325fca6
|
88a86ea4d90f2f79a40e87df8154ad1fa41d8510
|
refs/heads/master
| 2021-08-26T07:49:26.339185
| 2020-06-19T13:12:01
| 2020-06-19T13:45:05
| 253,057,878
| 0
| 0
|
NOASSERTION
| 2020-04-04T17:24:37
| 2020-04-04T17:24:37
| null |
UTF-8
|
Python
| false
| false
| 3,082
|
py
|
#
# Copyright (c) 2016 Alex Richardson
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
from .llvm import BuildLLVMSplitRepoBase
from .project import CMakeProject, GitRepository
from ..config.loader import ComputedDefaultValue
install_to_soaap_dir = ComputedDefaultValue(function=lambda config, project: config.outputRoot / "soaap",
as_string="$INSTALL_ROOT/soaap")
class BuildSoaapLLVM(BuildLLVMSplitRepoBase):
target = "soaap-llvm"
project_name = "soaap-llvm"
githubBaseUrl = "https://github.com/CTSRD-SOAAP/"
repository = GitRepository(githubBaseUrl + "llvm.git")
no_default_sysroot = True
skip_misc_llvm_tools = False
skip_static_analyzer = False
_default_install_dir_fn = install_to_soaap_dir
skip_cheri_symlinks = True
@classmethod
def setup_config_options(cls, **kwargs):
cls.included_projects = ["llvm", "clang"]
super().setup_config_options(include_lldb_revision=False, include_lld_revision=False, **kwargs)
class BuildSoaap(CMakeProject):
dependencies = ["soaap-llvm"]
repository = GitRepository("https://github.com/CTSRD-SOAAP/soaap")
_default_install_dir_fn = install_to_soaap_dir
def configure(self, **kwargs):
soaap_llvm = BuildSoaapLLVM.get_instance(self)
print(soaap_llvm.configureArgs)
build_shared_libs = any(x == "-DBUILD_SHARED_LIBS=ON" for x in soaap_llvm.configureArgs)
self.add_cmake_options(LLVM_DIR=soaap_llvm.buildDir / "share/llvm/cmake")
self.add_cmake_options(BUILD_SHARED_LIBS=build_shared_libs)
super().configure(**kwargs)
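# A minimal invocation sketch, assuming cheribuild's usual target-based CLI
# (script name and flags may differ by checkout):
#
#     ./cheribuild.py soaap-llvm soaap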
|
[
"Alexander.Richardson@cl.cam.ac.uk"
] |
Alexander.Richardson@cl.cam.ac.uk
|
75d0c7d478ec0b15d343bd9228b13d4c9db5ab79
|
bf72f86417cff663557addf8f9ff31c5192637c5
|
/lists/tests/test_forms.py
|
588e4230ead7c74a38153da9c19dc6508104a699
|
[] |
no_license
|
robbystk/python-tdd
|
6cbf95353fd7a195a4110bb450b981b9fb21f2bc
|
8eeaa4f811b4d5f921933992669c3f8fb8414354
|
refs/heads/master
| 2021-06-25T05:40:55.588090
| 2019-10-14T03:51:39
| 2019-10-14T03:51:39
| 170,801,167
| 0
| 0
| null | 2021-06-10T22:05:34
| 2019-02-15T04:20:12
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,115
|
py
|
from django.test import TestCase
from lists.forms import (
DUPLICATE_ITEM_ERROR, EMPTY_ITEM_ERROR,
ExistingListItemForm, ItemForm
)
from lists.models import List, Item
class ItemFormTest(TestCase):
def test_form_item_input_has_placeholder_and_css_classes(self):
form = ItemForm()
self.assertIn('placeholder="Enter a to-do item"', form.as_p())
self.assertIn('class="form-control input-lg"', form.as_p())
def test_form_validation_for_blank_items(self):
form = ItemForm(data={'text': ''})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['text'], [EMPTY_ITEM_ERROR])
def test_form_save_handles_saving_to_a_list(self):
list_ = List.objects.create()
form = ItemForm(data={'text': 'task'})
new_item = form.save(for_list=list_)
self.assertEqual(new_item, Item.objects.first())
self.assertEqual(new_item.text, 'task')
self.assertEqual(new_item.list, list_)
class ExistingListItemFormTest(TestCase):
def test_form_renders_item_text_input(self):
list_ = List.objects.create()
form = ExistingListItemForm(for_list=list_)
self.assertIn('placeholder="Enter a to-do item"', form.as_p())
def test_form_validation_for_blank_items(self):
list_ = List.objects.create()
form = ExistingListItemForm(for_list=list_, data={'text': ''})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['text'], [EMPTY_ITEM_ERROR])
def test_form_validation_for_duplicate_items(self):
list_ = List.objects.create()
Item.objects.create(list=list_, text="no twins!")
form = ExistingListItemForm(for_list=list_, data={'text': 'no twins!'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['text'], [DUPLICATE_ITEM_ERROR])
def test_existing_list_item_form_save(self):
list_ = List.objects.create()
form = ExistingListItemForm(for_list=list_, data={'text': 'hi'})
new_item = form.save()
self.assertEqual(new_item, Item.objects.all()[0])
|
[
"robbystk@gmail.com"
] |
robbystk@gmail.com
|
cbd307d49d75f544592ce8a8994cc5decdb75ceb
|
0a09d4dec96def19ef33ace4815eddcad48878c3
|
/archive/NLTK/NLTK获得文本语料和词汇资源.py
|
3bd91044b57ba3d268b8a2ab914ec1445d39d463
|
[] |
no_license
|
amssj/python
|
216522e32e0af137ef76bd41a4b91b7571ab4ff7
|
f5211c9d083813f4d5a3c48faab79806104e3f4a
|
refs/heads/master
| 2022-12-06T13:01:15.477782
| 2020-08-26T06:23:47
| 2020-08-26T06:23:47
| 288,282,579
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
import nltk
nltk.corpus.gutenberg.fileids()
emma = nltk.corpus.gutenberg.words('austen-emma.txt')
print(len(emma))
from nltk.corpus import gutenberg
gutenberg.fileids()
emma = gutenberg.words('austen-emma.txt')
print(emma)
macbeth_sentences = gutenberg.sents('shakespeare-macbeth.txt')
macbeth_sentences
print(macbeth_sentences[1037])
longest_len = max([len(s) for s in macbeth_sentences])
print(longest_len)
A = [s for s in macbeth_sentences if len(s) == longest_len]
print(A)
# words(), raw(), sents()
|
[
"69820922+amssj@users.noreply.github.com"
] |
69820922+amssj@users.noreply.github.com
|
56e30790d5f002830a86c58b9c70e5831cfd7695
|
f94106bb5cd3f5e60242cf75ac2424dd8ba0df3e
|
/moduloInventario/admin.py
|
5263d4e3fedf56a3bdc16e50dc98eff3359ca2db
|
[] |
no_license
|
rianjara/Ing.Software
|
c09be82a8434699f9c8444c1d1c769fcf4e782e8
|
673ee35780db329e720771c0d23f1bc579ae54b4
|
refs/heads/master
| 2020-05-18T16:59:36.496459
| 2013-08-21T05:00:17
| 2013-08-21T05:00:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
from django.contrib import admin
from moduloInventario.models import Item, Categoria, Proveedor, Orden_Compra, Detalle_Orden_Compra
admin.site.register(Item)
admin.site.register(Categoria)
admin.site.register(Proveedor)
admin.site.register(Orden_Compra)
admin.site.register(Detalle_Orden_Compra)
|
[
"Administrador@edgar-PC"
] |
Administrador@edgar-PC
|
e0a57f279a8faf560f53f63bbed103fe897a86a5
|
f9e9aa5cef137e6a0b869ebbab90ac5d6289461b
|
/__init__.py
|
0af5fc107874d56c97b3349ba834386df4a2dc3d
|
[
"MIT"
] |
permissive
|
rupesh28/pollytts
|
c6836ce859cb440dbe9493e84c9cfccb2ea23b56
|
279bd4bc1a06d2f893880fc3426d1411c59ac886
|
refs/heads/master
| 2022-07-17T21:14:21.705786
| 2020-05-19T05:44:46
| 2020-05-19T05:44:46
| 265,155,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,635
|
py
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) [year] [fullname]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Library to convert text to speech using Amazon Polly Service
https://aws.amazon.com/polly/
"""
import logging
import tempfile
import os
import boto3 as aws
from .Voices import Voices  # package-relative imports (this file is a package __init__)
from .Exceptions import (LanguageException, OutputFormatException, EngineException, BotoException, RegionException)
from botocore.exceptions import ClientError
class PollyTTS:
"""
API for Amazon Polly TTS services
"""
def __init__(self, access_key_id, secret_access_key, region='us-west-1', debug=False):
"""
Initiate class
@param access_key_id: AWS Polly access key id
@param secret_access_key: AWS Polly secret access key
@param region: AWS region. Default - US-WEST-1
@param debug: Debugging option. Default - False
"""
self.access_key_id = access_key_id
self.secret_access_key = secret_access_key
self.region = region
self.debug = debug
self.logger = logging.getLogger(__name__)
self.lang = None
self.voice = None
self.formatted_text = None
self.output_format = None
self.engine = None
self.text_type = None
# AWS Polly Engines
self.supported_engines = ['standard', 'neural']
# AWS Polly Output Format
self.supported_output_formats = ['json', 'mp3', 'ogg_vorbis', 'pcm']
# AWS Polly supported regions
self.supported_regions = ['ap-east-1', 'ap-northeast-1', 'ap-northeast-2', 'ap-south-1', 'ap-southeast-1',
'ap-southeast-2', 'ca-central-1', 'eu-central-1', 'eu-north-1', 'eu-west-1',
'eu-west-2', 'eu-west-3', 'me-south-1', 'sa-east-1', 'us-east-1', 'us-east-2',
'us-west-1', 'us-west-2']
# AWS Polly supported voices
self.supported_voices = Voices()
# Logging
if self.debug:
self.logger.setLevel(level=logging.DEBUG)
# Create Polly Client
if self.region not in self.supported_regions:
raise RegionException("Requested region {} does not support polly".format(self.region))
self.session = aws.session.Session(
aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key,
region_name=self.region
)
self.client = self.session.client('polly')
self.logger.debug('Authorized to polly service region - {}'.format(self.region))
def speak(self, text, lang=None, voice=None, engine=None, output_format=None, save_to_file=False, text_type='text'):
"""
Generate the request body for Polly
@param text: Text to convert to speech
@param text_type: Type can be text or SSML. (Default: Text)
@param lang: Speech output language (Default: en-US)
@param voice: Speech output voice (Default: Joanna)
@param engine: Speech Engine (Default: Standard)
@param output_format: Speech output file format (Default : MP3)
@param save_to_file: Save speech data to a file
@return: Return the request for polly
        When passing text to speak, you can utilize certain SSML features by wrapping the text
        in the following tags.
<break> - Add a pause in speech for 2 seconds
        <emphasize></emphasize> - Text will be read in a louder and slower tone
<max duration='duration'></max> (TODO) - Maximum length of the speech response. Duration in milliseconds.
<spell></spell> - Text will be spelled out in individual characters.
<number></number> - Text will be read as cardinal number
        <ordinal></ordinal> - Text will be read as an ordinal, e.g. 1234 will be read as 1234th
<digits></digits> - Text digits will be spelled out individually.
<unit></unit> - Text will be read as a measurement.
<time></time> - Text will be read as time. Example - 1'30" represents 1 min 30 seconds
        <address></address> - Text will be read as an address
<bleep></bleep> - Text will be bleeped out
<fraction></fraction> - Text will be read as a fraction.
<date format='format'></date> (TODO) - Text will be read as date. For supported formats refer -
https://docs.aws.amazon.com/polly/latest/dg/supportedtags.html#say-as-tag
<telephone></telephone> - Text will be read as a telephone number
        <convo></convo> - Text will be spoken in conversational style. Only available for the neural engine and for certain
        voices - Refer https://docs.aws.amazon.com/polly/latest/dg/supportedtags.html#newscaster-tag
        <newscaster></newscaster> - Text will be spoken in newscaster style. Only available for the neural engine and for
        certain voices - Refer https://docs.aws.amazon.com/polly/latest/dg/supportedtags.html#newscaster-tag
<soft></soft> - Text will be spoken in a soft tone
<whisper></whisper> - Text will be spoken in a whispered voice
        Amazon Polly provides more customization options when using SSML input. The tags above are only a
        subset that covers the most common purposes. For more detailed control over speech synthesis, refer to the
        link below and provide input directly in SSML format.
https://docs.aws.amazon.com/polly/latest/dg/supportedtags.html
"""
self.lang = lang
self.voice = voice
self.engine = engine
self.output_format = output_format
self.text_type = text_type
if not text:
raise LanguageException("Text is not provided for speech conversion")
if not self.voice and not self.lang:
self.lang = 'en-US'
self.voice = 'Joanna'
if not self.engine:
self.engine = 'standard'
if not self.output_format:
self.output_format = 'mp3'
if self.lang and not self.voice:
self.voice = self.supported_voices.get_language_details(self.lang).default
# Validate input parameters
self.validate_request()
# Reformat the input text to SSML format
if self.text_type.upper() == 'TEXT':
self.convert_text_to_ssml(text)
else:
self.formatted_text = text
print(self.formatted_text)
# Log parameters determined for use.
self.logger.debug('Language - {}, Voice - {}, Engine - {}, Output Format - {}'.format(self.lang,
self.voice,
self.engine,
self.output_format))
return self.send_request_to_polly(save_to_file)
def validate_request(self):
"""
Verify all the required parameters are valid for polly service
@return: None
"""
if self.voice and not self.lang:
raise LanguageException("Voice defined witout defining language!")
if self.lang is not None and self.lang not in self.supported_voices.supported_languages():
raise LanguageException("Requested language {} not available!".format(self.lang))
if self.voice not in self.supported_voices.get_language_details(self.lang).voices:
raise LanguageException("Requested language {} does not have voice {}!".format(self.lang, self.voice))
if self.output_format not in self.supported_output_formats:
raise OutputFormatException("Requested output format {} is not supported".format(self.output_format))
if self.engine not in self.supported_engines:
raise EngineException("Requested engine {} is not supported".format(self.engine))
return None
def send_request_to_polly(self, save_to_file=False):
"""
Send formatted text as request and return the response.
@param save_to_file: If True - output will be written to a temporary file
@return: If save_to_file is true - location of the audio file will be returned. If false - the audio in raw
byte format will be returned.
"""
try:
response = self.client.synthesize_speech(VoiceId=self.voice,
OutputFormat=self.output_format,
Text=self.formatted_text,
TextType='ssml')
            if response['ResponseMetadata']['HTTPStatusCode'] == 200:
                if save_to_file:
                    out_path = os.path.join(tempfile.gettempdir(),
                                            response['ResponseMetadata']['RequestId'] + '.mp3')
                    # Write the audio stream to a temporary file and return its path.
                    with open(out_path, 'wb') as audio_file:
                        audio_file.write(response['AudioStream'].read())
                    return out_path
                return response['AudioStream'].read()
except ClientError as e:
raise BotoException(e.response['Error']['Code'], e.response['Error']['Message'])
def sent_request_stream_to_polly(self):
"""
TODO : support audio streaming
"""
pass
def convert_text_to_ssml(self, text):
"""
Convert plain text to ssml format.
@param text: Input text to convert to SSML format
@return: SSML formatted text
"""
text_formatter = []
text = text.strip()
replacement_map = {
'<whisper>': '<amazon:effect name="whispered">',
'</whisper>': '</amazon:effect>',
'<soft>': '<amazon:effect phonation="soft">',
'</soft>': '</amazon:effect>',
'<newscaster>': '<amazon:domain name="news">',
'</newscaster>': '</amazon:domain>',
'<convo>': '<amazon:domain name="conversational">',
'</convo>': '</amazon:domain>',
'<telephone>': '<say-as interpret-as="telephone">',
'</telephone>': '</say-as>',
'<fraction>': '<say-as interpret-as="fraction">',
'</fraction>': '</say-as>',
'<bleep>': '<say-as interpret-as="expletive">',
'</bleep>': '</say-as>',
'<address>': '<say-as interpret-as="address">',
'</address>': '</say-as>',
'<time>': '<say-as interpret-as="time">',
'</time>': '</say-as>',
'<unit>': '<say-as interpret-as="unit">',
'</unit>': '</say-as>',
'<digits>': '<say-as interpret-as="digits">',
'</digits>': '</say-as>',
'<ordinal>': '<say-as interpret-as="ordinal">',
'</ordinal>': '</say-as>',
'<number>': '<say-as interpret-as="number">',
'</number>': '</say-as>',
'<spell>': '<say-as interpret-as="spell-out">',
'</spell>': '</say-as>',
'<emphasize>': '<emphasis level="strong">',
'</emphasize>': '</emphasis>',
'<break>': '<break time="2s"/>'
}
for k, v in replacement_map.items():
if k in text:
text = text.replace(k, v)
text_formatter.append('<speak>')
text_formatter.append(text)
text_formatter.append('</speak>')
self.formatted_text = ''.join(text_formatter)
return None
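# A minimal usage sketch; the credentials and region below are placeholders:
#
#     tts = PollyTTS('ACCESS_KEY_ID', 'SECRET_ACCESS_KEY', region='us-west-1')
#     path = tts.speak('Hello <whisper>world</whisper>', save_to_file=True)
#     # path points at a temporary .mp3 file; with save_to_file=False the raw
#     # audio bytes are returned instead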
|
[
"noreply@github.com"
] |
rupesh28.noreply@github.com
|
5eb0df4fd2be9efb632d9ef3d2abe2cbf0c37bfa
|
16acd0b785e23c9b05f374b186d537ad43a90cb5
|
/shop/migrations/0002_remove_product_owner.py
|
351ea1b6f94344455e5858e9012e17372f5605e2
|
[] |
no_license
|
utkarshwebllisto/ecommerce
|
83edfab1767d2705a0ab6d20b9e807fe6826548b
|
d57aa9ed19aeefc7ebbdbece582145ae864d1f69
|
refs/heads/master
| 2021-01-03T10:52:08.477539
| 2020-02-26T12:44:56
| 2020-02-26T12:44:56
| 237,432,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
# Generated by Django 3.0.3 on 2020-02-21 07:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='owner',
),
]
|
[
"utkarsh.webllisto@gmail.com"
] |
utkarsh.webllisto@gmail.com
|
41a01e4a689a804503dada113ac3115d9fcef976
|
9670126c5636e6b0f123cd5cd7bd794b1d0a5cd2
|
/app212/serializers/tick_request_serializer.py
|
7a179b90e01f89cb1bce107efc11923efec575d1
|
[] |
no_license
|
mikegordo/app212
|
c64f6cd8ec18d7d55cad42111f73da91ec71b401
|
4e53627dbbeeb1508a8153a5857df5fd0664fdeb
|
refs/heads/master
| 2023-05-02T13:08:00.983346
| 2021-05-24T06:02:38
| 2021-05-24T06:02:38
| 370,244,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
from rest_framework import serializers
from app212.models import Tick
class TickRequestSerializer(serializers.ModelSerializer):
class Meta:
model = Tick
fields = ['payload']
payload = serializers.JSONField(required=False, allow_null=True)
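# A minimal usage sketch of the standard DRF validation flow, assuming a
# configured Django project with the app212 models installed; the payload
# contents are placeholders:
#
#     serializer = TickRequestSerializer(data={'payload': {'symbol': 'ABC'}})
#     if serializer.is_valid():
#         tick = serializer.save()  # creates a Tick with the given payload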
|
[
"himor.cre@gmail.com"
] |
himor.cre@gmail.com
|
179c76e3d89998854ba16ce18f04f81ebb3f6231
|
4ea40fecde97e048e9f1731354080b949b9c3736
|
/model/group_policy.py
|
91316163fca25cb3c4239e9e60f78861d2f5df18
|
[] |
no_license
|
znnby1997/magent_workspace
|
93e2ab92bbeabb861e694481f9f90eeb354a82fa
|
5db34580c69c82d305c8c590e20a1e079ff85ab5
|
refs/heads/master
| 2023-05-30T19:27:35.120533
| 2021-06-21T13:32:47
| 2021-06-21T13:32:47
| 278,375,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 648
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class GroupPolicy(nn.Module):
def __init__(self, input_shape, n_groups, hidden_dim):
super().__init__()
self.tokens_fc = nn.Sequential(
nn.Linear(input_shape, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, n_groups)
)
def forward(self, x, h=None):
"""
x: [batch, num, tokens],
h: [batch, hidden_dim]
"""
if h is not None:
x = torch.cat([x, h], dim=2) # shape: nums, batch, 2*(input_shape+h_dim)
        return F.softmax(self.tokens_fc(x), dim=-1)  # F (torch.nn.functional), not f; softmax over the group dimension
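# A minimal usage sketch, with placeholder sizes, for the h=None path
# (x keeps the [batch, num, tokens] layout from the docstring):
#
#     policy = GroupPolicy(input_shape=32, n_groups=4, hidden_dim=64)
#     probs = policy(torch.randn(8, 10, 32))  # -> [8, 10, 4]; last dim sums to 1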
|
[
"1025142964@qq.com"
] |
1025142964@qq.com
|
0714bc8e4f734c3c75dfd51b1b71364a7e4b6331
|
a9a534ecbcdd55b1859ef4f401901f36c2a8e5ed
|
/applications/home/forms.py
|
606a720676a7ade7dc3752a34cdbaf5fa8918900
|
[] |
no_license
|
arielmamaniorellana/cursoprodj
|
bdb251c0f57c1eaf824ffe4b70cb13a77a5140f9
|
1061928612b0a45be3319dc275af8f1291d54b4a
|
refs/heads/main
| 2023-08-15T03:08:09.400783
| 2021-09-25T16:40:43
| 2021-09-25T16:40:43
| 395,439,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
from django import forms
from django.forms import widgets
from .models import Prueba
class PruebaForm(forms.ModelForm):
class Meta:
model = Prueba
fields = (
'titulo',
'subtitulo',
'cantidad',
)
widgets = {
'titulo': forms.TextInput(
attrs = {
'placeholder': 'Ingrese texto aqui'
}
)
}
def clean_cantidad(self):
cantidad = self.cleaned_data['cantidad']
if cantidad < 10:
raise forms.ValidationError('Ingrese un numero mayor a 10')
return cantidad
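# A minimal usage sketch, assuming a configured Django project in which the
# Prueba model is installed; the field values are placeholders:
#
#     form = PruebaForm(data={'titulo': 't', 'subtitulo': 's', 'cantidad': 5})
#     form.is_valid()          # False: clean_cantidad rejects values below 10
#     form.errors['cantidad']  # ['Ingrese un numero mayor a 10']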
|
[
"arielmamaniorellana12@gmail.com"
] |
arielmamaniorellana12@gmail.com
|
a4a326f14f26006a00f0a145175cb0dc8be59b7b
|
cbfe19d792781de4b481fb97580c4da9e6fe3e6f
|
/decisiontree.py
|
c721be5d463f78cc5d49b055a667615498b62f45
|
[] |
no_license
|
khan133/decisiontree
|
c3c7cb1b1cc8911487e4734dd1dc73c94d7864c7
|
b33e79b4d9e0c789b501f12318701d618c68a9e3
|
refs/heads/master
| 2020-03-17T22:10:22.264782
| 2018-05-18T18:41:27
| 2018-05-18T18:41:27
| 133,991,723
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,235
|
py
|
import sys
import math
import pandas as pd
class Node():
def __init__(self, value):
self.value = value
self.left = None
self.right = None
self.total = 0
self.above = 0
class decision():
myRootNode = Node(None)
def start(self):
#self.listOfAttributesAlreadyUsed = []
self.attributes = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country', 'salaryLevel']
filePathToTraining = sys.argv[1]
filePathToTesting = sys.argv[2]
method = sys.argv[3]
#percentsToUse = [2, 10, 20, 30, 40, 50, 60]
#for eachPercent in percentsToUse:
percentToUse = sys.argv[4]
if str(method) == 'vanilla':
self.vanilla(filePathToTraining, filePathToTesting, percentToUse)
if str(method) == 'depth':
percentToUseValidation = sys.argv[5]
maxDepth = sys.argv[6]
self.depth(filePathToTraining, filePathToTesting, percentToUse, percentToUseValidation, int(maxDepth))
if str(method) == 'prune':
valPercent = sys.argv[5]
self.prune(filePathToTraining, filePathToTesting, percentToUse, valPercent)
def prune(self, filePathToTraining, filePathToTesting, percentToUse, percentForValidation):
rootNode = self.vanillaForPrune(filePathToTraining, filePathToTesting, percentToUse, percentForValidation)
attributes = ('workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country', 'salaryLevel')
train = pd.read_csv(filePathToTraining, sep = ', ', quotechar='"', header = None, engine= 'python', names = attributes)
#X = data[['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country']].as_matrix()
#X = data.as_matrix()
X = train
size = len(X)
#print X
percentToUse = int(percentToUse)
size *= percentToUse
size /= 100
X = X[:size]
validationData = pd.read_csv(filePathToTraining, sep = ', ', quotechar='"', header = None, engine= 'python', names = attributes)
valX = validationData
sizeVal = len(valX)
percentToUseVal = 100 - int(percentForValidation)
sizeVal *= percentToUseVal
sizeVal /= 100
valX = valX[sizeVal:]
#print rootNode.value
rootNew = self.getCounts(rootNode, valX)
#print rootNew.value
global myRootNode
myRootNode = rootNew
self.rep(rootNew)
#print myRootNode.value
trainAcc = self.accuracy(myRootNode, X)
print "Train set accuracy: " + str(trainAcc)
dataTest = pd.read_csv(filePathToTesting, sep = ', ', quotechar='"', header = None, engine= 'python', names = attributes)
Y = dataTest
size = len(Y)
copyY = Y.copy()
#rootNode1 = self.makeTree(Y, Y['salaryLevel'], dataTest)
testAcc = self.accuracy(myRootNode, copyY)
print ("Test set accuracy: " + str(testAcc))
def countNodes(self, node):
if node.value in ['<=50K', ">50K"]:
return 1
else:
x = self.countNodes(node.left)
y = self.countNodes(node.right)
return x + y + 1
def rep(self, rootNode):
realNode = rootNode
ourNode = rootNode
if ourNode.value in ['<=50K', ">50K"]:
if ourNode.value == '>50K':
return ourNode.total - ourNode.above
else:
return ourNode.above
else:
error = self.rep(ourNode.left) + self.rep(ourNode.right)
ourNode.above = ourNode.left.above + ourNode.right.above
ourNode.total = ourNode.left.total + ourNode.right.total
if error < min(ourNode.above, ourNode.total - ourNode.above):
return error
else:
if ourNode.above > ourNode.total - ourNode.above:
ourNode.value = ">50K"
return ourNode.total - ourNode.above
else:
ourNode.value = "<=50K"
return ourNode.above
def vanillaForPrune(self, filePathToTraining, filePathToTesting, percentToUse, percentForValidation):
attributes = ('workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country', 'salaryLevel')
data = pd.read_csv(filePathToTraining, sep = ', ', quotechar='"', header = None, engine= 'python', names = attributes)
#X = data[['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country']].as_matrix()
#X = data.as_matrix()
X = data
size = len(X)
#print X
percentToUse = int(percentToUse)
size *= percentToUse
size /= 100
X = X[:size]
#print X[4]
#data = X[X[7] == '>50k']
copy = X.copy()
rootNode = self.makeTree(X, X['salaryLevel'], data)
return rootNode
#nodes = self.getCounts(rootNode, valX)
#trainAcc = self.accuracy(rootNode, copy)
#print ("Training set accuracy: " + str(trainAcc))
#dataTest = pd.read_csv(filePathToTesting, sep = ', ', quotechar='"', header = None, engine= 'python', names = attributes)
#Y = dataTest
#size = len(Y)
#copyY = Y.copy()
#rootNode1 = self.makeTree(Y, Y['salaryLevel'], dataTest)
#testAcc = self.accuracy(rootNode, copyY)
#print ("Test set accuracy: " + str(testAcc))
def depth(self, filePathToTraining, filePathToTesting, percentToUseTraining, percentToUseValidation, maxDepth):
attributes = ('workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country', 'salaryLevel')
data = pd.read_csv(filePathToTraining, sep = ', ', quotechar='"', header = None, engine= 'python', names = attributes)
#X = data[['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country']].as_matrix()
#X = data.as_matrix()
X = data
size = len(X)
#print X
percentToUse = int(percentToUseTraining)
size *= percentToUse
size /= 100
X = X[:size]
#print X[4]
#data = X[X[7] == '>50k']
copy = X.copy()
rootNode = self.makeTreeDepth(X, X['salaryLevel'], data, 0, maxDepth)
trainAcc = self.accuracy(rootNode, copy)
print ("Training set accuracy: " + str(trainAcc))
dataTestZ = pd.read_csv(filePathToTraining, sep = ', ', quotechar='"', header = None, engine= 'python', names = attributes)
Z = dataTestZ
sizeZ = len(Z)
percentToUseVal = 100 - int(percentToUseValidation)
sizeZ *= percentToUseVal
sizeZ /= 100
Z = Z[size:]
copyZ = Z.copy()
#rootNode2 = self.makeTreeDepth(Z, Z['salaryLevel'], dataTestZ, 0, maxDepth)
validationAcc = self.accuracy(rootNode, copyZ)
print ("Validation set accuracy: " + str(validationAcc))
dataTest = pd.read_csv(filePathToTesting, sep = ', ', quotechar='"', header = None, engine= 'python', names = attributes)
Y = dataTest
sizeY = len(Y)
copyY = Y.copy()
#rootNode1 = self.makeTreeDepth(Y, Y['salaryLevel'], dataTest, 0, maxDepth)
testAcc = self.accuracy(rootNode, copyY)
print ("Test set accuracy: " + str(testAcc))
def vanilla(self, filePathToTraining, filePathToTesting, percentToUse):
attributes = ('workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country', 'salaryLevel')
data = pd.read_csv(filePathToTraining, sep = ', ', quotechar='"', header = None, engine= 'python', names = attributes)
#X = data[['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country']].as_matrix()
#X = data.as_matrix()
X = data
size = len(X)
#print X
percentToUse = int(percentToUse)
size *= percentToUse
size /= 100
X = X[:size]
#print X[4]
#data = X[X[7] == '>50k']
copy = X.copy()
rootNode = self.makeTree(X, X['salaryLevel'], data)
trainAcc = self.accuracy(rootNode, copy)
print ("Training set accuracy: " + str(trainAcc))
dataTest = pd.read_csv(filePathToTesting, sep = ', ', quotechar='"', header = None, engine= 'python', names = attributes)
Y = dataTest
size = len(Y)
copyY = Y.copy()
#rootNode1 = self.makeTree(Y, Y['salaryLevel'], dataTest)
testAcc = self.accuracy(rootNode, copyY)
print ("Test set accuracy: " + str(testAcc))
#self.makeTree(X, data[data[7])
def majority(self, data, target):
counts = {}
for i, row in data.iterrows():
#print row
            if row['salaryLevel'] in counts:
counts[row['salaryLevel']] += 1.0
else:
counts[row['salaryLevel']] = 1.0
maxVal = 0
majority = ""
for individualKey in counts.keys():
if counts[individualKey] > maxVal:
maxVal = counts[individualKey]
majority = individualKey
return majority
def entropy(self, data, target):
counts = {}
counts = data['salaryLevel'].value_counts()
#print counts
summation = 0.0
for vals in counts:
if len(counts) <= 1:
return 0
prob = float(vals) / len(data)
summation += ((-1) * (prob * math.log(prob, 10)))
return summation
def informationGain(self, data, attributeIndex, target):
counts = {}
#print attributeIndex
#print data
#print type(attributeIndex)
#print 'hi'
#print self.attributes[attributeIndex]
# print data[self.attributes]
#print data
values = data[attributeIndex].unique() #self.getValues(data, attributeIndex)
values = list(values)
bestValue = values[0]
maxInfoGain = float("-inf")
for singleValue in values:
entropy = self.entropy(data, target)
subset0 = data[data[attributeIndex] == singleValue]
subset1 = data[data[attributeIndex] != singleValue]
entropy -= (float(len(subset0)) / len(data) * self.entropy(subset0, target))
entropy -= (float(len(subset1)) / len(data) * self.entropy(subset1, target))
if entropy > maxInfoGain:
maxInfoGain = entropy
bestValue = singleValue
return maxInfoGain, bestValue
def getValues(self, data, attributeIndex):
values = []
for index, entry in data.iterrows():
if (entry[attributeIndex] not in values):
values.append(entry[attributeIndex])
return values
def bestAttribute(self, data, target):
maxBest, bestValueToSplit = float("-inf"), ""
index = 0
columnValues = list(data.columns.values)
columnValues.remove('salaryLevel')
for columns in columnValues:
#if i in
currGain, valueToSplit = self.informationGain(data, columns, target)
if (currGain >= maxBest):
maxBest = currGain
bestValueToSplit = valueToSplit
index = columns
if maxBest <= 0.0:
return self.majority(data, target), 20
#if maxBest > 0.0:
#return self.majority(data, target)
return bestValueToSplit, index
#else:
#return self.majority(data, target)
def makeTree(self, data, target, oldData):
listOfAttributesAlreadyUsed = []
rows, columns = data.shape
#print columns
#print "HELLO"
valuesInTargetAttribute = data['salaryLevel'].unique()
#target = data[columns - 1]
#print data
if len(valuesInTargetAttribute) == 1:
root = Node(valuesInTargetAttribute[0])
return root
if len(data) <= 0 or columns <= 1:
root = Node(self.majority(oldData, target))
return root
valueToSplit, index = self.bestAttribute(data, target)
if index == 20:
root = Node(self.majority(data, target))
return root
listOfAttributesAlreadyUsed.append(index)
root = Node(valueToSplit)
oldData = data
rightData = data[data[index] != valueToSplit]
leftData = data[data[index] == valueToSplit]
leftData = leftData.drop(str(index), axis = 1)
root.left = self.makeTree(leftData, target, oldData)
root.right = self.makeTree(rightData, target, oldData)
return root
def makeTreeDepth(self, data, target, oldData, depth, maxDepth):
listOfAttributesAlreadyUsed = []
rows, columns = data.shape
valuesInTargetAttribute = data['salaryLevel'].unique()
if len(valuesInTargetAttribute) == 1:
root = Node(valuesInTargetAttribute[0])
return root
#return valuesInTargetAttribute[0]
if len(data) <= 0 or columns <= 1:
root = Node(self.majority(oldData, target))
#return self.majority(oldData, target)
return root
valueToSplit, index = self.bestAttribute(data, target)
if index == 20:
root = Node(self.majority(data, target))
return root
if depth == maxDepth:
root = Node(self.majority(data, target))
return root
#print valueToSplit
listOfAttributesAlreadyUsed.append(index)
#attributes = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country', 'salaryLevel']
#print valueToSplit
root = Node(valueToSplit)
oldData = data
#att = self.attributes[index]
#print valueToSplit
rightData = data[data[index] != valueToSplit]
leftData = data[data[index] == valueToSplit]
#print leftData[3]
leftData = leftData.drop(str(index), axis = 1)
#leftData.drop(leftData.columns[[index]], axis = 1, inplace = True)
#print leftData
depth += 1
root.left = self.makeTreeDepth(leftData, target, oldData, depth, maxDepth)
#print "sjdfsf"
root.right = self.makeTreeDepth(rightData, target, oldData, depth, maxDepth)
return root
def getCounts(self, rootNode, dataSet):
total = 0
above = 0
flag = False
size = len(dataSet)
#print rootNode
for index, i in dataSet.iterrows():
rootTree = rootNode
while rootTree.value not in ['<=50K', ">50K"]:
flag = False
for allValues in i:
if allValues == rootTree.value:
rootTree = rootTree.left
flag = True
break
if flag == False:
rootTree = rootTree.right
rootTree.total += 1
if (i[8] == ">50K"):
rootTree.above += 1
return rootNode
def accuracy(self, rootNode, dataSet):
tp = 0
flag = False
size = len(dataSet)
#print rootNode
for index, i in dataSet.iterrows():
rootTree = rootNode
while rootTree.value not in ['<=50K', ">50K"]:
flag = False
for allValues in i:
if allValues == rootTree.value:
#if i[()] == rootTree.value:
rootTree = rootTree.left
flag = True
break
if flag == False:
rootTree = rootTree.right
if rootTree.value == i[8]:
tp += 1
accuracy = tp*1.0/len(dataSet)
return accuracy
D = decision()
D.start()
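# A minimal invocation sketch; the CSV file names are placeholders and the
# argument order follows start() above (train, test, method, percent, ...):
#
#     python decisiontree.py adult.train.csv adult.test.csv vanilla 50
#     python decisiontree.py adult.train.csv adult.test.csv depth 50 20 10
#     python decisiontree.py adult.train.csv adult.test.csv prune 50 20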
|
[
"noreply@github.com"
] |
khan133.noreply@github.com
|
e9529270c8f8e0c4bb0813644eeb51d0564f5e22
|
fa8dc1924aeef19f4a1e6aae2b182de8f6e41f1b
|
/Model-zyc/nets/vgg.py
|
a13944a07178ad90aa85a13dc431d8d348cd18d4
|
[] |
no_license
|
Lihit/Ensemble
|
6ae9afe7fd14cabae9696caad4404614c43e3315
|
c0aaf72847b163779566641c8508c90f6de38b2e
|
refs/heads/master
| 2021-05-16T03:48:51.444886
| 2017-10-11T05:03:31
| 2017-10-11T05:03:31
| 105,557,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,895
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains model definitions for versions of the Oxford VGG network.
These model definitions were introduced in the following technical report:
Very Deep Convolutional Networks For Large-Scale Image Recognition
Karen Simonyan and Andrew Zisserman
arXiv technical report, 2015
PDF: http://arxiv.org/pdf/1409.1556.pdf
ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf
CC-BY-4.0
More information can be obtained from the VGG website:
www.robots.ox.ac.uk/~vgg/research/very_deep/
Usage:
with slim.arg_scope(vgg.vgg_arg_scope()):
outputs, end_points = vgg.vgg_a(inputs)
with slim.arg_scope(vgg.vgg_arg_scope()):
outputs, end_points = vgg.vgg_16(inputs)
@@vgg_a
@@vgg_16
@@vgg_19
"""
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
def vgg_arg_scope(weight_decay=0.0005):
"""Defines the VGG arg scope.
Args:
weight_decay: The l2 regularization coefficient.
Returns:
An arg_scope.
"""
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_initializer=tf.zeros_initializer()):
with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
return arg_sc
def vgg_a(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='vgg_a',
fc_conv_padding='VALID'):
"""Oxford Net VGG 11-Layers version A Example.
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
fc_conv_padding: the type of padding to use for the fully connected layer
that is implemented as a convolutional layer. Use 'SAME' padding if you
are applying the network in a fully convolutional manner and want to
get a prediction map downsampled by a factor of 32 as an output. Otherwise,
the output prediction map will be (input / 32) - 6 in case of 'VALID' padding.
Returns:
the last op containing the log predictions and end_points dict.
"""
with tf.variable_scope(scope, 'vgg_a', [inputs]) as sc:
end_points_collection = sc.name + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.max_pool2d],
outputs_collections=end_points_collection):
net = slim.repeat(inputs, 1, slim.conv2d, 64, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.repeat(net, 1, slim.conv2d, 128, [3, 3], scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')
net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv4')
net = slim.max_pool2d(net, [2, 2], scope='pool4')
net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [2, 2], scope='pool5')
# Use conv2d instead of fully_connected layers.
net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout6')
net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout7')
net = slim.conv2d(net, num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='fc8')
      # Convert end_points_collection into an end_points dict.
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
vgg_a.default_image_size = 224
def vgg_16(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='vgg_16',
fc_conv_padding='VALID'):
"""Oxford Net VGG 16-Layers version D Example.
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
fc_conv_padding: the type of padding to use for the fully connected layer
that is implemented as a convolutional layer. Use 'SAME' padding if you
are applying the network in a fully convolutional manner and want to
get a prediction map downsampled by a factor of 32 as an output. Otherwise,
the output prediction map will be (input / 32) - 6 in case of 'VALID' padding.
Returns:
the last op containing the log predictions and end_points dict.
"""
with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc:
end_points_collection = sc.name + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=end_points_collection):
net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
net = slim.max_pool2d(net, [2, 2], scope='pool4')
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [2, 2], scope='pool5')
# Use conv2d instead of fully_connected layers.
net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout6')
net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout7')
net = slim.conv2d(net, num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='fc8')
      # Convert end_points_collection into an end_points dict.
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
vgg_16.default_image_size = 224
def vgg_19(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='vgg_19',
fc_conv_padding='VALID'):
"""Oxford Net VGG 19-Layers version E Example.
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
fc_conv_padding: the type of padding to use for the fully connected layer
that is implemented as a convolutional layer. Use 'SAME' padding if you
are applying the network in a fully convolutional manner and want to
get a prediction map downsampled by a factor of 32 as an output. Otherwise,
the output prediction map will be (input / 32) - 6 in case of 'VALID' padding.
Returns:
the last op containing the log predictions and end_points dict.
"""
with tf.variable_scope(scope, 'vgg_19', [inputs]) as sc:
end_points_collection = sc.name + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=end_points_collection):
net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.repeat(net, 4, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')
net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv4')
net = slim.max_pool2d(net, [2, 2], scope='pool4')
net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [2, 2], scope='pool5')
# Use conv2d instead of fully_connected layers.
net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout6')
net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout7')
net = slim.conv2d(net, num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='fc8')
      # Convert end_points_collection into an end_points dict.
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
vgg_19.default_image_size = 224
# Alias
vgg_d = vgg_16
vgg_e = vgg_19
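# Hedged usage sketch (added; illustrative, not part of the original slim
# release): builds a single forward pass in TF1 graph mode, assuming `tf`
# and `slim` are the modules imported at the top of this file.
if __name__ == '__main__':
  images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
  logits, end_points = vgg_16(images, num_classes=1000, is_training=False)
  print(logits.shape)  # (?, 1000) after the spatial squeeze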
|
[
"wenshaoguo0611@gmail.com"
] |
wenshaoguo0611@gmail.com
|
aa7b969ccbcabe0b29f5cc0015b585b72fd68b6c
|
2ecc2b5c89191176cb331f23a2c5bb19abbad744
|
/bff/api/reaction.py
|
a1c9724fd27d22c54e7a9e27431be8788a252185
|
[] |
no_license
|
Jonas1302/BotsForFuture
|
4f4c403ec7e4ba04bdf5f142293cb47979b11ce8
|
59a8e00750ce2f76cb6a22a8731944872fd967bb
|
refs/heads/master
| 2021-07-25T17:29:32.138162
| 2020-08-24T12:37:47
| 2020-08-24T12:37:47
| 212,149,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 785
|
py
|
import logging
from bff import api
logger = logging.getLogger(__name__)
class Reaction:
def __init__(self, user_id, post_id, emoji_name, **kwargs):
self.user_id = user_id
self.post_id = post_id
self.emoji = emoji_name
self.__dict__.update(kwargs)
self._user = None
self._post = None
def __eq__(self, other):
if not isinstance(other, Reaction):
return False
return self.user == other.user \
and self.post == other.post \
and self.emoji == other.emoji
    def __hash__(self):
        # hash() takes a single argument; hash a tuple of the three fields.
        return hash((self.user, self.post, self.emoji))
@property
def user(self):
if not self._user:
self._user = api.User.by_id(self.user_id)
return self._user
@property
def post(self):
if not self._post:
self._post = api.Post.by_id(self.post_id)
return self._post
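# Hedged usage sketch (added; illustrative). Comparing reactions resolves
# users and posts lazily through the api module, so a reachable backend is
# assumed:
#
#   r1 = Reaction(user_id='u1', post_id='p1', emoji_name='thumbsup')
#   r2 = Reaction(user_id='u1', post_id='p1', emoji_name='thumbsup')
#   assert r1 == r2  # same user, post and emoji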
|
[
"Jones132@posteo.net"
] |
Jones132@posteo.net
|
f1268d47ba9fd932f12ba63aa3026b6f2fc2a852
|
76a197bd6836456ec556b916f58760e3452ed54e
|
/datascraper.py
|
ccb982dd9cd690394148946f33349786091a09ca
|
[
"MIT"
] |
permissive
|
enmerk4r/NoLA-Scraper
|
965700f94a4b6c9ae13dd75df649bb7fc0e70fa0
|
140348cf006ed83d28a727f7ef67380022a9abff
|
refs/heads/master
| 2020-12-02T12:14:10.073570
| 2020-01-05T05:39:41
| 2020-01-05T05:39:41
| 231,003,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,389
|
py
|
from selenium import webdriver
from dataframe import *
import os
import unicodedata
import re
from pprint import pprint
import urllib3
import time
import datetime
import uuid
from selenium.common.exceptions import UnexpectedAlertPresentException
class Scraper:
def __init__(self,
imgFolder="downloads",
downloadFiles=True,
goToNextParcel=True,
entryLimit = None,
stopWhenDuplicate=False):
self.Pages = {}
self.Driver = webdriver.Chrome()
self.ImageFolder = imgFolder
self.GoToNextParcel = goToNextParcel
self.EntryLimit = entryLimit
        # Store the flag; downstream code previously tested the bound method
        # self.DownloadImages, which is always truthy.
        self.DownloadFiles = downloadFiles
        if not os.path.exists(imgFolder) and downloadFiles:
            os.makedirs(imgFolder)
        self.parsedUrls = []
        self.StopWhenDuplicate = stopWhenDuplicate
def ReadWebPage(self, url):
try:
if (url in self.parsedUrls):
print("DUPLICATE! Skipping record")
if self.StopWhenDuplicate:
print("**** REACHED STOP CONDITION ****")
return
imageUri, specialTaxDistrictMapUri, parcelMapUri, AssessmentAreaMapUri, nextUri, zoningLink = self.GetHyperlinks()
else:
pageGuid = str(uuid.uuid4())
print("============================")
print("Parsing page: {0}".format(len(self.Pages) + 1))
print("============================")
self.Driver.get(url)
ownerParcelInfo = self.ParseOwnerParcelInfo()
ownerParcelInfo.URI = url
address = self.slugify(ownerParcelInfo.MailingAddress)
print(address, ": parsed owner / parcel data")
valueObjects = self.ParseValueInfo()
print(address, ": parsed {0} value records".format(len(valueObjects)))
salesObjects = self.ParseSalesInfo()
print(address, ": parsed {0} sales records".format(len(salesObjects)))
# Create address path
addressPath = os.path.join(self.ImageFolder, address)
if not os.path.exists(addressPath):
os.mkdir(addressPath)
# Create page path
pagePath = os.path.join(addressPath, pageGuid)
if not os.path.exists(pagePath):
os.mkdir(pagePath)
# Create image path
imagesPath = os.path.join(pagePath, "Images")
if not os.path.exists(imagesPath):
os.mkdir(imagesPath)
# Create sketch path
sketchPath = os.path.join(pagePath, "Sketches")
if not os.path.exists(sketchPath):
os.mkdir(sketchPath)
# Download Sketches
                if self.DownloadFiles:
numSketches = 0
numSketches += self.DownloadSketch(sketchPath)
print(address, ": downloaded {0} sketch(es)".format(numSketches))
# Get downloadable links
imageUri, specialTaxDistrictMapUri, parcelMapUri, AssessmentAreaMapUri, nextUri, zoningLink = self.GetHyperlinks()
ownerParcelInfo.SpecialTaxDistrictMap = specialTaxDistrictMapUri
ownerParcelInfo.ParcelMap = parcelMapUri
ownerParcelInfo.AssessmentAreaMap = AssessmentAreaMapUri
ownerParcelInfo.ImageUrl = imageUri
# Parse zoning data
if zoningLink != None:
try:
self.Driver.get(zoningLink)
zoningInfo = self.ParseZoningInfo(pagePath)
ownerParcelInfo.ZoningDistrict = zoningInfo.ZoningDistrict
ownerParcelInfo.ZoningDescription = zoningInfo.ZoningDescription
except:
ownerParcelInfo.ZoningDistrict = ""
ownerParcelInfo.ZoningDescription = ""
                if self.DownloadFiles:
# Download files
numFiles = 0
if specialTaxDistrictMapUri is not None:
taxDistrMapPath = os.path.join(pagePath, "SpecialTaxDistrictMap.pdf")
self.DownloadFile(specialTaxDistrictMapUri, taxDistrMapPath)
numFiles += 1
if parcelMapUri is not None:
parcelMapPath = os.path.join(pagePath, "ParcelMap.pdf")
self.DownloadFile(parcelMapUri, parcelMapPath)
numFiles += 1
if AssessmentAreaMapUri is not None:
assmntAreaPath = os.path.join(pagePath, "AssessmentAreaMap.pdf")
self.DownloadFile(AssessmentAreaMapUri, assmntAreaPath)
numFiles += 1
print(address, ": downloaded {0} PDF file(s)".format(numFiles))
# Download Images
if imageUri is not None:
numImgs = self.DownloadImages(imageUri, imagesPath)
print(address, ": downloaded {0} image(s)".format(numImgs))
# Create Page Rep
rep = PageRep(ownerParcelInfo, valueObjects, salesObjects, url)
rep.Guid = pageGuid
self.parsedUrls.append(url)
self.Pages[rep.Guid] = rep
rep.WriteOut()
print(address, ": successfully written to disk")
# Next Page
if (self.GoToNextParcel):
if (self.EntryLimit == None or len(self.Pages) < self.EntryLimit):
self.ReadWebPage(nextUri)
elif len(self.Pages) >= self.EntryLimit:
print("**** REACHED ENTRY LIMIT ****")
except UnexpectedAlertPresentException as exception:
alert_obj = self.Driver.switch_to.alert
alert_obj.accept()
self.ReadWebPage(url)
def DownloadSketch(self, sketchPath):
try:
tds = self.Driver.find_element_by_class_name("sketch_main")
image = tds.find_element_by_tag_name("img")
source = image.get_attribute("src")
self.DownloadFile(source, os.path.join(sketchPath, "MainSketch.jpg"))
return 1
except:
return 0
def DownloadImages(self, imagesUri, folder):
try:
self.Driver.get(imagesUri)
images = self.Driver.find_elements_by_tag_name("img")
counter = 0
for img in images:
src = img.get_attribute("src")
if src != "http://qpublic9.qpublic.net/images/la_orleans.jpg":
self.DownloadFile(src, os.path.join(folder, "img_{0}.jpg".format(counter)))
counter += 1
return counter
except:
return 0
def GetHyperlinks(self):
try:
imageLink = self.Driver.find_element_by_link_text("Enlarge/Show All")
imageUri = imageLink.get_attribute('href')
except:
imageUri = None
nextLink = self.Driver.find_element_by_link_text("Next Parcel")
nextUri = nextLink.get_attribute('href')
specialTaxDistrictMapUri = None
parcelMapUri = None
AssessmentAreaMapUri = None
ZoningLink = None
hyperLinks = self.Driver.find_elements_by_tag_name("a")
for h in hyperLinks:
href = h.get_attribute('href')
if "Show Viewer" in h.text:
ZoningLink = href
try:
img = h.find_element_by_tag_name("img")
if img.get_attribute("src") == "http://qpublic9.qpublic.net/images/special_tax_district_map.gif":
specialTaxDistrictMapUri = href
elif img.get_attribute("src") == "http://qpublic9.qpublic.net/images/spm.gif":
parcelMapUri = href
elif img.get_attribute("src") == "http://qpublic9.qpublic.net/images/saa.gif":
AssessmentAreaMapUri = href
except:
pass
return imageUri, specialTaxDistrictMapUri, parcelMapUri, AssessmentAreaMapUri, nextUri, ZoningLink
def DownloadFile(self, url, path, chunk_size=1024):
try:
http = urllib3.PoolManager()
r = http.request('GET', url, preload_content=False)
with open(path, 'wb') as out:
while True:
data = r.read(chunk_size)
if not data:
break
out.write(data)
r.release_conn()
except:
pass
def ParseZoningInfo(self, path):
# Make sure the map loads fully
print("Waiting for the map to load...")
try:
WaitStart = datetime.datetime.now()
WAIT_LIMIT = 60
fullyLoaded = False
while not fullyLoaded:
try:
lines = self.Driver.find_elements_by_tag_name("td")
for td in lines:
try:
b = td.find_element_by_tag_name("b")
head = b.text
if "Zoning District:" in head or "Future Land Use:" in head:
print("Page fully loaded")
time.sleep(0.5)
fullyLoaded = True
break
except:
pass
except:
time.sleep(1)
# Make sure we're not stuck
difference = datetime.datetime.now() - WaitStart
if difference.seconds > WAIT_LIMIT:
fullyLoaded = True
all_lines = self.Driver.find_elements_by_tag_name("td")
zoning_dict = {}
for td in all_lines:
try:
b = td.find_element_by_tag_name("b")
head = b.text
value = td.text
clean = head.replace(" ", "").replace("\n", "")
if clean not in zoning_dict:
zoning_dict[clean] = value
except:
pass
# Save screenshot
self.Driver.save_screenshot(os.path.join(path, "zoning.png"))
zoningInfo = ZoningInfoItem(zoning_dict)
return zoningInfo
except UnexpectedAlertPresentException as exception:
return None
def ParseOwnerParcelInfo(self):
owner_headers = self.Driver.find_elements_by_class_name("owner_header")
owner_values = self.Driver.find_elements_by_class_name("owner_value")
owner_parcel_value_dict = {}
for h, v in zip(owner_headers, owner_values):
owner_parcel_value_dict[h.text.replace(" ", "")] = v.text
# Create an Owner / Parcel info object
ownerParcelInfo = OwnerParcelInfoItem(owner_parcel_value_dict)
return ownerParcelInfo
def ParseValueInfo(self):
tax_headers = self.Driver.find_elements_by_class_name("tax_header")
tax_values = self.Driver.find_elements_by_class_name("tax_value")
numColumns = len(tax_headers)
value_partition = self.PartitionList(tax_values, numColumns)
valueObjects = []
for line in value_partition:
line_dict = {}
for h, v in zip(tax_headers, line):
line_dict[h.text.replace("\n", "").replace(" ", "")] = v.text
valueInfo = ValueInfoItem(line_dict)
valueObjects.append(valueInfo)
return valueObjects
def ParseSalesInfo(self):
sales_headers = self.Driver.find_elements_by_class_name("sales_header")
sales_values = self.Driver.find_elements_by_class_name("sales_value")
numColumns = len(sales_headers)
value_partition = self.PartitionList(sales_values, numColumns)
salesObjects = []
for line in value_partition:
line_dict = {}
for h, v in zip(sales_headers, line):
line_dict[h.text.replace("\n", "").replace(" ", "")] = v.text
salesInfo = SaleTransferInfoItem(line_dict)
salesObjects.append(salesInfo)
return salesObjects
def PartitionList(self, lst, length):
newList = []
buffer = []
for elem in lst:
if (len(buffer) < length):
buffer.append(elem)
else:
newList.append(buffer)
buffer = []
buffer.append(elem)
# Last leftover chunk
if len(buffer) == length:
newList.append(buffer)
return newList
def slugify(self, value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
"""
        value = re.sub(r'[^\w\s-]', '', value).strip().lower()
        value = re.sub(r'[-\s]+', '-', value)
return value
|
[
"p.i.g.a.c.h@gmail.com"
] |
p.i.g.a.c.h@gmail.com
|
03b906259971701d5257068ee3953200042c73a4
|
7b90a37a7917ca349161fc216f5d4ff7fedc9710
|
/nesting_usage.py
|
c8e99e09b0e7beac646801a77c5a14334514f9f7
|
[] |
no_license
|
gavin176/MyLocustDemo
|
cc022f1099dbf9eb6551d5b500e14b1da16cf5b8
|
6e2522a23ebd93d70f30c55b372c811cb433edcc
|
refs/heads/master
| 2021-09-05T07:10:39.046012
| 2018-01-25T03:37:53
| 2018-01-25T03:37:53
| 110,953,856
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,413
|
py
|
from locust import HttpLocust, TaskSet, task
class NestingTask(TaskSet):
@task
def visit_sogou(self):
url = "https://www.sogou.com"
print(url)
with self.client.get(url, catch_response=True,verify=False,name="sogou") as response:
if response.status_code == 200:
response.success()
else:
response.failure("error")
@task
class SubTask(TaskSet):
def on_start(self):
pass
@task(1)
def visit_baidu(self):
url = "https://baidu.com"
print(url)
with self.client.get(url,catch_response=True,verify=False,name="baidu") as response:
if response.status_code == 200:
response.success()
else:
response.failure("error")
@task(1)
def visit_google(self):
url = "http://google.com"
print(url)
with self.client.get(url,catch_response=True,verify=False,name="google") as response:
if response.status_code == 200:
response.success()
else:
response.failure("error")
@task(2)
def stop(self):
self.interrupt()
class WebsiteUser(HttpLocust):
task_set = NestingTask
host = "http://example.com"
min_wait = 1000
max_wait = 5000
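# (added note) Inside SubTask the task weights are 1:1:2, so roughly half of
# the scheduled sub-tasks call self.interrupt(), which hands control back to
# the parent NestingTask.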
|
[
"noreply@github.com"
] |
gavin176.noreply@github.com
|
ac3159bfe8a35361c4c3bd6e3994a05d66ae7ad1
|
99f9ed11c5eb1ee5a2a4800ad856eee2e9730018
|
/sqlalchemy_model_convert/sqlalchemy_model_convert.py
|
80430eb4df129a80249997c3c5a6f0db8049fe3a
|
[] |
no_license
|
persistty/sqlalchemy-model-convert
|
c2e5712b22f204cf1961af286ad30a7dcdef7c88
|
e6641c1b56fbe25fdd2997c6a45a530b10be6bbc
|
refs/heads/master
| 2020-04-06T17:37:33.746769
| 2019-06-23T13:02:45
| 2019-06-23T13:02:45
| 157,667,089
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,620
|
py
|
class ModelConvert(object):
"""Model transform class,Convert the object to the corresponding dictionary"""
@classmethod
def key_values_list(cls, models, related_models=[], ignore_fields=[]):
"""
Convert the list of objects into a dictionary list
:param models: List of objects to convert
:param related_models: The model object in the list to display the object of the associated query
:param ignore_fields: Fields in the list that the model object does not need to display
:return: Return list
"""
return cls.__convert_list(models, related_models, ignore_fields)
def key_values(self, related_models=[], ignore_fields=[]):
"""
Convert an object into a dictionary
:param related_models: The object to display the associated query
:param ignore_fields: Fields that do not need to be displayed
:return: Return dictionary
"""
if not hasattr(self, '__table__'):
return self.__dict__
columns = self.__table__.columns
attrs_dict = self.__convert_field(columns, ignore_fields)
for model in related_models:
value = getattr(self, model)
if isinstance(value, ModelConvert):
attrs_dict[model] = value.key_values()
elif isinstance(value, list):
attrs_dict[model] = self.key_values_list(value)
return attrs_dict
def __convert_field(self, fields, ignore_fields=[]):
"""Transform the corresponding fields of the database"""
field_dict = dict()
for column in fields:
field = column.name
if field in ignore_fields:
continue
value = getattr(self, field, None)
field_dict[field] = self.set_field_value(field, value)
return field_dict
@classmethod
def __convert_list(cls, models, related_models=[], ignore_fields=[]):
"""Convert a list"""
list_dicts = list()
for model in models:
if isinstance(model, ModelConvert):
list_dicts.append(model.key_values(related_models, ignore_fields))
else:
list_dicts.append(model)
return list_dicts
def set_field_value(self, field, value):
"""
Set the value of the corresponding field, override the method,
and modify the value of a particular field, such as the value of the date field
:param field: Field name
:param value: value
:return: Return the modified value
"""
return value
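# Hedged usage sketch (added; illustrative, assumes SQLAlchemy 1.x-style
# declarative models; the User model below is hypothetical):
if __name__ == '__main__':
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker
    Base = declarative_base()
    class User(Base, ModelConvert):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        name = Column(String)
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add(User(name='alice'))
    session.commit()
    print(session.query(User).first().key_values())  # {'id': 1, 'name': 'alice'}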
|
[
"liutangyongpersist@163.com"
] |
liutangyongpersist@163.com
|
fa253fce90a0b39ec7f500eb477e5b5a9a787010
|
b063645c5b004e7b834a95ee706c5122833cc6ce
|
/Tugas5/Pratikum2/Pratikum2_1.py
|
50e856dc01be5a0c28ffb35e8ccf8d41c13f18ce
|
[] |
no_license
|
bgspamungkas/Python
|
53bf1fc7df96f2b29936c640f0f4a5251025c9d6
|
597979aa42374ed5fd9c5964a715f02d9f535db5
|
refs/heads/main
| 2023-02-19T23:35:02.053816
| 2021-01-13T04:11:45
| 2021-01-13T04:11:45
| 303,153,729
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
def luasSegitiga (a,t):
luas = a * t / 2
return luas
alas = 10
tinggi = 20
print('Luas segitiga dg alas ',alas,
' dan tinggi ', tinggi,
' adalah ', luasSegitiga(alas,tinggi))
|
[
"noreply@github.com"
] |
bgspamungkas.noreply@github.com
|
340491cc31993dd884863ac140ddac1957461a1d
|
8e41426add05eac5959f4fb38561d1f3b18b5c05
|
/refinery/tool_manager/urls.py
|
76251a192fcf34bd4302527426433e39c847853d
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
refinery-platform/refinery-platform
|
ae2f299807b5f5d537d9bbc4b1f9a866d48b7245
|
b005e75cb79e8bfebbdf10da2b5d9d0062a5a661
|
refs/heads/develop
| 2023-01-24T01:32:29.178787
| 2019-12-09T15:54:19
| 2019-12-09T15:54:19
| 2,838,735
| 40
| 16
|
NOASSERTION
| 2023-01-11T19:52:35
| 2011-11-23T20:23:20
|
Python
|
UTF-8
|
Python
| false
| false
| 328
|
py
|
from rest_framework.routers import DefaultRouter
from . import views
# DRF url routing
router = DefaultRouter()
router.register(r'tools', views.ToolsViewSet, base_name='tools')
router.register(r'tool_definitions', views.ToolDefinitionsViewSet,
base_name='tooldefinitions')
tool_manager_api_urls = router.urls
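# Hedged usage sketch (added; illustrative): the generated routes are meant
# to be included from a project-level urls.py, e.g.
#
#   from django.conf.urls import include, url
#   urlpatterns = [url(r'^api/v2/', include(tool_manager_api_urls))]
#
# The '^api/v2/' prefix is a hypothetical example, not taken from this repo.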
|
[
"noreply@github.com"
] |
refinery-platform.noreply@github.com
|
598635ea594828d0c6926f5727c90391600b8821
|
a71e863867a067746e4360f34f86ef1e317b1410
|
/geocodemonkey/__init__.py
|
886c4909485a9b1bfb8c0fb6c954768e01b234b0
|
[
"MIT"
] |
permissive
|
loftylabs/django-geocodemonkey
|
9029c6dce88c7bae6b0636c077ae36decd7e32cc
|
40133c67ad86ef0e61601f77e5b20c6146256d5f
|
refs/heads/master
| 2020-04-08T09:05:04.802096
| 2014-04-20T17:14:37
| 2014-04-20T17:14:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,632
|
py
|
import re
import logging
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured
from django.utils.encoding import smart_str
from django.utils.timezone import now
from geopy import geocoders as geopy_geocoders
from geopy.exc import GeocoderServiceError
from models import GeocodedModel as geo_model
def get_geocoder(geocoder=None):
"""
Returns a GeocodeMonkeyGeooder instance linked to the specificed backend. If no backend is provided, returns the
an instance tied to the backend referenced by the GEOCODERS['default'] setting
"""
if geocoder is None:
geocoder = 'default'
if settings.GEOCODERS.get(geocoder, False):
return GeocodeMonkeyGeocoder(settings.GEOCODERS[geocoder])
else:
raise ImproperlyConfigured(
"Could not find geocoder config for '%s'. If no identifier was specified, please define a valid default geocoder in the "
"GEOCODERS setting" % geocoder)
class GeocodeFailed(Exception):
"""
Raised when a query was not geocodable
"""
pass
class GeocodeMonkeyGeocoder(object):
"""
Handles the basic geocoder features like cache management and returning
normalized data structures.
"""
def __init__(self, *args, **kwargs):
# Sets the class to be used for geocoding. args[0] should be dict from the GEOCODERS setting
self.geocoder_class = getattr(geopy_geocoders, args[0]['BACKEND'])
# Set whether or not this is an asynchronous geocoder
self.ASYNC = args[0].get('ASYNC', False)
self.qualified_address = ""
self.lat = None
self.long = None
def _generate_cache_key(self, address):
"""
Normalizes addresses for more effective caching
"""
return re.sub(r'[^a-z0-9]', '', str(address).lower())
def store_geocoded_address(self, qa, lat, long):
self.qualified_address = qa
self.lat = lat
self.long = long
def geocode(self, address):
# check the cache first
key = self._generate_cache_key(address)
cached_geocode = cache.get(key)
try:
if cached_geocode:
self.store_geocoded_address(cached_geocode[0], cached_geocode[1], cached_geocode[2])
logging.debug("Address %s geocoded from cache with key %s" % (address, key))
else:
qa, lat_long = self._geocode(address)
cache.set(key, (qa, lat_long[0], lat_long[1]), None)
self.store_geocoded_address(qa, lat_long[0], lat_long[1])
logging.debug("Address %s geocoded from web API and stored with key %s" % (address, key))
except GeocodeFailed:
raise LookupError("Geocoder %s did not return an address for %s" % (self.geocoder_class, address))
return self.qualified_address, (self.lat, self.long)
def geocode_to_model_instance(self, address, instance, commit=True):
"""
Performs a geocoding and saves it to the instance that was passed in.
It is expected that the instance inhertis from geocodemonkey.models.GeocodedObjectMixin
"""
if not isinstance(instance, geo_model):
raise TypeError("Instance argument is expected to be derived from geocodemonkey.models.GeocodedModel base class")
# If this is an async Geocoder, we want to perform this asynchronously
if self.ASYNC:
from celery.app import Celery
# Always commit on async
celery = Celery()
return celery.task(args=[self._geocode_to_model_instance(address, instance, commit=True)])
else:
return self._geocode_to_model_instance(address, instance, commit=commit)
def _geocode_to_model_instance(self, address, instance, commit):
qa, lat_long = self.geocode(address)
instance.qualified_address = qa
instance.latitude = lat_long[0]
instance.longitude = lat_long[1]
instance.geocoded = now()
if commit:
instance.save()
return instance
def _geocode(self, address):
"""
Instantiates the geopy Geocoder and nabs an address
"""
try:
g = self.geocoder_class()
address = smart_str(address)
result = g.geocode(address, exactly_one=False)
if result:
return result[0]
else:
raise GeocodeFailed()
except (UnboundLocalError, ValueError, GeocoderServiceError) as e:
raise Exception(e)
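# Hedged usage sketch (added; illustrative). Requires configured Django
# settings with a GEOCODERS dict; the backend name below is an assumption:
#
#   GEOCODERS = {'default': {'BACKEND': 'GoogleV3'}}
#
#   geocoder = get_geocoder()
#   qualified, (lat, lng) = geocoder.geocode('1600 Pennsylvania Ave NW')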
|
[
"casey.kinsey@gmail.com"
] |
casey.kinsey@gmail.com
|
04acd2d89b3ce66cb091b31f095e7ff5cde99fad
|
fe952ac535cdea8ee5d55fec1715db5aadad8e89
|
/scripts/preprocess_sp500.py
|
bb236919ca2be94183f61ca3238f96ee92936db8
|
[] |
no_license
|
Eva0417/tstl
|
c34dec884c41f7bad4b5afd725e62ba204556151
|
f3497acd31f303ae8eef25eaa3b370e64e717e8b
|
refs/heads/master
| 2023-06-11T18:42:20.055326
| 2021-07-01T23:12:22
| 2021-07-01T23:12:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,319
|
py
|
import os
import sys
import argparse
import random
from scipy.io import wavfile
import numpy as np
import pandas as pd
import datetime
import shutil
scriptDir = os.path.dirname(os.path.realpath(__file__)) + "/"
TRAINING_PERCENTAGE = 70
TEST_PERCENTAGE = 15
VALIDATION_PERCENTAGE = 15
WINDOW_SIZE = 50 # We use first 50 samples as input for our NN and try to predict next 1
PREDICTION_AHEAD = 0
PREDICTION_SIZE = 1
COLUMN = "Close"
parser = argparse.ArgumentParser()
parser.add_argument("pathToDataset", help="Path to csv file")
parser.add_argument("name", help="Output filename (name of dataset after preprocessing)")
parser.add_argument("--size", help="Processes only a subset of specified size of original dataset.", default=None, type=int)
args = parser.parse_args()
if not os.path.exists(args.pathToDataset):
print("Specified file does not exist. Specified path: {}".format(args.pathToDataset))
exit(0)
print("\rProcessing file {}".format(args.pathToDataset))
df = pd.read_csv(args.pathToDataset)
df = df[[COLUMN]]
data = df.to_numpy()
data = np.squeeze(data)
# Separate data into train, validation and test sets
trainEnd = int(TRAINING_PERCENTAGE/100.0 * len(data))
validationEnd = trainEnd + int(VALIDATION_PERCENTAGE/100.0 * len(data))
trainData = data[:trainEnd]
validationData = data[trainEnd:validationEnd]
testData = data[validationEnd:]
# Generate training set using sliding window
train = []
for i in range(WINDOW_SIZE + PREDICTION_AHEAD + PREDICTION_SIZE, len(trainData)):
inputWindow = trainData[i-WINDOW_SIZE-PREDICTION_AHEAD-PREDICTION_SIZE:i-PREDICTION_AHEAD-PREDICTION_SIZE]
outputWindow = trainData[i-PREDICTION_SIZE:i]
train.append((inputWindow, outputWindow))
# Generate validation set using sliding window
validation = []
for i in range(WINDOW_SIZE + PREDICTION_AHEAD + PREDICTION_SIZE, len(validationData)):
inputWindow = validationData[i-WINDOW_SIZE-PREDICTION_AHEAD-PREDICTION_SIZE:i-PREDICTION_AHEAD-PREDICTION_SIZE]
outputWindow = validationData[i-PREDICTION_SIZE:i]
validation.append((inputWindow, outputWindow))
# Generate test set using sliding window
test = []
for i in range(WINDOW_SIZE + PREDICTION_AHEAD + PREDICTION_SIZE, len(testData)):
inputWindow = testData[i-WINDOW_SIZE-PREDICTION_AHEAD-PREDICTION_SIZE:i-PREDICTION_AHEAD-PREDICTION_SIZE]
outputWindow = testData[i-PREDICTION_SIZE:i]
test.append((inputWindow, outputWindow))
# Print information
print("Training set size: {}".format(len(train)))
print("Validation set size: {}".format(len(validation)))
print("Test set size: {}".format(len(test)))
# Randomly shuffle data sets
random.shuffle(train)
random.shuffle(validation)
random.shuffle(test)
# Write training data
print("Writing training data")
mask = np.lib.format.open_memmap(scriptDir + "/../data/{}_MASK.npy".format(args.name), mode="w+", dtype="float64", shape=(WINDOW_SIZE, 1))
X = np.lib.format.open_memmap(scriptDir + "/../data/{}_train_X.npy".format(args.name), mode="w+", dtype="float64", shape=(len(train), WINDOW_SIZE, 1))
Y = np.lib.format.open_memmap(scriptDir + "/../data/{}_train_Y.npy".format(args.name), mode="w+", dtype="float64", shape=(len(train), PREDICTION_SIZE))
STREAM_MAX = np.lib.format.open_memmap(scriptDir + "/../data/{}_train_STREAM_MAX.npy".format(args.name), mode="w+", dtype="float64", shape=(len(train), 1))
for index, data in enumerate(train):
X[index, :, 0] = data[0]
Y[index, :] = data[1]
# Scale to range [0, 1]
minVal = np.min(X[index, :, :])
maxVal = np.max(X[index, :, :])
STREAM_MAX[index, 0] = max(abs(maxVal), abs(minVal))
if(maxVal - minVal == 0):
        # This window is a constant signal, so it carries no information.
X[index, :, :] = 0
else:
X[index, :, :] = X[index, :, :] - minVal
X[index, :, :] = X[index, :, :] / (maxVal - minVal)
mask[:, :] = mask[:, :] + X[index, :, :]
mask[:, :] = mask[:, :] / len(X)
# Apply mask to training set
for index, key in enumerate(X):
X[index, :, :] = X[index, :, :] - mask
del X
del Y
del STREAM_MAX
# Write validation data
print("Writing validation data")
X = np.lib.format.open_memmap(scriptDir + "/../data/{}_validation_X.npy".format(args.name), mode="w+", dtype="float64", shape=(len(validation), WINDOW_SIZE, 1))
Y = np.lib.format.open_memmap(scriptDir + "/../data/{}_validation_Y.npy".format(args.name), mode="w+", dtype="float64", shape=(len(validation), PREDICTION_SIZE))
STREAM_MAX = np.lib.format.open_memmap(scriptDir + "/../data/{}_validation_STREAM_MAX.npy".format(args.name), mode="w+", dtype="float64", shape=(len(validation), 1))
for index, data in enumerate(validation):
X[index, :, 0] = data[0]
Y[index, :] = data[1]
# Scale to range [0, 1]
minVal = np.min(X[index, :, :])
maxVal = np.max(X[index, :, :])
STREAM_MAX[index, 0] = max(abs(maxVal), abs(minVal))
if(maxVal - minVal == 0):
        # This window is a constant signal, so it carries no information.
X[index, :, :] = 0
else:
X[index, :, :] = X[index, :, :] - minVal
X[index, :, :] = X[index, :, :] / (maxVal - minVal)
X[index, :, :] = X[index, :, :] - mask
del X
del Y
del STREAM_MAX
# Write test data
print("Writing test data")
X = np.lib.format.open_memmap(scriptDir + "/../data/{}_test_X.npy".format(args.name), mode="w+", dtype="float64", shape=(len(test), WINDOW_SIZE, 1))
Y = np.lib.format.open_memmap(scriptDir + "/../data/{}_test_Y.npy".format(args.name), mode="w+", dtype="float64", shape=(len(test), PREDICTION_SIZE))
STREAM_MAX = np.lib.format.open_memmap(scriptDir + "/../data/{}_test_STREAM_MAX.npy".format(args.name), mode="w+", dtype="float64", shape=(len(test), 1))
for index, data in enumerate(test):
X[index, :, 0] = data[0]
Y[index, :] = data[1]
# Scale to range [0, 1]
minVal = np.min(X[index, :, :])
maxVal = np.max(X[index, :, :])
STREAM_MAX[index, 0] = max(abs(maxVal), abs(minVal))
if(maxVal - minVal == 0):
        # This window is a constant signal, so it carries no information.
X[index, :, :] = 0
else:
X[index, :, :] = X[index, :, :] - minVal
X[index, :, :] = X[index, :, :] / (maxVal - minVal)
X[index, :, :] = X[index, :, :] - mask
del X
del Y
del STREAM_MAX
del mask
|
[
"kecoeco@gmail.com"
] |
kecoeco@gmail.com
|
91d4f7f60cf0b51713c487a36192d9e70940c691
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03209/s568907509.py
|
a0f73fe21eee5f624d2d75824f89e6519534dec9
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
#!/usr/bin/env python3
import sys
def solve(N: int, X: int):
# a_0 = 1
# a_{n+1} = 2 * a_n + 3
# b_0 = 1
# b_{n+1} = 2 * b_n + 1
# a_n = 2**(n+2) - 3
# b_n = 2**(n+1) - 1
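# (added note) sanity check of the closed forms: a_0 = 2**2 - 3 = 1 and
# b_0 = 2**1 - 1 = 1, matching the base cases above; the step follows by
# induction, e.g. 2*(2**(n+2) - 3) + 3 = 2**(n+3) - 3.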
def f(N: int, X: int):
aN = 2 ** (N + 2) - 3
bN = 2 ** (N + 1) - 1
if X >= aN:
return bN
aN1 = 2 ** ((N - 1) + 2) - 3
ans = 0
X -= 1
if X <= 0:
return ans
ans += f(N - 1, X)
X -= aN1
if X <= 0:
return ans
ans += 1
X -= 1
if X <= 0:
return ans
ans += f(N - 1, X)
X -= aN1
if X <= 0:
return ans
X -= 1
if X <= 0:
return ans
assert False
print(f(N, X))
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
N = int(next(tokens)) # type: int
X = int(next(tokens)) # type: int
solve(N, X)
if __name__ == '__main__':
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
5f6203131ffbaad18fea774003e59e49a64b3bf9
|
840fe85b48fc7c9b4862bd506091d51c6055f68f
|
/calcD.py
|
f14183b16525b79f390e44649d96b4f7266ba001
|
[] |
no_license
|
TheXpingu1n/calcD
|
805a19287f203bb5de104da516c12b40d8a8128e
|
cea0e2f061897a0b2ac727d55c587fa8757f093d
|
refs/heads/main
| 2023-01-10T19:21:51.130478
| 2020-11-13T12:47:08
| 2020-11-13T12:47:08
| 312,128,987
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,745
|
py
|
#coded by : Azad Mohammed
import math
import pyfiglet
import termcolor
print(termcolor.colored(pyfiglet.figlet_format('calcD'), color = 'green'))
pi = math.pi
e = math.e  # Euler's number (math.exp is the exponential function)
tan = math.tan
cos = math.cos
sin = math.sin
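# (added note) math.sin/cos/tan expect radians; angle inputs entered in
# degrees must be converted with math.radians() first (see rhombus_area2).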
class Calculate_Areas2D:
def circle_area(self) :
rad = input('Please insert the value of the radius : ')
r = int(rad)
circleA = (pi * (pow(r, 2)))
print(f'The area is : {circleA} cm²')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def tri_area(self) :
b = input('Insert a value of the base : ')
h = input('Insert a value of the height : ')
bas = int(b)
hei = int(h)
triangleA = float(0.5) * bas * hei
print(f'The area is : {triangleA} cm²')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def squ_area(self) :
a = input('The length of the side : ')
side = int(a)
        sqA = pow(side, 2)
print(f'The area is : {sqA} cm²')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def rect_area(self) :
l = input('The length please : ')
w = input('And the width : ')
leng = int(l)
wid = int(w)
rectA = leng * wid
print(f'The area is {rectA} cm²')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def para_area(self):
b = input('The base length please : ')
h = input('And the height legnth : ')
basee = int(b)
hei = int(h)
paralA = basee * hei
print(f'The area is : {paralA} cm²')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def trap_area(self):
APa = input('Please enter the first parallel line : ')
BPa = input('Please enter the second parallel line : ')
Hei = input('Height : ')
APara = int(APa)
BPara = int(BPa)
H = int(Hei)
trapizumA = 0.5 * (APara + BPara) * H
print(f'The area is : {trapizumA} cm²')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def elipise_area(self) :
a = input('Enter length of major axis : ')
b = input('Enter length of minor axis : ')
sideA = int(a) * 0.5
sideB = int(b) * 0.5
elipA = pi * sideA * sideB
print(f'The area : {elipA} cm²')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def Cres_area(self):
RR = input('Please enter your first radius (biggest) length : ')
r = input('Please enter your second radius (smallest) length : ')
rad1 = int(RR)
rad2 = int(r)
AofC = (pi*rad1**2)-(pi*rad2**2)
print(f'The area is : {AofC} cm²')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def rhombus_area(self):
print('''Choose *By numbers* : \n
1- Rhombus area by height and side '1' \n
2- One side one angle '2' \n
3- Length of diagonals '3' ''')
ChooseNo = input('Enter the number : ')
no = int(ChooseNo)
no1 = 1
no2 = 2
no3 = 3
def rhombus_area1(self):
h = input('Enter the height : ')
s = input('Enter the side : ')
Height = int(h)
Side = int(s)
area1 = Height * Side
print(f'The area is : {area1} cm²')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def rhombus_area2(self):
s = input("Enter length of side : ")
Mesaure = input('And the measurment of angle : ')
Side1 = int(s)
MofAngle = int(Mesaure)
            area2 = pow(Side1, 2) * sin(math.radians(MofAngle))  # math.sin expects radians
print(f'The area is : {area2} cm²')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def rhombus_area3(self):
d = input('Enter the length of the first diameter : ')
d2 = input('And the second diameter : ')
di1 = int(d)
di2 = int(d2)
area3 = (di1 * di2) / 2
print(f'The area is : {area3} cm²')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
if no == no1 :
rhombus_area1(1)
elif no == no2 :
rhombus_area2(2)
elif no == no3 :
rhombus_area3(3)
else:
raise ValueError
# Calculate_Areas2D.rhombus_area(0)  # stray debug call; would prompt at import time
class Calculate_Areas3D:
def Cube_surfarea(self):
edge = input('Please enter length of edge : ')
A = int(edge)
surfAreaC = 6*A**2
print(f'The area is : {surfAreaC} cm³')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def RecPrism_surArea(self):
l = input('Enter the length : ')
w = input('Enter the width : ')
h = input('Enter the height : ')
length = int(l)
width = int(w)
height = int(h)
        surfArea = 2 * ((width * length) + (height * length) + (height * width))  # 2(lw + lh + wh)
print(f'The area is : {surfArea} cm³')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def Cy_surfarea(self):
radius = input('Enter the length of the radius : ')
height = input('And the height : ')
r = int(radius)
h = int(height)
area = 2 * pi * r * (r + h)
print(f'The area is : {area} cm³')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def Cone_surfarea(self):
radius = input('Enter the length of the radius : ')
height = input('Enter the length of the slant height : ')
r = int(radius)
h = int(height)
area = pi * r * (r + h)
print(f'The area is : {area} cm³')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def Sp_surfArea(self):
radius = input('Enter the length of the radius : ')
r = int(radius)
area = 4 * pi * r**2
print(f'The area is : {area} cm³')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def hemiSp_surfArea(self):
radius = input('Enter the length of the radius : ')
r = int(radius)
area = 3 * pi * pow(r , 2)
print(f'The area is : {area} cm³')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def pyramid_LSA(self):
per = input('Enter the perimeter : ')
height = input('And the slant height : ')
p = int(per)
l = int(height)
area = 0.5 * p * l
print(f'The area is : {area} cm³')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def pyramid_TSA(self):
per = input('Enter the perimeter : ')
height = input('And the slant height : ')
Base = input('Input the base too : ')
p = int(per)
l = int(height)
B = int(Base)
area = 0.5 * p * l + B
print(f'The area is : {area} cm³')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
class Calculate_Areas_Plat5:
def Tetrahedron(self):
def Tetrahedron_1(self):
side = input('Enter the length of the side : ')
a = int(side)
area = 3 * (math.sqrt(3))/4 * a ** 2
print(f'The area is : {area} cm³')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def Tetrahedron_2(self):
side = input('Enter the length of the side : ')
a = int(side)
area = 4 * (math.sqrt(3))/4 * a ** 2
print(f'The area is : {area} cm³')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
        choose = input('What do you want? Slant surface area of the tetrahedron ->> SA \n Whole surface area of the tetrahedron ->> WA \n >>> ').capitalize().strip()
        # .capitalize() turns 'SA' into 'Sa', so match on the first letter.
        if choose.startswith('S'):
            Tetrahedron_1(0)
        elif choose.startswith('W'):
            Tetrahedron_2(0)
        else:
            raise ValueError
def hexahedron(self):
        print('A hexahedron is just a cube; use the cube surface area calculator instead.')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def Octahedron(self):
edge = input('Enter The length of the edge : ')
a = int(edge)
area = 2 * pow(a , 2) * math.sqrt(3)
print(f'The area is : {area} cm³')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def Dodecahedron(self):
length = input('Enter the edge length : ')
a = int(length)
surfarea = 3 * math.sqrt(25 + 10 * math.sqrt(5)) * a ** 2
print(f'The area is : {surfarea} cm³')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def Icosahedron(self):
length = input('Enter the length of the side : ')
a = int(length)
area = 5 * math.sqrt(3) * a ** 2
print(f'The area is : {area} cm³')
quit1 = input('Want to close? Y or N?').capitalize().strip()
if quit1 == 'Y':
print('Exiting!')
elif quit1 == 'N':
open_pro()
else:
raise ValueError
def open_pro():
    print(''' Welcome \n For 2D shape area calculator enter : "T" \n For 3D shape area calculator enter : "A" \n For platonic solid area calculator enter "P" \n To quit enter 'Q' ''')
answer = input('>> ').capitalize().strip()
if answer == 'T' :
print('''Hello \n
- For Circle area calculator ->> 'C' \n
- For Triangle area calculator ->> 'T' \n
- For Square area calculator ->> 'S' \n
- For Rectangle area calculator ->> 'R' \n
- For Parallelogram area calculator ->> 'P' \n
    - For Trapezium area calculator ->> 'U' \n
- For Ellipse area calculator ->> 'E' \n
    - For Crescent area calculator ->> 'N' \n
- For Rhombus area calculator ->> 'H' \n
''')
answer2 = input('>> ').capitalize().strip()
if answer2 == 'C':
Calculate_Areas2D.circle_area(0)
elif answer2 == 'T':
Calculate_Areas2D.tri_area(0)
elif answer2 == 'S':
Calculate_Areas2D.squ_area(0)
elif answer2 == 'R':
Calculate_Areas2D.rect_area(0)
elif answer2 == 'P':
Calculate_Areas2D.para_area(0)
elif answer2 == 'U':
Calculate_Areas2D.trap_area(0)
elif answer2 == 'E':
Calculate_Areas2D.elipise_area(0)
elif answer2 == 'N':
Calculate_Areas2D.Cres_area(0)
elif answer2 == 'H':
Calculate_Areas2D.rhombus_area(0)
else :
raise ValueError
elif answer == 'A':
print('''Hello \n
- For Cube surface area calculator ->> 'C' \n
- For Rectangular prism surface area calculator ->> 'R' \n
- For Cylinder surface area calculator ->> 'Y' \n
- For Cone surface area calculator ->> 'O' \n
- For Sphere surface area calculator ->> 'S' \n
- For Hemisphere surface area calculator ->> 'H' \n
    - For Pyramid Lateral surface area calculator ->> 'L' \n
- For Pyramid Total surface area calculator ->> 'T' \n
''')
answer3 = input('>> ').capitalize().strip()
if answer3 == 'C':
Calculate_Areas3D.Cube_surfarea(0)
elif answer3 == 'R':
Calculate_Areas3D.RecPrism_surArea(0)
elif answer3 == 'Y':
Calculate_Areas3D.Cy_surfarea(0)
elif answer3 == 'O':
Calculate_Areas3D.Cone_surfarea(0)
elif answer3 == 'S':
Calculate_Areas3D.Sp_surfArea(0)
elif answer3 == 'H':
Calculate_Areas3D.hemiSp_surfArea(0)
elif answer3 == 'L':
Calculate_Areas3D.pyramid_LSA(0)
elif answer3 == 'T':
Calculate_Areas3D.pyramid_TSA(0)
else :
raise ValueError
elif answer == 'P':
print('''Hello \n
- For Tetrahedron area calculator ->> 'T' \n
- For Hexahedron area calculator ->> 'H' \n
- For Octahedron area calculator ->> 'O' \n
- For Dodecahedron area calculator ->> 'D' \n
- For Icosahedron area calculator ->> 'I' \n
''')
answer4 = input('>> ').capitalize().strip()
if answer4 == 'T':
Calculate_Areas_Plat5.Tetrahedron(0)
elif answer4 == 'H':
Calculate_Areas_Plat5.hexahedron(0)
elif answer4 == 'O':
Calculate_Areas_Plat5.Octahedron(0)
elif answer4 == 'D':
Calculate_Areas_Plat5.Dodecahedron(0)
elif answer4 == 'I':
Calculate_Areas_Plat5.Icosahedron(0)
else :
print('Error occured , try to enter a valid option')
elif answer == 'Q':
print('Closing...')
else:
raise ValueError
open_pro()
#Calculate_Areas2D.Cres_area(0,1)
|
[
"noreply@github.com"
] |
TheXpingu1n.noreply@github.com
|
d157fefef96b07270738959f77b7cff2930c4a46
|
ad44e43f263e9678b0b72b0ab9cfd9350c97075d
|
/Day2/acceptinput.py
|
c589900d63258f7673dc45ddeff86ecef83c407a
|
[] |
no_license
|
mahesstp/python
|
cc95fe587baed83c241c5439ace6382a8c870082
|
9f93cd98a1855294d82a4121177eec5dc1509078
|
refs/heads/master
| 2020-04-02T21:49:49.286662
| 2018-08-31T11:46:26
| 2018-08-31T11:46:26
| 154,812,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
#!/usr/bin/python
from secondfile import *
def acceptInput():
    # input() returns str in Python 3; cast to float so addNumbers adds
    # numerically instead of concatenating strings.
    firstValue = float(input('Enter your first float number: '))
    secondValue = float(input('Enter your second float number: '))
    print('Type of firstValue is ' + str(type(firstValue)))
    return firstValue, secondValue
def addNumbers(firstNumber, secondNumber):
return firstNumber + secondNumber
def main():
x, y = acceptInput()
print ( type ( x ) )
print ( type ( y ) )
result = addNumbers ( x , y )
print ( 'The sum of ' + str(x) + ' and ' + str(y) + ' is ' + str(result) )
main()
'''
if __name__ == '__main__':
print ( __name__ )
main()
'''
|
[
"mail2jegan@gmail.com"
] |
mail2jegan@gmail.com
|
e849afe585c0d25de97fbe749f49c5c34a0b092c
|
71f2d0d97146650975f066191b1851a13a3de43c
|
/Classes_and_Object_Programs/cls9.py
|
da0f9251d350fb85cc28080c8223cacd0a2a5b50
|
[
"MIT"
] |
permissive
|
saratkumar17mss040/Python-lab-programs
|
84e8537927498aeeb12e52e614cdaae489a7fe91
|
a2faa190acaaa30d92d4c801fd53fdc668c3c394
|
refs/heads/master
| 2023-01-13T06:09:24.358203
| 2020-11-18T04:43:14
| 2020-11-18T04:43:14
| 289,944,123
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
# encapsulation - protected members - 1
class Person:
def __init__(self):
self._name = 'sam'
class Student(Person):
def __init__(self):
Person.__init__(self)
print(self._name)
s = Student()
p = Person()
print(p._name)
print(s._name)
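# (added note) a single leading underscore is only a naming convention in
# Python: _name remains accessible from outside the class, as both prints
# above demonstrate.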
|
[
"sarathdon9245@gmail.com"
] |
sarathdon9245@gmail.com
|
0c8767bb5641218069611b38304e4fa40d8d3439
|
f7334f229bb73926903cef6ee5ba7a9d27349f56
|
/webempresa/social/migrations/0001_initial.py
|
52e2ff8b2aac685ef639ab66f5b9801d7b580164
|
[] |
no_license
|
monikafg/web-empresa-curso-django-2
|
e52408d9b0171e81c3366b4ed78178e97ffae867
|
d6cc5b074f19156c034ce80c964c558267853d82
|
refs/heads/master
| 2020-04-07T23:24:14.793136
| 2018-11-23T10:50:35
| 2018-11-23T10:50:35
| 158,811,088
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,066
|
py
|
# Generated by Django 2.0.2 on 2018-10-31 12:41
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Link',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.SlugField(max_length=100, unique=True, verbose_name='Nombre Clave')),
('name', models.CharField(max_length=200, verbose_name='Red Social')),
('url', models.URLField(blank=True, null=True, verbose_name='Enlace')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Fecha Creación')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Fecha Edición')),
],
options={
'verbose_name': 'enlace',
'verbose_name_plural': 'enlaces',
'ordering': ['name'],
},
),
]
|
[
"monicafreygrimm@gmail.com"
] |
monicafreygrimm@gmail.com
|
bc60600bbd7f640a50d61ab429393bf190b0de37
|
6c81fbea3c897b97fed56f97c1bc657b632beb04
|
/classifier/Basic_ML_base/classify.py
|
feb5b1df7e5517e185732e2869ada6cb48e09182
|
[] |
no_license
|
JCai2017/SecurityProject
|
00e0c992ee802e52673a36580b28dd27cf181f12
|
25d0a0a497127dc30fe883115cdca0770643f796
|
refs/heads/master
| 2021-08-30T12:14:30.418667
| 2017-12-17T21:18:47
| 2017-12-17T21:18:47
| 110,923,892
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,127
|
py
|
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import LSTM
from keras.preprocessing import sequence
from keras.utils.np_utils import to_categorical
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from matplotlib import pyplot
# fix random seed for reproducibility
np.random.seed(7)
# load the dataset
#df = pd.read_csv('data.csv')
df = pd.read_table('data.csv', sep=",", usecols=range(164))
#print df
# plot original data.csv over time
for num in range(63):
df.iloc[num].plot()
pyplot.show()
# Rescale it - should help network performance
scaler = MinMaxScaler(feature_range=(0,1))
X = scaler.fit_transform(df)
y = pd.read_csv('key.csv')
y = to_categorical(y, 1 << 6)
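# (added note) 1 << 6 == 64: labels are one-hot encoded over the 64 possible
# values of a 6-bit key chunk, matching the softmax output layer below.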
#print y.shape
# Just so we can have a validation set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=7)
# This next part is because keras is picky with how they want data
X_train = np.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))
X_test = np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1]))
# truncate and pad input sequences
# create the model
model = Sequential()
model.add(LSTM(125, input_shape=(X_train.shape[1], X_train.shape[2])))
# Since it seems to be a categorical problem, use softmax activation instead of linear
model.add(Dense(1 << 6, activation='softmax'))  # 64 output units, one per possible value of a 6-bit key chunk
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) #rmsprop
print(model.summary())
# diagnosis
history = model.fit(X_train, y_train, epochs=200, batch_size=64, validation_split = 0.33)
pyplot.plot(history.history['loss'])
pyplot.plot(history.history['val_loss'])
pyplot.title('model train vs validation loss')
pyplot.ylabel('loss')
pyplot.xlabel('epoch')
pyplot.legend(['train', 'validation'], loc='upper right')
pyplot.show()
scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
#print(model.predict(X_test))
|
[
"aytung94@gmail.com"
] |
aytung94@gmail.com
|
eeb74448f321c85c0bb760e02b2d6afb1a90ebdc
|
5274373dcbfbc2b7fd16050cfab1ac3f378c57bb
|
/prepare.py
|
a4facf27a4b64e9a61cf98ea3f819f28bcb5c057
|
[] |
no_license
|
MainaliB/time-series-exercises
|
7a1e8d4370c5140c3d50c70e478352dec7afffb0
|
00dafb61347f9037882b47696a00a68fab2cf72f
|
refs/heads/main
| 2023-01-05T14:54:57.984526
| 2020-11-03T02:58:24
| 2020-11-03T02:58:24
| 306,452,863
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
import pandas as pd
from datetime import timedelta, datetime
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
def prep_sales_df(df):
'''Takes in a dataframe, performs necessary date time conversion, sets date as index, creates new
columns(months, day, and sales total) and returns the dataframe'''
df.sale_date = pd.to_datetime(df.sale_date, format = '%a, %d %b %Y %H:%M:%S %Z')
df = df.set_index('sale_date').sort_index()
df['month'] = df.index.month_name()
df['day']= df.index.day_name()
df.rename(columns ={'sale_amount':'quantity'}, inplace = True)
df['sales_total'] = df.quantity * df.item_price
return df
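# Hedged usage sketch (added; illustrative, with made-up demo values):
if __name__ == '__main__':
    demo = pd.DataFrame({
        'sale_date': ['Tue, 01 Jan 2013 00:00:00 GMT'],
        'sale_amount': [13.0],
        'item_price': [0.84],
    })
    print(prep_sales_df(demo)[['month', 'day', 'sales_total']])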
|
[
"bibek.mainali20@gmail.com"
] |
bibek.mainali20@gmail.com
|
5ed038d04b8958a97134dbddf90e1607c7731360
|
4f6d7307a00d5a5ddecc12203a7d8ebc4a9ae95b
|
/api/permissions.py
|
23b1901230d259a29d2dbdf9cd74d2f68ba7a592
|
[] |
no_license
|
Cscotttaakan/DjangoRestAPITutorial
|
d67728fb48d1ae346b61a3d7afae8f1999ef168b
|
9fe67d315b257a8d6584c5cfc18ff8339ac46e5f
|
refs/heads/master
| 2020-04-12T21:31:44.155967
| 2018-12-22T21:40:25
| 2018-12-22T21:40:25
| 162,764,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
from rest_framework.permissions import BasePermission
class IsOwner(BasePermission):
    """Object-level permission: only the owner of an object may access it."""
    def has_object_permission(self, request, view, obj):
        # The original checked isinstance(obj, Bucketlist) but returned the
        # same ownership comparison on both branches, so one check suffices.
        return obj.owner == request.user
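# Hedged usage sketch (added; illustrative): attach the permission to a DRF
# view, e.g.
#
#   class BucketlistDetail(generics.RetrieveUpdateDestroyAPIView):
#       permission_classes = (permissions.IsAuthenticated, IsOwner)
#
# The view name and the IsAuthenticated pairing are assumptions, not taken
# from this repo.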
|
[
"cscott_taakan@yahoo.com"
] |
cscott_taakan@yahoo.com
|
de7ff038bc97d99b5b5417effac204ade6feffef
|
d9796881c5f39d8057bf9e5b47b5825b052b37eb
|
/FrostGem/settings.py
|
4ccaf8aecc9b166d2b5b54270d54f33692d870bb
|
[] |
no_license
|
RedaAbuKhamdi/FrostGem
|
d16f5f611a748cd107438ad0e6a06b787d408e28
|
b3746b9ce747ae23316930eb1b4d6cd1044db030
|
refs/heads/master
| 2023-06-05T23:58:13.307815
| 2021-06-29T16:24:54
| 2021-06-29T16:24:54
| 381,429,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,518
|
py
|
"""
Django settings for FrostGem project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-ist_@u8wpk(ch2aop*t*f#9e*dsheuzv-qd&e=48rrzl63qq76'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'FrostGem.urls'
# Configure templates. DIRS - list of directories
# that django will look in for the html files
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'FrostGem' / 'views'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'FrostGem.wsgi.application'
# Database settings
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'FrostGemDB',
'USER': 'root',
'PASSWORD': 'pass'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Kaliningrad'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# URL of static files. Anything that starts with this URL is identified as a static file
STATIC_URL = '/static/'
# Directories where django will look for the said files
STATICFILES_DIRS = [
BASE_DIR / "FrostGem" / "static"
]
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"30412098+RedaAbuKhamdi@users.noreply.github.com"
] |
30412098+RedaAbuKhamdi@users.noreply.github.com
|
d8c69bbd5692d0ee667598a089bd5b101c022a65
|
bf57ecee77df0a08f2456a05918d6585c68a92ea
|
/JITNet/datasets/osvos_dataset.py
|
06ca304be21304d431803ec5615adc91c47061e6
|
[] |
no_license
|
abollu779/Multiple-Object-Tracking
|
8d0f602976b2fb5d40d0539a0e4a43f080656f21
|
0be5a067a3f469f73b804a9a03c8b4157ece1d54
|
refs/heads/master
| 2022-11-05T18:11:02.001614
| 2019-05-08T16:55:16
| 2019-05-08T16:55:16
| 181,527,805
| 0
| 1
| null | 2022-10-12T02:46:24
| 2019-04-15T16:43:13
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,463
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from PIL import Image
import os
import numpy as np
import sys
import cv2
# Mask R-CNN utilities
sys.path.append(os.path.realpath('./datasets'))
sys.path.append(os.path.realpath('./utils'))
from mask_rcnn_tfrecords import get_dataset, batch_segmentation_masks,\
visualize_masks
from mask_rcnn_stream import MaskRCNNSequenceStream
import video_distillation
from video_distillation import sequence_to_class_groups_stable
"""Dataset class for OSVOS, using the same interface as the original."""
class OSVOS_Dataset:
def __init__(self, sequence, dataset_dir, sequence_limit, stride, height, width, class_index, start_frame):
"""Initialize the Dataset object
Args:
sequence: sequence name
dataset_dir: Absolute path to dataset root
sequence_limit: maximum number of video sequence chunks to load
stride: stride to run MRCNN teacher
height: height of full size output images
width: width of full size output images
            class_index: one-indexed class to segment
            start_frame: frame index at which to start reading the input stream
        """
self.height = height
self.width = width
self.class_index = class_index
self.stride = stride
self.sequence_limit = sequence_limit
# Load the sequence using JITNet utilities
print('Initializing dataset...')
# assemble video files and detection paths
video_files = []
detections_paths = []
sequence_to_video_list = \
video_distillation.get_sequence_to_video_list(
dataset_dir, dataset_dir,
video_distillation.video_sequences_stable)
assert(sequence in sequence_to_video_list)
sequence_path = os.path.join(dataset_dir, sequence)
assert(os.path.isdir(sequence_path))
assert(sequence in video_distillation.sequence_to_class_groups_stable)
num_sequences = 0
for s in sequence_to_video_list[sequence]:
video_files.append(os.path.join(sequence_path, s[0]))
detections_paths.append(os.path.join(sequence_path, s[1]))
num_sequences = num_sequences + 1
if num_sequences >= sequence_limit:
break
self.class_groups = sequence_to_class_groups_stable[sequence]
print(video_files)
print(detections_paths)
print(self.class_groups)
self.class_groups = [ [video_distillation.detectron_classes.index(c) for c in g] \
for g in self.class_groups ]
self.num_classes = len(self.class_groups) + 1
self.input_streams = MaskRCNNSequenceStream(video_files,
detections_paths,
start_frame=start_frame,
stride=1)
img, label = self._stream_next(self.input_streams)
# these are also used for the first test batch
self.first_img = img
self.first_label = label
self._augment_data(img, label)
# Init parameters
self.train_ptr = 0
self.test_ptr = 0
self.train_size = 6 # 3 scales, with a flip for each scale
self.test_size = stride
self.train_idx = np.arange(self.train_size)
np.random.shuffle(self.train_idx)
print('Done initializing Dataset.')
def _augment_data(self, img, label):
self.images_train = []
self.labels_train = []
data_aug_scales = [0.5, 0.8, 1]
for scale in data_aug_scales:
img_size = (int(self.height * scale), int(self.width * scale))
img_sc = cv2.resize(img, img_size)
label_sc = cv2.resize(label, img_size)
self.images_train.append(np.array(img_sc, dtype=np.uint8))
self.labels_train.append(np.array(label_sc, dtype=np.uint8))
# add flip
img_sc_fl = np.fliplr(img_sc).astype(np.uint8)
label_sc_fl = np.fliplr(label_sc).astype(np.uint8)
self.images_train.append(img_sc_fl)
self.labels_train.append(label_sc_fl)
def _stream_next(self, stream):
# grab a single image and mask from input_streams
frame, boxes, classes, scores, masks, num_objects, frame_id = next(stream)
img = cv2.resize(frame, (self.width, self.height))
boxes = np.expand_dims(boxes, axis=0)
classes = np.expand_dims(classes, axis=0)
scores = np.expand_dims(scores, axis=0)
masks = np.expand_dims(masks, axis=0)
num_objects = np.expand_dims(num_objects, axis=0)
labels_vals, _ = batch_segmentation_masks(1,
(self.height, self.width),
boxes, classes, masks, scores,
num_objects, True,
self.class_groups)
labels_val = np.reshape(labels_vals, (self.height, self.width))
# only consider one class label
labels_mask = (labels_val == self.class_index)
label = np.zeros((self.height, self.width), dtype=np.uint8)
label[labels_mask] = 255
return img, label
def next_batch(self, batch_size, phase):
"""Get next batch of image (path) and labels
Args:
phase: 'train' or 'test', starts with one train
Returns:
images: List of Numpy arrays of the images
labels: List of Numpy arrays of the labels
"""
        if batch_size != 1:
            raise ValueError('batch_size must be 1')
if phase == 'train':
# return from the premade list
index = self.train_idx[self.train_ptr]
self.train_ptr += 1
self.train_ptr = self.train_ptr % self.train_size
if self.train_ptr == 0:
np.random.shuffle(self.train_idx)
return [self.images_train[index]], [self.labels_train[index]]
else:
# read from the stream and return
if self.test_ptr == 0:
self.test_ptr += 1
return [self.first_img], [self.first_label]
else:
img, label = self._stream_next(self.input_streams)
if self.test_ptr >= self.test_size:
raise StopIteration('next_batch test should not be called more than stride times.')
self.test_ptr += 1
return [img], [label]
def reset_cycle(self):
"""Get ready for the next cycle of data."""
# grab a new test frame and perform augmentation.
img, label = self._stream_next(self.input_streams)
# these are also used for the first test batch
self.first_img = img
self.first_label = label
self._augment_data(img, label)
# reset the pointers and variables
self.train_ptr = 0
self.test_ptr = 0
self.train_size = 6 # 3 scales, with a flip for each scale
self.test_size = self.stride
self.train_idx = np.arange(self.train_size)
np.random.shuffle(self.train_idx)
def get_train_size(self):
return self.train_size
def get_test_size(self):
return self.test_size
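# Hypothetical construction (paths and values below are placeholders, not from the original):
# dataset = OSVOS_Dataset(sequence='soccer', dataset_dir='/data/distillation',
#                         sequence_limit=1, stride=8, height=720, width=1280,
#                         class_index=1, start_frame=0)
# imgs, labels = dataset.next_batch(1, 'train')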
|
[
"abollu779@gmail.com"
] |
abollu779@gmail.com
|
5a03ac597a444bda5ed3fc444cbe62979129c614
|
5a200273e064fe8b2d4b4b69be80c8e32df3ed9f
|
/ch09-pagination/app/__init__.py
|
7139b2a2578530df0e2576e09e5fe6d49cfa846b
|
[] |
no_license
|
NajibAdan/flask-tutorial
|
d05ede626b43b2924a314aa45584566bfd0cda7d
|
e4711668571eea7424f8babf71bbc8b76838cca1
|
refs/heads/master
| 2022-07-08T19:32:49.332757
| 2020-05-05T13:13:19
| 2020-05-05T13:13:19
| 257,529,254
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,055
|
py
|
from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
import logging
from logging.handlers import SMTPHandler, RotatingFileHandler
import os
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
Migrate = Migrate(app, db)
login = LoginManager(app)
login.login_view = 'login'
from app import routes, models, errors
if not app.debug:
if app.config['MAIL_SERVER']:
auth = None
if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
            auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
secure = None
if app.config['MAIL_USE_TLS']:
secure = ()
mail_handler = SMTPHandler(
mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
fromaddr='no-reply@' + app.config['MAIL_SERVER'],
toaddrs=app.config['ADMINS'], subject='Microblog Failure',
credentials=auth, secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
if os.path.exists('.flaskenv'):
with open('.flaskenv') as file:
line = file.readline().strip()
current_directory = line[line.find('=')+1:line.find('/')] + '/'
elif os.getenv('FLASK_APP') is not None:
current_directory = os.environ['FLASK_APP'].split('/')[0]+'/'
else:
current_directory = ''
if not os.path.exists(current_directory+'logs'):
os.mkdir(current_directory+'logs')
file_handler = RotatingFileHandler(current_directory+'logs/microblog.log',
maxBytes=10240,
backupCount=10)
file_handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('Microblog startup')
|
[
"najibadan@airmail.cc"
] |
najibadan@airmail.cc
|
0fd1806e5506112797a8cb6495adf054e9cb9c25
|
9fb6b17f3e07ca1c35d453103954c82d5d17a37d
|
/http_check/datadog_checks/http_check/__about__.py
|
61058f8e0152dea5457cd85007db7b668ce2746d
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
acvejic/integrations-core
|
0fc205b6388e0065f419c22a7af7d68fb68869ea
|
8ff1157f63acc649f15a169906f4a5f27508fcf7
|
refs/heads/master
| 2023-01-04T15:37:48.124433
| 2020-10-28T13:35:29
| 2020-10-28T13:35:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
__version__ = "4.11.0"
|
[
"noreply@github.com"
] |
acvejic.noreply@github.com
|
4b6a0a46e3adc9f8de24088b29069bac7537fcb8
|
f92fd5ba7ec30729dada169f7684e2edf565c139
|
/test/case/player/TestPlayImgsDelete.py
|
260981fb6ac07740c9cabc4022522ba95fdab5f5
|
[] |
no_license
|
rico-o/autoTest
|
6ea0c83ab3ef60bf8ee13d00f61995d3357f4c37
|
6ca03420faf1b0523a567eed11a8212796ee3d35
|
refs/heads/master
| 2022-03-30T14:34:55.787787
| 2020-02-08T11:46:23
| 2020-02-08T11:46:23
| 218,439,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 700
|
py
|
from test.pages.PlayerPage import *
import unittest
from test.common.Common import *
class TestPlayerImgs_delete(unittest.TestCase):
"""运动员图片批量删除"""
@classmethod # 类方法
def setUpClass(cls):
cls.player = PlayerPage()
cls.player.login()
cls.player.iframe1()
cls.player.base_setting()
cls.player.iframe0()
    @classmethod  # class method
def tearDownClass(cls):
cls.player.quit_driver()
def test_player_imgs_delete(self):
self.player.player_select()
self.player.delete_img()
self.player.parentframe()
self.player.delete_confirm()
if __name__ == '__main__':
unittest.main()
|
[
"53588251+rico-o@users.noreply.github.com"
] |
53588251+rico-o@users.noreply.github.com
|
b0be8f2a55bbc9929898b604c25ab9a0f3e565a7
|
dff1473735f229f54561bb4c7f390472108c1839
|
/ActFunctions/softsign.py
|
135c05f361716f8375da34dfaac129fbb8f172ea
|
[] |
no_license
|
HenryGJackson/DecisionTree
|
7d3f8bf669c4a6fe62d0b5255b87da20ad835004
|
4ca0cb93511bba6b758655b993527d0dd177c6e6
|
refs/heads/master
| 2021-01-20T08:43:30.670445
| 2017-05-08T07:17:10
| 2017-05-08T07:17:10
| 90,184,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
class softsign:
    # Softsign activation function: f(x) = x / (1 + |x|)
def __init__(self):
self.coeff = 1
self.range = [-1, 1]
def __str__(self):
return "Function: Soft Sign Function"
def setParam(self, value):
self.coeff = value
    def evaluate(self, value):
        # Branching on the sign is equivalent to value / (1 + abs(value))
        if value < 0:
            return value / (1 - value)
        else:
            return value / (1 + value)
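# A minimal usage sketch (illustrative inputs, assuming the class above):
if __name__ == "__main__":
    fn = softsign()
    for x in (-10, -1, 0, 1, 10):
        print(x, fn.evaluate(x))  # outputs stay inside fn.range, i.e. (-1, 1)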
|
[
"jacksonhg95@gmail.com"
] |
jacksonhg95@gmail.com
|
b58f3c6a1b6de08b96f1eb51373d4641c2ab4815
|
4f0db9e506a11dc75828cf26cfd60e1a5d745d54
|
/monthcalc.py
|
3d6769b6275f9e0f3669d3fc57ee37755c672f2c
|
[] |
no_license
|
keerthankkr/test-upload
|
832cf878afbafcd78bf0ed9e660c2fd2ab162d34
|
d771475cd8745c2630461259432baad47ddd137a
|
refs/heads/master
| 2023-06-21T18:24:10.231281
| 2021-07-15T12:25:15
| 2021-07-15T12:25:15
| 386,279,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 670
|
py
|
from datetime import date
import sys
def twelve_month_apart(date1,date2):
try:
d1 = date.fromisoformat(date1)
d2 = date.fromisoformat(date2)
except ValueError:
        print('ERROR: Invalid date or incorrect date format. Provide valid dates in YYYY-MM-DD format e.g. 2019-04-30.')
sys.exit(-1)
if d1 > d2:
print('ERROR: Second date should be same or later than first date.')
sys.exit(-1)
if d2.month == 2 and d2.day == 29:
d2 = d2.replace(day=28)
d2_prev_year = d2.replace(year=d2.year-1)
    return d2_prev_year >= d1
# example
#twelve_month_apart('2001-02-28', '2002-02-27')
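# A quick runnable check (expected values reasoned from the logic above):
if __name__ == '__main__':
    print(twelve_month_apart('2001-02-28', '2002-02-27'))  # False: one day short of 12 months
    print(twelve_month_apart('2001-02-28', '2002-02-28'))  # True: exactly 12 months apart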
|
[
"keerthankumarreddy141998@gmail.com"
] |
keerthankumarreddy141998@gmail.com
|
cfdbc02514bd83216de38f1fa8a6cfdf0d52f7b5
|
bdbf67a9f144bbc2afc97aef34a7784702cc95e1
|
/configs/reppoints_v2/reppoints_v2_r101_fpn_giou_mstrain_2x_coco.py
|
845f5ddd7fe8b188f6a655b0999ddf8f3854010b
|
[
"MIT"
] |
permissive
|
peterkim0622/RepPointsV2
|
15e1f5454c1583ab2f224108a23ef2af5c4522c6
|
36e1e54549061a735831d776781cb64caf6de68a
|
refs/heads/master
| 2023-03-21T12:43:31.902169
| 2020-11-20T17:33:37
| 2020-11-20T17:33:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
_base_ = './reppoints_v2_r50_fpn_giou_mstrain_2x_coco.py'
model = dict(
pretrained='torchvision://resnet101',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'))
|
[
"scalsol@163.com"
] |
scalsol@163.com
|
063d2c8f294ea7ce2df64b3c217d7f5d7d860ab8
|
21615505744aa59e7200d4c34da664706306aa03
|
/model_training/get_models.py
|
8f4e4472fc16daf109eb13c19649cdd359172796
|
[] |
no_license
|
Scintillare/piecesofcode
|
48e23f3d5445ec9d366aa3e881e47d18ef1ef551
|
feaabcfb7f9f65cd0e6344eccafa045032eaf8e9
|
refs/heads/main
| 2023-04-27T16:04:48.743624
| 2021-05-19T21:48:00
| 2021-05-19T21:48:00
| 351,194,657
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,316
|
py
|
import torch
import torch.nn as nn
from torchvision import transforms, datasets, models
def get_resnet(num_classes):
model = models.resnet101(pretrained=True)
n_inputs = model.fc.in_features
model.fc = nn.Sequential(
nn.Linear(n_inputs, 256), nn.ReLU(), nn.Dropout(0.2),
nn.Linear(256, num_classes))#, nn.LogSoftmax(dim=1))
# model = nn.Sequential(nn.BatchNorm2d(num_features=3, affine=False), model)
return model
def get_densenet(num_classes):
model_ft = models.densenet121(pretrained=True)
num_ftrs = model_ft.classifier.in_features
# model_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, num_classes), nn.Softmax(dim=1))
    model_ft.classifier = nn.Sequential(
        nn.Linear(num_ftrs, 256), nn.ReLU(), nn.Dropout(0.2),
        nn.Linear(256, num_classes))
    return model_ft
def set_parameter_requires_grad(model, feature_extracting):
'''
This helper function sets the .requires_grad attribute of the parameters
in the model to False when we are feature extracting. By default, when we load
a pretrained model all of the parameters have .requires_grad=True,
which is fine if we are training from scratch or finetuning.
'''
if feature_extracting:
for param in model.parameters():
param.requires_grad = False
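# A minimal feature-extraction sketch (hypothetical class count):
if __name__ == '__main__':
    model = get_resnet(num_classes=10)
    set_parameter_requires_grad(model, feature_extracting=True)
    # Unfreeze only the newly added classifier head
    for param in model.fc.parameters():
        param.requires_grad = True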
|
[
"scintiamon@gmail.com"
] |
scintiamon@gmail.com
|
2e291852953cee8a6272452abcadb5b633bcb264
|
ba7be445410aa58f8cee5e64a38a4f701d50408b
|
/sell_npc/actions.py
|
c7616f88eb017d7169021ed04a11179da3d5155f
|
[] |
no_license
|
MattyM92/scripts
|
99214d6108cce5bff707d1acee566438a91b101f
|
f0bdea3eebe4e83bd3a73a2be5d5456ad8d7ac90
|
refs/heads/master
| 2023-01-11T00:04:57.821007
| 2020-11-07T18:03:30
| 2020-11-07T18:03:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,560
|
py
|
#Load Custom functions
from lib import *
import global_actions
def waypoint_action(client, action):
def withdraw_items_to_backpack(backpack, depot):
# Returns true if withdraw was successful
dest = client.get_container(backpack)
if not dest:
print('Could not find backpack', backpack, 'to hold items')
return False
# Open up to last backpack
tries = 20
while not dest.is_empty() and tries:
tries -= 1
client.use_slot(dest, 0)
sleep(0.3)
client.use_slot(dest, 1)
sleep(0.3)
client.hotkey('esc')
# Try 3 times to open depot
for tries in range(3):
client.reach_locker()
src = client.open_depot(depot)
if src:
client.use_slot(src, depot - 1)
sleep(1)
break
else:
print('Failed to reach locker')
return False
# Withdraw items until no backpack slots or no cap
withdraw_any = False
if client.get_cap() < 800:
return False
for i in range(10):
print('cap:', client.get_cap())
if client.get_cap() < 200:
print('Low cap')
return withdraw_any
dest = client.get_container(backpack)
if not dest:
print('Could not find backpack', backpack, 'to hold items')
return withdraw_any
if src.is_empty():
print('Depot is empty')
return withdraw_any
# Withdraw 19 items to current backpack
items = 19
while items > 0:
items -= 1
client.take_item_from_slot(src, 0, dest)
withdraw_any = True
# Go back one backpack
client.return_container(dest)
        print('Withdrew more than 250 items')
return True
if action == 'check_sell_edron':
if not client.script_options['lailene'] and not client.script_options['alexander'] and not client.script_options['telas']:
client.jump_label('end_edron')
elif action == 'check_sell_edron_tower':
if not client.script_options['lailene'] and not client.script_options['alexander']:
client.jump_label('goto_telas')
elif action == 'check_sell_rashid':
if not client.script_options['rashid']:
client.jump_label('skip_sell_rashid')
elif action == 'check_sell_green_djinn':
if not client.script_options['green_djinn']:
client.jump_label('skip_sell_green_djinn')
elif action == 'check_sell_flint':
if not client.script_options['flint']:
client.jump_label('skip_sell_flint')
elif action == 'check_sell_lailene':
if not client.script_options['lailene']:
client.jump_label('skip_sell_lailene')
elif action == 'check_sell_alexander':
if not client.script_options['alexander']:
client.jump_label('skip_sell_alexander')
elif action == 'check_sell_telas':
if not client.script_options['telas']:
client.jump_label('skip_sell_telas')
elif action == 'check_sell_tamoril':
if not client.script_options['tamoril']:
client.jump_label('skip_sell_tamoril')
elif action == 'check_sell_esrik':
if not client.script_options['esrik']:
client.jump_label('skip_sell_esrik')
elif action == "travel_darashia": # Only for carpet
client.npc_say(['darashia', 'yes'])
elif action == "repeat_flint":
client.jump_label('start_flint')
elif action == "flint":
withdraw = withdraw_items_to_backpack(backpack='Golden Backpack', depot=5)
if not withdraw:
client.jump_label('end_flint')
elif action == "repeat_djinn":
client.jump_label('start_djinn')
elif action == "djinn":
withdraw = withdraw_items_to_backpack(backpack='Golden Backpack', depot=10)
if not withdraw:
client.jump_label('end_djinn')
elif action == "repeat_edron":
client.jump_label('start_edron')
elif action == "edron":
withdraw = withdraw_items_to_backpack(backpack='Golden Backpack', depot=6)
if not withdraw:
client.jump_label('end_edron')
elif action == "skip_edron":
client.jump_label('skip_edron')
elif action == "repeat_farmine":
client.jump_label('start_farmine')
elif action == "farmine":
withdraw = withdraw_items_to_backpack(backpack='Golden Backpack', depot=7)
if not withdraw:
client.jump_label('end_farmine')
elif action == "use_elevator":
client.use_lever((1,0))
elif action == "repeat_yalahar":
client.jump_label('start_yalahar')
elif action == "yalahar":
withdraw = withdraw_items_to_backpack(backpack='Golden Backpack', depot=8)
if not withdraw:
client.jump_label('end_yalahar')
elif action == "check_rashid":
if not client.script_options['rashid']:
client.jump_label('end_rashid')
weekday = datetime.utcnow().weekday()
        # Adjust date to server save
if 0 < datetime.utcnow().hour + 1 < 10:
weekday -= 1 # still before server save
weekday %= 7
if weekday == 0:
client.jump_label('goto_svargrond_depot')
elif weekday == 1:
client.jump_label('goto_liberty_bay_depot')
elif weekday == 2:
client.jump_label('goto_port_hope_depot')
elif weekday == 3:
client.jump_label('goto_ankrahmun_depot')
elif weekday == 4:
client.jump_label('goto_darashia_depot')
elif weekday == 5:
client.jump_label('goto_edron_depot')
elif weekday == 6:
client.jump_label('goto_carlin_depot')
elif action == "end_rashid":
client.jump_label('end_rashid')
elif action == "repeat_rashid_svargrond":
client.jump_label('start_rashid_svargrond')
elif action == "rashid_svargrond":
withdraw = withdraw_items_to_backpack(backpack='Golden Backpack', depot=9)
if not withdraw:
client.jump_label('end_rashid_svargrond')
elif action == "repeat_rashid_carlin":
client.jump_label('start_rashid_carlin')
elif action == "rashid_carlin":
withdraw = withdraw_items_to_backpack(backpack='Golden Backpack', depot=9)
if not withdraw:
client.jump_label('end_rashid_carlin')
elif action == "repeat_rashid_liberty_bay":
client.jump_label('start_rashid_liberty_bay')
elif action == "rashid_liberty_bay":
withdraw = withdraw_items_to_backpack(backpack='Golden Backpack', depot=9)
if not withdraw:
client.jump_label('end_rashid_liberty_bay')
elif action == "repeat_rashid_port_hope":
client.jump_label('start_rashid_port_hope')
elif action == "rashid_port_hope":
withdraw = withdraw_items_to_backpack(backpack='Golden Backpack', depot=9)
if not withdraw:
client.jump_label('end_rashid_port_hope')
elif action == "repeat_rashid_ankrahmun":
client.jump_label('start_rashid_ankrahmun')
elif action == "rashid_ankrahmun":
withdraw = withdraw_items_to_backpack(backpack='Golden Backpack', depot=9)
if not withdraw:
client.jump_label('end_rashid_ankrahmun')
elif action == "repeat_rashid_darashia":
client.jump_label('start_rashid_darashia')
elif action == "rashid_darashia":
withdraw = withdraw_items_to_backpack(backpack='Golden Backpack', depot=9)
if not withdraw:
client.jump_label('end_rashid_darashia')
elif action == "repeat_rashid_edron":
client.jump_label('start_rashid_edron')
elif action == "rashid_edron":
withdraw = withdraw_items_to_backpack(backpack='Golden Backpack', depot=9)
if not withdraw:
client.jump_label('end_rashid_edron')
elif action == "stash_all":
client.reach_locker()
stash_item_from_slot(client, client.equips, 'backpack')
elif action == "buy_potions":
npc_refill(client, mana=True, health=False)
elif action == "refill":
if not withdraw_item_from_stash(client, 'brown mushroom', 50, client.items['brown mushroom']):
print('Not enough mushrooms')
if 'ammo_name' in client.hunt_config.keys():
ammo_name = client.hunt_config['ammo_name']
if not withdraw_item_from_stash(client, ammo_name, client.hunt_config['take_ammo'], client.items[ammo_name]):
print('Not enough ammo')
elif action == "check_ammo":
if 'ammo_name' not in client.hunt_config.keys():
client.jump_label('skip_ammo')
else:
ammo_name = client.hunt_config['ammo_name']
if 'arrow' not in ammo_name and 'bolt' not in ammo_name:
client.jump_label('skip_ammo')
elif action == "buy_ammo":
npc_refill(client, ammo='ammo_name' in client.hunt_config.keys())
elif action == "check_supplies":
check_supplies(client, health=False, imbuement=False, ammo='ammo_name' in client.hunt_config.keys(), logout_fail=True)
else:
global_actions.waypoint_action(client, action)
|
[
"kaden.lothar@yahoo.com"
] |
kaden.lothar@yahoo.com
|
f9c5eb8dcf7029ff417d9f814c947b5b96afe7e3
|
57235e5fbd29dc5e0b3f24649e15a48935edd65f
|
/boa3_test/test_sc/arithmetic_test/AddBoa2TestVoid.py
|
a258a7f83bf8253d84701620af37b12201d4e5af
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
DanPopa46/neo3-boa
|
ae75543bdc4e0aeadf45578b6b5e4c45b9253557
|
e4ef340744b5bd25ade26f847eac50789b97f3e9
|
refs/heads/development
| 2023-04-01T19:25:08.216180
| 2021-04-15T17:45:38
| 2021-04-15T17:45:38
| 358,663,228
| 0
| 0
|
Apache-2.0
| 2021-04-16T16:46:46
| 2021-04-16T16:46:31
| null |
UTF-8
|
Python
| false
| false
| 74
|
py
|
from boa3.builtin import public
@public
def main(m: int):
c = m + 2
|
[
"lucas.uezu@simpli.com.br"
] |
lucas.uezu@simpli.com.br
|
c561330fc9fb62e44382bc258536ed2c656fad33
|
cd3e72282679d00c33fdb8a1500e59e0c735c86b
|
/qa_data_preprocessing.py
|
01eb131875dd90b042357de156962af872da7ebd
|
[] |
no_license
|
amirharati/Trump_tweeter_bot
|
c4bbbf5b3a954a0b44c44393c9968106ec8fdfa6
|
23c46116000c4dcb54d900913d63ae0d8f035633
|
refs/heads/master
| 2021-04-26T23:49:23.632245
| 2018-08-23T05:20:43
| 2018-08-23T05:20:43
| 123,863,041
| 0
| 0
| null | 2018-03-22T02:04:40
| 2018-03-05T04:06:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
# qa_data_preprocessing.py
# Amir Harati, Aug 2018
"""
data preprocessing for question/answers pairs.
"""
import sys
def main(q_file, a_file, bpe_voc_file, word2id, wordid_questions, wordid_answers):
q_data = [line.strip() for line in open(q_file)]
a_data = [line.strip() for line in open(a_file)]
words = [line.split()[0] for line in open(bpe_voc_file)]
#words = list(set(words))
words = ["<PAD>", "<START>", "<EOS>"] + words
words_to_ids = {w: id for id, w in enumerate(words)}
ids_to_words = {words_to_ids[x]: x for x in words_to_ids}
with open(word2id, "w") as wif:
for key, val in words_to_ids.items():
wif.write(key + "\t" + str(val) + "\n")
with open(wordid_questions, "w") as f:
for sen in q_data:
ostr = ""
for word in sen.split():
#print(word)
ostr = ostr + str(words_to_ids[word]) + " "
f.write(ostr + "\n")
with open(wordid_answers, "w") as f:
for sen in a_data:
ostr = ""
for word in sen.split():
#print(word)
ostr = ostr + str(words_to_ids[word]) + " "
f.write(ostr + "\n")
if __name__ == "__main__":
q_file = sys.argv[1]
a_file = sys.argv[2]
bpe_voc_file = sys.argv[3]
word2id = sys.argv[4]
wordid_questions = sys.argv[5]
wordid_answers = sys.argv[6]
main(q_file, a_file, bpe_voc_file, word2id, wordid_questions, wordid_answers)
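# Example invocation (hypothetical file names):
#   python qa_data_preprocessing.py questions.bpe answers.bpe bpe.vocab \
#       word2id.txt wordid_questions.txt wordid_answers.txt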
|
[
"amir.harati@gmail.com"
] |
amir.harati@gmail.com
|
5d31302c9abdc959a722e47913347d4d059a2d45
|
bda6ce4cfe2263e97c93f37f554eb05e85adee92
|
/011-020/013.py
|
ab9710fd9937cc380ab650ab844d748491b57ab3
|
[] |
no_license
|
Cybersheralt/Project-Euler-HackerRank
|
5d59dcc039d01ddfb5a594b560c4ceea3b23ed01
|
ab19bcc07cf4817798f6418f49c212b2eedcd08e
|
refs/heads/main
| 2023-05-27T16:00:51.266750
| 2021-06-14T06:50:31
| 2021-06-14T06:50:31
| 364,251,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 112
|
py
|
# Reads n, then n large numbers; prints the first 10 digits of their sum.
n = int(input())
nums = []
for i in range(n):
nums.append(int(input()))
print(str(sum(nums))[:10])
|
[
"noreply@github.com"
] |
Cybersheralt.noreply@github.com
|
dcf66511f02b0bd6fa496e59a1438b8b6f10a669
|
d982053fc7dfc016e541b507de29755320d59e84
|
/podstanovka_skloneniy.py
|
8fbb67caaae6f1aec8df27d4742c7ac42d511597
|
[] |
no_license
|
MikhaeleDem/python_training_cource
|
cbfbeca8afc4a1f3f145990094ce0d1c916df864
|
63ada5be4e3ad6cc3ce8f7fa074514f499045dec
|
refs/heads/master
| 2023-06-02T18:08:04.036982
| 2021-06-16T12:54:21
| 2021-06-16T12:54:21
| 373,197,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,669
|
py
|
"""Проверка правильности окончания в зависимости от числа"""
a = int(input())
if a % 10 == 1 and a // 10 != 11 and a // 10 != 1\
and a // 10 != 21 and a // 10 != 31 and a // 10 != 41 and a // 10 != 51 and a // 10 != 61 and a // 10 != 71 and a // 10 != 81 and a // 10 != 91:
print(a, 'программист')
elif (a % 10) == 2 and a // 10 != 11\
and a // 10 != 21 and a // 10 != 31 and a // 10 != 41 and a // 10 != 51 and a // 10 != 61 and a // 10 != 71 and a // 10 != 81 and a // 10 != 91\
or (a % 10) == 3 and a // 10 != 11\
and a // 10 != 21 and a // 10 != 31 and a // 10 != 41 and a // 10 != 51 and a // 10 != 61 and a // 10 != 71 and a // 10 != 81 and a // 10 != 91\
or (a % 10) == 4 and a // 10 != 11\
and a // 10 != 21 and a // 10 != 31 and a // 10 != 41 and a // 10 != 51 and a // 10 != 61 and a // 10 != 71 and a // 10 != 81 and a // 10 != 91:
print(a, 'программиста')
elif (a % 10) == 5\
or (a % 10) == 6\
or (a % 10) == 7\
or (a % 10) == 8\
or (a % 10) == 9\
or (a % 10) == 0\
or (a % 10) == 1 and a // 10 == 11 or a // 10 == 1\
or a // 10 == 21 or a // 10 == 31 or a // 10 == 41 or a // 10 == 51 or a // 10 == 61 or a // 10 == 71 or a // 10 == 81 or a // 10 == 91\
or (a % 10) == 2 and a // 10 == 11\
or a // 10 == 21 or a // 10 == 31 or a // 10 == 41 or a // 10 == 51 or a // 10 == 61 or a // 10 == 71 or a // 10 == 81 or a // 10 == 91\
or (a % 10) == 3 and a // 10 == 11\
or (a % 10) == 4 and a // 10 == 11:
print(a, 'программистов')
|
[
"m.dannicov@mail.ru"
] |
m.dannicov@mail.ru
|
30a7caa176be289d1ea59edf66de5b3ea7a4c75c
|
22e767950423c0ab8efd6a474d38157e799669f2
|
/main.py
|
598aaa7542231012b4e2d8600f382d097523c062
|
[] |
no_license
|
richardphi1618/file-sorter
|
907196415214fc397944874c2fbf4a3458218773
|
de2ca1f761ecca72b4adc48f3703eaabd97f603d
|
refs/heads/main
| 2023-04-06T11:29:13.928664
| 2021-04-19T23:38:58
| 2021-04-19T23:38:58
| 359,619,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 883
|
py
|
import os
import shutil
import csv
csv_file = input("\nEnter your list: ")
#csv_file = "photostofind.csv"
src_dir = input("Enter your source folder: ")
#src_dir = "ada"
full_path = os.path.realpath(__file__)
home = os.path.dirname(full_path)
#print(home)
from_folder = os.path.join(home, src_dir)
to_folder = os.path.join(home, "found")
print("\nSearching Directory: " + from_folder)
print("Destination Directory: " + to_folder + "\n")
if not os.path.exists('found'):
os.makedirs('found')
with open(csv_file, newline='') as f:
reader = csv.reader(f)
ini_list = list(reader)
files_to_find = sum(ini_list, [])
for (dirpath, dirnames, filenames) in os.walk(src_dir):
for fname in filenames:
if fname in files_to_find:
print (fname)
file_in_motion = os.path.join(dirpath, fname)
shutil.copy(file_in_motion, to_folder)
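# The CSV is assumed to hold bare file names, one or more per row, e.g.:
#   IMG_0001.jpg,IMG_0002.jpg
#   IMG_0042.jpg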
|
[
"richardphi1618@gmail.com"
] |
richardphi1618@gmail.com
|
e981ad2971bf95bb5ad26fb4b7a1eb93a1732c2b
|
73f6ba42a793d18ad5b4c44cfdc278e51aa1b9b0
|
/perum/views.py
|
e8828e9cea66ce406f19303c074bfd490595440b
|
[] |
no_license
|
akbarlintang/perumahan
|
e14eb922a86c76581d8faae5700ff21e83ba13ee
|
66c908a382bc32e9b9abc69b3a6f22eab12d8d2c
|
refs/heads/main
| 2022-12-30T08:11:23.856824
| 2020-10-22T15:33:30
| 2020-10-22T15:33:30
| 306,377,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,587
|
py
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.db.models import Sum
from django.forms import inlineformset_factory
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from django.contrib import messages
from .models import *
from .forms import *
from .filters import *
from .decorators import *
import datetime
# Create your views here.
@unauthenticated_user
def loginPage(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('home')
else:
messages.info(request, 'Username atau Password salah!')
context = {}
return render(request, 'perum/login.html', context)
def logoutUser(request):
logout(request)
return redirect('login')
@unauthenticated_user
def registerPage(request):
form = CreateUserForm()
if request.method == 'POST':
form = CreateUserForm(request.POST)
if form.is_valid():
user = form.save()
username = form.cleaned_data.get('username')
group = Group.objects.get(name='customer')
user.groups.add(group)
Pelanggan.objects.create(
user=user,
)
messages.success(request, 'Akun berhasil dibuat untuk ' + username)
return redirect('login')
context = {'form':form}
return render(request, 'perum/register.html', context)
@login_required(login_url='login')
@admin_only
def home(request):
pelanggans = Pelanggan.objects.all()
administrasis = Administrasi.objects.all()
today = datetime.date.today()
bulan = Administrasi.objects.filter(tanggal__year=today.year, tanggal__month=today.month).aggregate(Sum('biaya_angsur'))['biaya_angsur__sum']
tahun = Administrasi.objects.filter(tanggal__year=today.year).aggregate(Sum('biaya_angsur'))['biaya_angsur__sum']
context = {'pelanggans':pelanggans, 'bulan':bulan, 'tahun':tahun, 'administrasis':administrasis}
return render(request, 'perum/dashboard.html', context)
@login_required(login_url='login')
def unit(request):
units = Unit.objects.all().order_by('no_unit')
unitFilter = UnitFilter(request.GET, queryset=units)
units = unitFilter.qs
context = {'units':units, 'unitFilter':unitFilter}
return render(request, 'perum/unit.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def pelanggan(request):
pelanggans = Pelanggan.objects.all()
pelangganFilter = PelangganFilter(request.GET, queryset=pelanggans)
pelanggans = pelangganFilter.qs
context = {'pelanggans':pelanggans, 'pelangganFilter':pelangganFilter}
return render(request, 'perum/pelanggan.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def administrasi(request):
administrasis = Administrasi.objects.all()
administrasiFilter = AdministrasiFilter(request.GET, queryset=administrasis)
administrasis = administrasiFilter.qs
context = {'administrasis':administrasis, 'administrasiFilter':administrasiFilter}
return render(request, 'perum/administrasi.html', context)
def pemesanan(request):
pemesanan = Booking.objects.all().order_by('tanggal')
context = {'pemesanan':pemesanan}
return render(request, 'perum/pemesanan.html', context)
def akun(request, pk):
akun = Pelanggan.objects.get(id=pk)
pelanggans = Pelanggan.objects.filter(id=pk)
adm = Administrasi.objects.filter(nama_id=pk)
context = {'pelanggans':pelanggans, 'akun':akun, 'adm':adm}
return render(request, 'perum/akun.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['customer'])
def profil(request):
nama = request.user.profile.nama
no_telp = request.user.profile.no_telp
email = request.user.profile.email
no_unit = request.user.profile.no_unit
context = {'nama':nama, 'no_telp':no_telp, 'email':email, 'no_unit':no_unit}
return render(request, 'perum/profil.html', context)
@login_required(login_url='login')
def angsuran(request, pk):
adm = Administrasi.objects.filter(nama_id=pk)
context = {'adm':adm}
return render(request, 'perum/angsuran.html', context)
def infoUnit(request, pk):
unit = Unit.objects.filter(id=pk)
context = {'unit':unit}
return render(request, 'perum/info_unit.html', context)
def createBooking(request, pk):
unit = Booking.objects.filter(id=pk)
form = BookingForm()
if request.method == 'POST':
form = BookingForm(request.POST)
if form.is_valid():
form.save()
return redirect('/unit')
context = {'form':form, 'unit':unit}
return render(request, 'perum/form_booking.html', context)
def createPelanggan(request):
form = PelangganForm()
if request.method == 'POST':
form = PelangganForm(request.POST)
if form.is_valid():
form.save()
return redirect('home')
context = {'form':form}
return render(request, 'perum/form_pelanggan.html', context)
def ubahPelanggan(request, pk):
pelanggan = Pelanggan.objects.get(id=pk)
form = PelangganForm(instance=pelanggan)
if request.method == 'POST':
form = PelangganForm(request.POST, instance=pelanggan)
if form.is_valid():
form.save()
return redirect('home')
context = {'form':form}
return render(request, 'perum/form_pelanggan.html', context)
def hapusPelanggan(request, pk):
pelanggan = Pelanggan.objects.get(id=pk)
if request.method == "POST":
pelanggan.delete()
return redirect('pelanggan')
context = {'pelanggan':pelanggan}
return render(request, 'perum/hapus_pelanggan.html', context)
def createUnit(request):
form = UnitForm()
if request.method == 'POST':
form = UnitForm(request.POST)
if form.is_valid():
form.save()
return redirect('/unit')
context = {'form':form}
return render(request, 'perum/form_unit.html', context)
def ubahUnit(request, pk):
unit = Unit.objects.get(id=pk)
form = UnitForm(instance=unit)
if request.method == 'POST':
form = UnitForm(request.POST, instance=unit)
if form.is_valid():
form.save()
return redirect('/unit')
context = {'form':form}
return render(request, 'perum/form_unit.html', context)
def hapusUnit(request, pk):
unit = Unit.objects.get(id=pk)
if request.method == "POST":
unit.delete()
return redirect('/unit')
context = {'unit':unit}
return render(request, 'perum/hapus_unit.html', context)
def hapusPemesanan(request, pk):
pemesanan = Booking.objects.get(id=pk)
if request.method == "POST":
pemesanan.delete()
return redirect('/pemesanan')
context = {'pemesanan':pemesanan}
return render(request, 'perum/hapus_pemesanan.html', context)
def createAdministrasi(request):
form = AdministrasiForm()
if request.method == 'POST':
form = AdministrasiForm(request.POST)
if form.is_valid():
form.save()
return redirect('/administrasi')
context = {'form':form}
return render(request, 'perum/form_administrasi.html', context)
def pembayaran(request, pk):
administrasis = Administrasi.objects.filter(id=pk)
context = {'administrasis':administrasis}
return render(request, 'perum/pembayaran.html', context)
def pemasukan(request):
today = datetime.datetime.today()
bulan = Administrasi.objects.filter(tanggal__year=today.year).filter(tanggal__month=today.month).aggregate(Sum('biaya_angsur'))
tahun = Administrasi.objects.filter(tanggal__year=today.year).aggregate(Sum('biaya_angsur'))
    hari = Administrasi.objects.filter(tanggal__year=today.year, tanggal__month=today.month, tanggal__day=today.day).aggregate(Sum('biaya_angsur'))
    # Assumed context/template names; the original ends without returning a response.
    context = {'bulan': bulan, 'tahun': tahun, 'hari': hari}
    return render(request, 'perum/pemasukan.html', context)
|
[
"lintangajiakbar@gmail.com"
] |
lintangajiakbar@gmail.com
|
199c48d52a688d764373e6cc606a68fcb43fd61e
|
37afd293bc9ded594d4d0864dbd82bcfabf1b2a6
|
/FGSM_v16.py
|
484ca7338259d03b3f7c3188669421338a6ad746
|
[] |
no_license
|
AndroidDevelopersTools/SAI-FGSM
|
670257fb5412a95475fb348f6ff2aa9615c4ff1b
|
f5a3e9f51d7d860af5acbd9f474e6866f6574a35
|
refs/heads/main
| 2023-06-10T14:18:50.370169
| 2021-07-05T13:09:52
| 2021-07-05T13:09:52
| 327,304,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,884
|
py
|
import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
import math
import numpy as np
import os
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import gc
import threading
# mpl.rcParams['figure.figsize'] = (8, 8)
# mpl.rcParams['axes.grid'] = False
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
tf.config.experimental.set_memory_growth(gpus[0], enable=True)
def load_label(url):
find_label = {}
file = open(url, 'r', encoding='UTF-8')
for line in file:
key, value = line.split(' ')
find_label[key] = value.replace('\n', '')
file.close()
return find_label
def load_ng_data(url, link):
X_train = []
y_train = []
find_label = load_label(link)
imgs_dirs = url
for imgs_dir in imgs_dirs:
imagePaths = list(os.listdir(imgs_dir))
for imagePath in imagePaths:
x = img_to_array(load_img(os.path.join(imgs_dir, imagePath), target_size=(224, 224)))
X_train.append(x / 255.0)
y_train.append(find_label[imagePath])
return tf.convert_to_tensor(np.array(X_train)), y_train
Decode_model = tf.keras.applications
def from_probability_get_label(model, image):
image_probs = model.predict(image)
# print('--------------')
# print(np.argmax(image_probs))
return Decode_model.densenet.decode_predictions(image_probs, top=1)[0][0], np.argmax(image_probs)
# return Decode_model.nasnet.decode_predictions(image_probs, top=1)[0][0], np.argmax(image_probs)
# return Decode_model.mobilenet.decode_predictions(image_probs, top=1)[0][0], np.argmax(image_probs)
loss_temp = tf.keras.losses.CategoricalCrossentropy()
def create_adversarial_sample_iterative(model, input_image, input_label, epsilon, times):
    if epsilon != 0:
        alpha = epsilon / times
for i in range(times):
with tf.GradientTape() as tape:
tape.watch(input_image)
prediction = model(input_image)
loss = loss_temp(input_label, prediction)
gradient = tape.gradient(loss, input_image)
if tf.norm(gradient, ord=2) != 0:
# input_image = alpha * (0.5 * (tf.sign(gradient) + 1)) + input_image
input_image = alpha * tf.tanh(gradient) + input_image
else:
break
return input_image
else:
return input_image
def create_adversarial_sample_MI(model, input_image, input_label, epsilon, times):
    if epsilon != 0:
        alpha = epsilon / times
g = 0
for i in range(times):
with tf.GradientTape() as tape:
tape.watch(input_image)
prediction = model(input_image)
loss = loss_temp(input_label, prediction)
gradient = tape.gradient(loss, input_image)
g = g + gradient
# input_image = input_image + alpha * (0.5 * (tf.sign(g) + 1))
input_image = alpha * tf.tanh(g) + input_image
return input_image
else:
return input_image
# SAI-FGSM variant: iterative FGSM with a simulated-annealing step
# (temperature T) to escape points where the gradient vanishes.
def create_adversarial_sample(model, input_image, input_label, epsilon, times):
if epsilon != 0:
T = 9
Tmin = 0.5
t = 0
alpha = epsilon / times
g = 0
below = input_image - epsilon
above = input_image + epsilon
while T >= Tmin:
for i in range(times):
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(input_image)
prediction = model(input_image)
loss = loss_temp(input_label, prediction)
gradient = tape.gradient(loss, input_image)
g = g + gradient
if tf.norm(gradient, ord=2).numpy() != 0:
# input_image = input_image + alpha * (0.5 * (tf.sign(g) + 1))
input_image = alpha * tf.tanh(g) + input_image
else:
p = math.exp(-(1 / T))
r = np.random.uniform(low=0, high=1)
if r < p:
print('random')
if tf.norm(g, ord=2).numpy() != 0:
input_image = input_image - 2 * alpha * (g / tf.norm(g, ord=2))
else:
break
else:
if tf.norm(g, ord=2).numpy() != 0:
# input_image = input_image + alpha * (0.5 * (tf.sign(g) + 1))
input_image = alpha * tf.tanh(g) + input_image
else:
break
t = t + 2
T = 9 / (1 + t)
if T >= Tmin:
input_image = tf.clip_by_value(input_image, below, above)
return input_image
else:
return input_image
def pre_process_label(index, label_shape):
return tf.reshape(tf.one_hot(index, label_shape), (1, label_shape))
def display_images(image, description):
(class_name, label, confidence), class_number = from_probability_get_label(Decode_model.densenet.DenseNet121(include_top=True, weights='imagenet'), image)
# (class_name, label, confidence), class_number = from_probability_get_label(Decode_model.nasnet.NASNetMobile(include_top=True, weights='imagenet'), image)
# (class_name, label, confidence), class_number = from_probability_get_label(Decode_model.mobilenet.MobileNet(include_top=True, weights='imagenet'), image)
plt.figure()
plt.imshow(image[0])
plt.title('{} \n {} : {:.2f}% Confidence'.format(description, label, confidence * 100))
plt.show()
return class_number
class myThread(threading.Thread):
def __init__(self, threadID, name, model, input_image, input_label, epsilon, times, q):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.model = model
self.input_image = input_image
self.input_label = input_label
self.epsilon = epsilon
self.times = times
self.q = q
def run(self):
adv_x_iterative = create_adversarial_sample_iterative(self.model, self.input_image, self.input_label,
self.epsilon, self.times)
self.q[self.name] = adv_x_iterative
class myThread_1(threading.Thread):
def __init__(self, threadID, name, model, input_image, input_label, epsilon, times, q):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.model = model
self.input_image = input_image
self.input_label = input_label
self.epsilon = epsilon
self.times = times
self.q = q
def run(self):
adv_x_iterative = create_adversarial_sample_MI(self.model, self.input_image, self.input_label, self.epsilon,
self.times)
self.q[self.name] = adv_x_iterative
class myThread_2(threading.Thread):
def __init__(self, threadID, name, model, input_image, input_label, epsilon, times, q):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.model = model
self.input_image = input_image
self.input_label = input_label
self.epsilon = epsilon
self.times = times
self.q = q
def run(self):
adv_x_iterative = create_adversarial_sample(self.model, self.input_image, self.input_label, self.epsilon,
self.times)
self.q[self.name] = adv_x_iterative
image, label = load_ng_data([r'C:\Users\Administrator\Desktop\dataset\images'],
r'C:\Users\Administrator\Desktop\dataset\labels')
# pretrained_model = tf.keras.applications.densenet.DenseNet121(include_top=True, weights='imagenet')
pretrained_model = tf.keras.applications.densenet.DenseNet121(include_top=True, weights='imagenet')
pretrained_model.trainable = False
epsilons = [0.03]
descriptions = [('Epsilon = {:0.3f}'.format(eps) if eps else 'Input')
for eps in epsilons]
I_times_total = []
MI_times_total = []
SAI_times_total = []
for iterative_times in range(2, 3):
I_times = 0
MI_times = 0
SAI_times = 0
for j in range(160, 233):
current_img = image[j]
current_label = pre_process_label(int(label[j]), 1000)
current_img = current_img[None, ...]
q = {}
# plt.figure()
# plt.imshow(image[j])
# plt.show()
for i, eps in enumerate(epsilons):
"""
p1 = myThread(1, "Thread-iterative", pretrained_model, current_img, current_label, eps, iterative_times + 1, q)
p1.start()
p1.join()
adv_x_iterative = q['Thread-iterative']
adv_x_iterative = tf.clip_by_value(adv_x_iterative, 0, 1)
if int(display_images(adv_x_iterative, descriptions[i])) != int(label[j]):
# print(display_images(adv_x_iterative, descriptions[i]))
# print(label[j])
I_times = I_times + 1
del adv_x_iterative
# del current_img
# del current_label
gc.collect()
"""
"""
p2 = myThread_1(2, "Thread-MI", pretrained_model, current_img, current_label, eps, iterative_times + 1, q)
p2.start()
p2.join()
adv_x_MI = q['Thread-MI']
adv_x_MI = tf.clip_by_value(adv_x_MI, 0, 1)
if int(display_images(adv_x_MI, descriptions[i])) != int(label[j]):
MI_times = MI_times + 1
# print(display_images(adv_x_MI, descriptions[i]))
# print(int(label[j]))
del adv_x_MI
gc.collect()
"""
p3 = myThread_2(3, "Thread-SAI", pretrained_model, current_img, current_label, eps, iterative_times + 1, q)
p3.start()
p3.join()
adv_x = q['Thread-SAI']
adv_x = tf.clip_by_value(adv_x, 0, 1)
if int(display_images(adv_x, descriptions[i])) != int(label[j]):
SAI_times = SAI_times + 1
del adv_x
gc.collect()
# print('===========')
# I_times_total.append(I_times)
# print(I_times)
# MI_times_total.append(MI_times)
# print(MI_times)
SAI_times_total.append(SAI_times)
print(SAI_times)
|
[
"noreply@github.com"
] |
AndroidDevelopersTools.noreply@github.com
|
f197a1991b3cedfa9127889007e6604fa8b13673
|
3f6016a16b58be6ddfa001500bb1580151b8ede1
|
/12306/Main.py
|
87c0d2757682da98a08fc2a1d84106765f281f21
|
[] |
no_license
|
stayli117/PyProjects
|
d48ffed5dd94b8d80c6cfa539bcc155281e08916
|
0814a3b2551a0500be90dff4e73cd624f0134333
|
refs/heads/master
| 2020-03-28T10:15:28.512525
| 2018-09-10T03:40:15
| 2018-09-10T03:40:15
| 148,095,043
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,042
|
py
|
# coding: utf-8
# -*- coding: UTF-8 -*
"""Train tickets query via command-line.
Usage:
tickets [-gdtkz] <from> <to> <date> [--n=1]
Options:
    -h,--help   show this help menu
    -g          high-speed rail (G trains)
    -d          bullet train (D trains)
    -t          express (T trains)
    -k          fast (K trains)
    -z          direct (Z trains)
    --n=<kn>    number of consecutive days to query [default:1]
Example:
tickets 南京 北京 2016-07-01
tickets -dg 南京 北京 2016-07-01 -n=2
"""
from docopt import docopt
from ProApi import *
from Resources import info
def cli():
    """command-line interface"""
    # NOTE: docopt parsing is bypassed; arguments are hard-coded for testing.
    # To parse real command-line input, use: arguments = docopt(__doc__)
    arguments = {'<from>': 'xian', '<to>': 'ganzhou', '<date>': '2018-04-17', '--n': 5}
    # print(arguments)
    operate(arguments)
if __name__ == "__main__":#main方法
# Menu()
# isContionue = 'Y'
# while isContionue == 'Y' or isContionue == 'y':
# counts = input('输入查询天数:\n')
# operate(int(counts))
# isContionue = input('是否继续查询?Y/N\n')
# input('按任意键退出...')
cli()
|
[
"stayli117@163.com"
] |
stayli117@163.com
|
da8a60005ee0d90d5161d29d43ab645a9a723b5a
|
aecf6829c070b8ea6365c1cc783aff91094e8048
|
/9_web_server/server.py
|
15f6509d366ac97c16c6789a3589fc0f34e319c6
|
[] |
no_license
|
KolchinaAsya/UNIX
|
7cc8eef2c0ed0e26e4e744013f8ee4fe417933a6
|
027e59a1911adb506aafc58db338ec1beb0b2c4a
|
refs/heads/main
| 2023-05-21T21:58:32.822009
| 2021-06-09T12:39:19
| 2021-06-09T12:39:19
| 355,810,856
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,772
|
py
|
import logging
import socket
import configparser
import threading
import time
def get_new_response(conn, addr, MAX, DIRECTORY):
    """
    Handle a GET request from a client and serve content of various types.
    """
while True:
logging.info("CONNECTION WITH "+str(conn))
data = conn.recv(MAX)
try:
msg = data.decode().split()[1][1:]
except:
msg = ""
if not msg:
msg = 'index.html'
if msg[-1] == '?':
msg = msg[:-1]
try:
exp = msg.split('.')[1]
except:
exp = ''
content = {"css":"text/css","min": "text/css","html":"text/html","png":"image/png",
"jpeg": "image/jpeg","js": "text/javascript","jpg": "image/jpeg","txt": "text/plain",
"json":"text/json","ico":"image/x-icon"}
content_type = ""
message = ""
response = "HTTP/1.1 "
length = 0
        try:
            content_type = content[exp]
            try:
                file = open(DIRECTORY + msg, "rb")
                message = file.read()
                length = len(message)
                file.close()
                response += "200 OK\r\n"
                # Log success only after the file was actually read
                logging.info(f'{msg} - {addr[0]} - 200 OK')
            except FileNotFoundError:
                response += '404 NOT FOUND\r\n'
                logging.info(f'{msg} - {addr[0]} - 404 NOT FOUND')
except Exception:
logging.info(f'{msg} - {addr[0]} - 403 FORBIDDEN')
response += '403 FORBIDDEN\r\n'
response+= f"Content-Type:{content_type}\r\n"
response+=f"Date: {time.ctime()}\r\n"
response+="Server: Server v0.0.1\r\n"
response+=f"Content-Length: {length}\r\n"
response+="Connection: close\r\n"
response += "\r\n"
if message:
msg = response.encode() + message
conn.send(msg)
else:
msg = response.encode()
conn.send(msg)
logging.info(f'SENT RESPONSE {response}')
conn.close()
return
#================================================
logging.basicConfig(filename='server.log', level=logging.INFO, format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
config = configparser.ConfigParser()
config.read('config.ini')
HOST = config["Settings"]["HOST"]
PORT = int(config["Settings"]["PORT"])
DIRECTORY = config["Settings"]["DIRECTORY"]
MAX = int(config["Settings"]["MAX"])
sock = socket.socket()
try:
sock.bind((HOST,PORT))
print("SERVER IS STARTING ON " + str(PORT))
logging.info("SERVER IS STARTING ON " + str(PORT))
except:
sock.bind(("localhost",8080))
print("SERVER IS STARTING ON 8080")
logging.info("SERVER IS STARTING ON 8080")
sock.listen()
#================================================
while True:
conn, addr = sock.accept()
    t1 = threading.Thread(target=get_new_response, args=[conn, addr, MAX, DIRECTORY])
t1.start()
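# Assumed config.ini layout (inferred from the keys read above):
# [Settings]
# HOST = localhost
# PORT = 8080
# DIRECTORY = ./www/
# MAX = 1024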
|
[
"kolchina.asya@list.ru"
] |
kolchina.asya@list.ru
|
a5ecdd7ea2524f019fae09e68c82b2da283f1301
|
04b6fe7ad42a7872e024317f9e490483da1186b7
|
/TestCase/test_03_set_category.py
|
2f68f7f64ab64364500b4570f131ac5494080a85
|
[] |
no_license
|
QUPSBD/Appium_Python
|
896ed2fb3f7e6b5ba6e3e371de623b0a030a1428
|
d47b324d381ec4f66245434fcdef64a8f31d77c2
|
refs/heads/master
| 2023-05-09T01:18:43.121771
| 2021-06-03T17:13:10
| 2021-06-03T17:13:10
| 373,581,592
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
from Pages.home_page import HomePage
from TestCase.base_test import BaseTest
class TestSetCategory(BaseTest):
def test_intro(self):
homepage = HomePage(self.driver)
homepage.test_set_category()
# python3 -m unittest TestCase.test_03_set_category
|
[
"sh.qups@gmail.com"
] |
sh.qups@gmail.com
|
b29f38581517653bee0d5d847bf5be11288f212e
|
b44fee80dd22adad1d8ec3e166789579ec7579c7
|
/plot_params.py
|
1e272d4202d96fca783370a7213fd9b9d6394d78
|
[] |
no_license
|
kimds91/growplot
|
e8f8c504bd955a06f230bdf336f259ad854b5d77
|
ce92bf8f755d2f7710f687b2909758d88ebdfeb4
|
refs/heads/master
| 2021-06-26T09:36:28.592821
| 2020-11-23T00:17:05
| 2020-11-23T00:17:05
| 141,829,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,872
|
py
|
from scipy.optimize import curve_fit
from math import log
from scipy import stats
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Rectangle
plt.rcParams['svg.fonttype'] = 'none'
import seaborn as sns
import pandas as pd
import sys
import argparse
#font = {'family' : 'normal',
# 'weight' : 'bold',
# 'size' : 24}
#matplotlib.rc('font', **font)
plt.style.use('seaborn-talk')
def process_excel(filename, specified_series):
df_kinetic_total = pd.read_excel(filename, sheet_name = 'Data')
def get_hrs(time_str):
h, m, s = time_str.split(':')
return ((int(h) * 3600 + int(m) * 60 + int(s))/3600)
def get_minutes(time_str):
h,m,s = time_str.split(':')
return (float(h)*60 + float(m) + float(s)/60)
hrs_list = []
min_list = []
for time in df_kinetic_total['Time']:
hrs_list.append(get_hrs(str(time)))
min_list.append(get_minutes(str(time)))
df_kinetic_total['hrs'] = hrs_list
df_kinetic_total['mins'] = min_list
df_samples = pd.read_excel(filename, sheet_name = 'Layout')
series_names = list(df_samples['Sample'])
df_samples.set_index('Sample', inplace=True)
samples = []
for series in series_names:
samples.append(filter(lambda it: it != '', map(lambda it: it.strip(), df_samples.loc[series].tolist()[0].split(','))))
df_dict = {}
for sample, series in zip(samples, series_names):
if specified_series is None or (len(specified_series) > 0 and series in specified_series):
df_dict[series] = df_kinetic_total.loc[:,sample].astype(float)
return df_dict, df_kinetic_total
#data_dfs = []
#for sample_df, series in zip(dfs, series_names):
# df = pd.DataFrame()
# #df['avg'] = sample_df.mean(axis=1)
# df['avg'] = sample_df#.mean(axis=1)
# #df['std'] = sample_df.std(axis=1)
# df['Hours'] = df_kinetic_total['hrs']
# df['sample'] = series
# data_dfs.append(df)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Break down an Excel file into collections of data series for later plotting in R")
parser.add_argument('filename', type=str, nargs=1, help="The Excel file to be processed")
parser.add_argument('--series', type=str, nargs='+', help="The series from the Excel file, as named in the Layout tab, to be plotted")
args = parser.parse_args()
df_dict, t = process_excel(filename=args.filename[0], specified_series=args.series)
def f(x, L, k, x0):
return L/(1+np.exp(-k*(x-x0)))
final_proto_df = []
for k,df in df_dict.items():
npdf = np.array(df)
ydata = npdf[:,0]
xdata = t['hrs']
for i in range(1,npdf.shape[1]):
ydata = np.concatenate((ydata, npdf[:,i]))
xdata = np.concatenate((xdata, t['hrs']))
#ydata = np.concatenate((ydata, npdf[:,2]))
#ydata = np.concatenate((ydata, npdf[:,3]))
#ydata = np.concatenate((ydata, npdf[:,4]))
#ydata = np.concatenate((ydata, npdf[:,5]))
def av(l):
return sum(l)/len(l)
yav = np.array([av([npdf[i,k] for k in range(npdf.shape[1])]) for i in range(len(npdf))])
#['avg']
#print(xdata)
#print(ydata)
params, pcov = curve_fit(f, xdata, ydata)
if '_' in k:
final_proto_df.append({'tag': k, 'seq': k.split('_')[0], 'cond': k.split('_')[1], 'odmax': params[0], 'midpt': params[2], 'maxslope': (params[0]*params[1])/4, 'doubling': np.log(2)/params[1], 'cov': pcov })
else:
final_proto_df.append({'tag': k, 'seq': k, 'cond': 'n/a', 'odmax': params[0], 'midpt': params[2], 'maxslope': (params[0]*params[1])/4, 'doubling': np.log(2)/params[1], 'cov': pcov })
#print(k, params, pcov)
stdevs = np.sqrt(np.diag(pcov))
print(k)
for name, param, stdev in zip(["Max", "K-param", "Midpoint"], params, stdevs):
print("{}: {} +/- {}".format(name, param, stdev))
# lag_time = (mx0 - y0) / m = x0 - y0/m
# Assuming covariances are close enough to zero.
half_max = params[0]/2
half_max_sd = stdevs[0]/2
max_slope = (params[0]*params[1])/4
max_slope_sd = ( (stdevs[0]**2 * stdevs[1]**2 + stdevs[0]**2 * params[0]**2 + stdevs[1]**2 * params[1]**2 )**0.5 ) / 4
half_max_over_max_slope = half_max / max_slope
# minimal consideration of covariance would append -2cov(x,y)/xy to the second term
half_max_over_max_slope_sd = ( half_max**2 / max_slope**2 * ( half_max_sd**2 / half_max**2 + max_slope_sd**2/max_slope**2 ) )**0.5
lag_time = params[2] - half_max_over_max_slope
# minimal consideration of covariance would subtract -2cov(x,y) inside the sqrt
lag_time_sd = ( stdevs[2]**2 + half_max_over_max_slope_sd**2 ) ** 0.5
print("Max slope: {} +/- {}".format(max_slope, max_slope_sd))
print("Lag time: {} +/- {}".format(lag_time, lag_time_sd))
finaldf = pd.DataFrame.from_dict(final_proto_df)
plt.clf()
sns.lmplot(x='odmax', y='midpt',data=finaldf, fit_reg=False, hue='seq', legend='True')
plt.savefig('plot_odmax_v_midpt_byseq.svg')
plt.clf()
sns.lmplot(x='odmax', y='doubling',data=finaldf, fit_reg=False, hue='seq', legend='True')
plt.savefig('plot_odmax_v_doubling_byseq.svg')
#plt.scatter(odmax_CS, midpt_CS, c='b')
#plt.scatter(odmax_C, midpt_C, c='g')
plt.clf()
sns.lmplot(x='odmax', y='midpt',data=finaldf, fit_reg=False, hue='cond', legend='True')
plt.savefig('plot_odmax_v_midpt_bycond.svg')
plt.clf()
sns.lmplot(x='odmax', y='doubling',data=finaldf, fit_reg=False, hue='cond', legend='True')
plt.savefig('plot_odmax_v_doubling_bycond.svg')
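    # Assumed workbook layout (inferred from process_excel above):
    #   'Data' sheet:   a 'Time' column of H:MM:SS strings plus one column per well
    #   'Layout' sheet: a 'Sample' column of series names and a second column whose
    #                   cells hold comma-separated well names for each series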
|
[
"andy.watkins2@gmail.com"
] |
andy.watkins2@gmail.com
|
d46cce93334a04c9ef82afc0f189bf9671eeb38e
|
dcdf8a7edb8a765707459a540997750402f65bea
|
/test/functional/p2p_pos_fakestake.py
|
e0b805152981bf94db4ca0fec1fdedf73bfd28b4
|
[
"MIT"
] |
permissive
|
ifinmakassar/BitalGO
|
a313fa0e2015cc02fa3dc8d63d8d3a692dd8b019
|
f8f5e126a7c808f72ff85b32e28c09a45fe684ca
|
refs/heads/master
| 2022-04-06T12:33:49.209408
| 2020-03-16T06:15:12
| 2020-03-16T06:15:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,288
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2019 The BitalGo Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Covers the scenario of a PoS block where the coinstake input prevout is already spent.
'''
from time import sleep
from fake_stake.base_test import ALG_FakeStakeTest
class PoSFakeStake(ALG_FakeStakeTest):
def run_test(self):
self.description = "Covers the scenario of a PoS block where the coinstake input prevout is already spent."
self.init_test()
        INITIAL_MINED_BLOCKS = 150 # First mined blocks (rewards collected to spend)
MORE_MINED_BLOCKS = 100 # Blocks mined after spending
STAKE_AMPL_ROUNDS = 2 # Rounds of stake amplification
self.NUM_BLOCKS = 3 # Number of spammed blocks
# 1) Starting mining blocks
self.log.info("Mining %d blocks.." % INITAL_MINED_BLOCKS)
self.node.generate(INITAL_MINED_BLOCKS)
# 2) Collect the possible prevouts
self.log.info("Collecting all unspent coins which we generated from mining...")
# 3) Create 10 addresses - Do the stake amplification
self.log.info("Performing the stake amplification (%d rounds)..." % STAKE_AMPL_ROUNDS)
utxo_list = self.node.listunspent()
address_list = []
for i in range(10):
address_list.append(self.node.getnewaddress())
utxo_list = self.stake_amplification(utxo_list, STAKE_AMPL_ROUNDS, address_list)
self.log.info("Done. Utxo list has %d elements." % len(utxo_list))
sleep(2)
        # 4) Start mining again so that spent prevouts get confirmed in a block.
self.log.info("Mining %d more blocks..." % MORE_MINED_BLOCKS)
self.node.generate(MORE_MINED_BLOCKS)
sleep(2)
# 5) Create "Fake Stake" blocks and send them
self.log.info("Creating Fake stake blocks")
err_msgs = self.test_spam("Main", utxo_list)
if not len(err_msgs) == 0:
self.log.error("result: " + " | ".join(err_msgs))
raise AssertionError("TEST FAILED")
self.log.info("%s PASSED" % self.__class__.__name__)
if __name__ == '__main__':
PoSFakeStake().main()
|
[
"kyzer@traebit.ca"
] |
kyzer@traebit.ca
|
93f4bd22d4f2bd236632acc4fe534cb04a3954bb
|
66fbb969c8844f6a0db7cf8939c3412516bf54ca
|
/min_stack.py
|
f010c0465b1a9022ba8d0b7065f5b842d1107edf
|
[] |
no_license
|
chunweiliu/leetcode2
|
67a86d5a0d8c3ffe41f53a46b0e5960edc64c56d
|
086b7c9b3651a0e70c5794f6c264eb975cc90363
|
refs/heads/master
| 2021-01-19T08:13:12.667448
| 2017-04-08T06:02:35
| 2017-04-08T06:02:35
| 87,612,004
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,100
|
py
|
"""Design a stack that can get the min value
Time: O(1) for all operations
Space: O(n)
"""
class MinStack(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.stack = []
def push(self, x):
"""
:type x: int
:rtype: void
"""
if not self.stack:
self.stack.append({'value': x, 'min_below': x})
else:
self.stack.append({'value': x, 'min_below': min(x, self.getMin())})
def pop(self):
"""
:rtype: void
"""
if self.stack:
self.stack.pop()
def top(self):
"""
:rtype: int
"""
if self.stack:
return self.stack[-1]['value']
return None
def getMin(self):
"""
:rtype: int
"""
if self.stack:
return self.stack[-1]['min_below']
return None
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
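# A minimal sanity check of the min-tracking invariant (illustrative only,
# not part of the original solution; values are arbitrary):
if __name__ == '__main__':
    s = MinStack()
    for v in [3, 1, 2]:
        s.push(v)
    assert s.getMin() == 1   # 1 is the smallest value pushed so far
    s.pop()                  # removes 2; the minimum is still 1
    assert s.getMin() == 1
    s.pop()                  # removes 1; the minimum falls back to 3
    assert s.getMin() == 3
    print('MinStack invariant holds')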
|
[
"gtoniliu@gmail.com"
] |
gtoniliu@gmail.com
|
cc2adeccf0056d1ea67abfdf21ae86dbf06d38d3
|
16427e5c6ee8d1baab1696f2eb8635dda7460147
|
/ass/time.py
|
6fb5292b8a9acf313ae2da1cbd7bdcbb2b18ed07
|
[] |
no_license
|
threemay/python3_study
|
21ab425825e064a12bcde739415865d37356b3d0
|
3a563ccf7ec48fc6c3dda76d4c05382a215bed4f
|
refs/heads/master
| 2022-02-02T21:19:47.349976
| 2019-07-20T05:38:05
| 2019-07-20T05:38:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
import time
print(time.localtime())
#while True:
# time.sleep(1)
# print(time.strftime('%H:%M:%S',time.localtime()))
Hour = int(input("Please input the hour: "))
Minute = int(input("Please input the minute: "))
Second = int(input("Please input the second: "))
while True:
    time.sleep(1)
    t = time.localtime()
    print(t[3], t[4], t[5])  # tm_hour, tm_min, tm_sec
    if t[3] == Hour and t[4] == Minute and t[5] == Second:
        print("time **************")
        break
|
[
"254363807@qq.com"
] |
254363807@qq.com
|
2c3bfbc6432bf9d34345b24b769176cb31759ffc
|
7308f089e45b71497d1f79d90a9502282bb30f7c
|
/TestEngines/mysql.py
|
7edb4ed88589715eadb9f2d1eb0bd14cce41ac5c
|
[] |
no_license
|
braveswang/v2test
|
804c7e86fa787040cea00ff895e171152bd7c242
|
e1543e4dfe61f90bad7d3bfac8bd133f3563e3d4
|
refs/heads/master
| 2023-05-13T11:20:22.625318
| 2018-07-18T09:32:37
| 2018-07-18T09:32:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,721
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os  # os.path.join is used by the 'sql' action below
import pymysql.cursors
from TestEngines.config import *
class Test:
def __init__(self):
self.connection = pymysql.connect(
host=CONFIG.get('MYSQL', 'HOST'),
port=int(CONFIG.get('MYSQL', 'PORT')),
user=CONFIG.get('MYSQL', 'USERNAME'),
password=CONFIG.get('MYSQL', 'PASSWORD'),
db=CONFIG.get('MYSQL', 'DATABASE'),
charset=CONFIG.get('MYSQL', 'CHARSET'),
cursorclass=pymysql.cursors.DictCursor
)
def action(self, action_value, action, *action_sub):
with self.connection.cursor() as cursor:
if not action_value:
raise ValueError('This action need a value.')
cursor.execute(action_value)
if action == 'commit':
return self.connection.commit()
elif action == 'fetchall':
return cursor.fetchall()
elif action == 'fetchmany':
if action_sub:
number = int(action_sub[0])
else:
number = int(CONFIG.get('MYSQL', 'FETCH_NUM'))
return cursor.fetchmany(number)
elif action == 'fetchone':
result = cursor.fetchone()
if action_sub:
result = result[action_sub[0]]
return result
elif action == 'sql':
with open(os.path.join(FILE_DIR, action_value), 'r') as f:
for line in filter(lambda x: x.strip(),f):
cursor.execute(line)
return self.connection.commit()
def clean(self):
self.connection.close()
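# Illustrative usage (a sketch, not part of the original file; it assumes a
# reachable MySQL instance and valid settings in the config read by
# TestEngines.config):
#   t = Test()
#   rows = t.action("SELECT 1 AS ok", "fetchall")
#   t.clean()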
|
[
"deepjia@gmail.com"
] |
deepjia@gmail.com
|
4caed74a60c8e498f398a39b567023f66b6fd454
|
306a8a2475b0d86b016be948b368dd44da3d109a
|
/bounties/migrations/0011_auto_20160902_1553.py
|
8ee1b2dfe8da46c27c1185a88f079294b7471230
|
[] |
no_license
|
kgfig/onepiecebounties
|
fb9093a026b653d3ec114fdc812f0471c6c04485
|
305a9b022aa078f1fec70da45d9263aca16b564a
|
refs/heads/master
| 2020-12-03T04:17:19.073372
| 2016-09-18T01:30:04
| 2016-09-18T01:30:04
| 95,845,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('bounties', '0010_auto_20160902_1511'),
]
operations = [
migrations.AlterField(
model_name='pirate',
name='wanted_status',
field=models.IntegerField(null=True, default=1, choices=[(1, 'Dead or Alive'), (2, 'Only Alive')]),
),
]
|
[
"kathleengay.figueroa@gmail.com"
] |
kathleengay.figueroa@gmail.com
|
eb97059c8e8392710252eef94beb2c0179e9b4d7
|
d7a1cab0b158b550d278d416f3d93c2b2a48e106
|
/blissutil.py
|
11887f8c3dd7a54e04393373812156158a64bc87
|
[
"MIT"
] |
permissive
|
pirmoradian/BLISS
|
624d03a0f1c4dfd0b2106abcbea2b702a911ed15
|
efe029fe2ec6409712c000c9bfca2bd6c1dac6c0
|
refs/heads/master
| 2018-06-06T07:19:34.289490
| 2018-05-30T17:47:30
| 2018-05-30T17:47:30
| 125,905,557
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,865
|
py
|
import matplotlib.pyplot as plt
#from scipy import stats,polyval
import numpy as np
from operator import itemgetter
import pickle
import copy
import re
__all__ = ['UtilDict', 'FileReader']
#***************************************
class UtilDict(object):
    '''utility functions for dictionaries'''
def __init__(self):
pass
def rescale_d(self, d):
avg = np.mean(d.values())
intv = np.max(d.values()) - np.min(d.values())
for k, v in d.iteritems():
d[k] = (d[k] - avg) / float(intv)
return d
def sort_dictionary(self, d, sort='key'):
        '''sorts a dictionary based on 'key' or 'value' as indicated by the 'sort' argument and returns the corresponding sorted list of (key, value) tuples'''
d_tuple = d.items()
if sort == 'key':
d_tuple = sorted(d_tuple, key=itemgetter(0))
            #sorted(d_tuple, key=lambda a: a[0])
elif sort == 'value':
d_tuple = sorted(d_tuple, key=itemgetter(1), reverse = True)
else:
raise Exception, 'how should the dictionary be sorted?!'
return d_tuple
    def normalize_d_values(self, d_):
        "Normalize the values of a dictionary"
d = copy.deepcopy(d_)
for k in d:
d[k] = float(d[k])
s = sum(d.values())
for w in d:
d[w] = d[w]/s
return d
def normalize_d_wrt1stwd(self, d):
fq_wds = {}
for k, v in d.iteritems():
w1, w2 = k.split()
fq_wds[w1] = fq_wds.get(w1,0) + v
for k in d:
w1, w2 = k.split()
d[k] = d[k] / float(fq_wds[w1])
return d
def get_shared_keys(self,d1, d2):
'''get the keys shared by two dictionaries d1, d2'''
keys = []
keys = [k for k in d1 if k in d2]
return keys
def get_shared_dicts(self,d1, d2):
'''get the dictionaries corresponding to a part of d1, d2 which share the same key '''
keys = self.get_shared_keys(d1,d2)
d1_shd={}
d2_shd={}
for k in keys:
d1_shd[k] = d1[k]
d2_shd[k] = d2[k]
return d1_shd,d2_shd
def get_diff_keys(self,d1, d2):
'''get the keys of d1 that are not in d2: key(d1)-key(d2)'''
keys = []
keys = [k for k in d1 if not k in d2]
return keys
def get_diff_dict(self,d1, d2):
'''get the difference of two dictionaries which corresponds to d1-d2 '''
keys = self.get_diff_keys(d1,d2)
d1_diff={}
for k in keys:
d1_diff[k] = d1[k]
return d1_diff
def get_dict_val_interval(self,d,min_val=0,max_val=100):
'''returns that part of dictionary d whose value is in the exclusive interval of (min_val max_val)'''
d_mv = {}
for k,v in d.items():
if (v > min_val and v < max_val):
d_mv[k]=v
return d_mv
def get_d1vsd2_sharedkeys(self, d1, d2):
d1_shd, d2_shd = self.get_shared_dicts(d1, d2)
keys = d1_shd.keys()
d1_values = []
d2_values = []
for k in keys:
d1_values.append(d1_shd[k])
d2_values.append(d2_shd[k])
return keys, d1_values, d2_values
def save_d(self, d, filename):
'''save a dictionary into a file'''
f = open(filename, 'w')
pickle.dump(d, f)
f.close()
def load_d(self, filename):
'''load a dictionary from a file'''
f = open(filename, 'r')
d = pickle.load(f)
f.close()
return d
def writetofile(self,d,filename,sort='value'):
f = open(filename,'w')
srtd_tuple = self.sort_dictionary(d,sort=sort)
for k,v in srtd_tuple:
f.write(str(v)+' '+k+'\n')
f.close()
def split_key(self,d,n=0):
'''split every key of a dictionary and create a dictionary with nth element of splitted key and its corresponding value
Example: d={'w1 k1':5, 'w1 k2':3, 'w2 k1':4}
n=0 => returns d1={'w1':8, 'w2':4}
n=1 => returns d1={'k1':9, 'k2':3}
'''
d1={}
for key,val in d.items():
key_list = key.split()
w = key_list[n]
d1[w] = d1.get(w,0)+val
return d1
def check_same_key(self, d1, d2):
#Check if dictionaries d1 and d2 have the same key
if sorted(d1) != sorted(d2):
            raise Exception, "dictionaries do not have the same probability space"
def lookup_dict(self,wordlist,ref_d):
#return the values of the corresponding wordlist in ref_d
p = []
for w in wordlist:
w = w.lower()
if w in ref_d:
p.append(float(ref_d[w]))
else:
p.append(0.0)
return p
def adjust_lineartransform(self,d1,d2):
#TODOl: where should I put this function
        '''given a linear relation between d1 and d2, this function linearly transforms the values in d1; for example,
        if d1[k]=400 and d2[k]=800, it returns d[k]=400*(400/800)=200 '''
self.check_same_key(d1,d2)
d={}
for k in d1:
v1 = d1[k]
v2 = d2[k]
v = v1 * v1 * (1./v2)
d[k] = round(v)
return d
def lineartransform(self,d,a,b):
d_={}
for k,v in d.items():
v_= v*a + b
d_[k] = v_
return d_
def del_key_d(self,key,d):
'''delete a key from a dictionary '''
#d = copy.deepcopy(d_)
for k_d in d.keys():
pat = '\W'+key+'$'+'|'+'^'+key+'\W'
if re.search(pat,k_d, re.IGNORECASE):
del d[k_d]
return d
def del_keypair_d(self,key,d):
'''delete a key which is a pair from a dictionary '''
#d = copy.deepcopy(d_)
for k_d in d.keys():
if key == k_d:
del d[k_d]
return d
def merge_2dicts_disreg_ordkeypairs(self, d1, d2):
'''merge two dictionaries containing pair of words while the order of words is disregarded:
d1={'w1 w2':3, 'w2 w3':4}
d2={'w2 w1':12, 'w2 w3':4, 'w4 w5':5}
returns d_mrg={'w1 w2':15, 'w2 w3':4, 'w4 w5':5}
'''
d_mrg = {}
for pair in d1:
w1, w2 = pair.split()
rev_pair = w2 + ' ' + w1
if rev_pair in d2:
d_mrg[pair] = d1[pair] + d2[rev_pair]
else:
d_mrg[pair] = d1[pair]
for pair in d2:
w1, w2 = pair.split()
rev_pair = w2 + ' ' + w1
if not (pair in d_mrg or rev_pair in d_mrg):
d_mrg[pair] = d2[pair]
return d_mrg
#******************************************
class FileReader(object):
    '''functions to read the different types of files that may correspond to different distributions'''
def __init__(self):
pass
#TODOl: adapting these functions to read several types of input files
def read_words_d_file_r(self, filename):
''' reads a file containing the distribution of words and returns a dictionary with words and freqs as its keys and values'''
f = open(filename,'r')
d = {}
for l in f.readlines():
fq,w = l.split()
d[w.lower()] = float(fq)
f.close()
return d
def read_pairs_d_file_r(self, filename):
''' reads a file containing the distribution of pairs and returns a dictionary with pairs and freqs as its keys and values'''
f = open(filename,'r')
d = {}
for l in f.readlines():
p,fq = l.split(':')
d[p.lower()] = float(fq)
f.close()
return d
def read_words_d_spcw_file_r(self, filename,w_list):
''' reads a file containing the distribution of words and only extracts the word from them'''
d = self.read_words_d_file(filename)
w_l = [w.lower() for w in w_list]
d1={}
#TODOl: make it pythonic list comprehension
for w in w_list:
if w in d:
d1[w] = d[w]
return d1
def read_pairs_d_spcw_file_r(self, filename, w_list):
        '''reads a file containing all pair distributions and only extracts the pairs having specific words, indicated in w_list, as their first word'''
d = self.read_pairs_d_file_r(filename)
w_l = [w.lower() for w in w_list]
d1={}
#TODOl: make it pythonic list comprehension
for w in w_list:
for k in d:
if k.startswith(w+' '):
d1[k] = d[k]
return d1
def read_words_d_file(self, filename):
''' reads a file containing the distribution of words and returns a dictionary with words and freqs as its keys and values'''
f = open(filename,'r')
d = {}
for l in f.readlines():
ws = l.split()
fq = ws[0]
w = ' '.join(ws[1:])
d[w.lower()] = float(fq)
f.close()
return d
def read_words_d_separate_files(self,wordfile,probfile):
'''reads two separate files containing words and their corresponding probs respectively.'''
wf = open(wordfile, 'r')
pf = open(probfile, 'r')
p_list = []
d = {}
for l in pf.readlines():
line = l.split()
p = float(line[0])
p_list.append(p)
print 'p_list done!'
pf.close()
idx = 0
for l in wf.readlines():
d[l] = p_list[idx]
idx = idx+1
wf.close()
return d
def read_words_d_spcw_file(self, filename,w_list):
''' reads a file containing the distribution of words and only extracts the word from them'''
d = self.read_words_d_file(filename)
w_l = [w.lower() for w in w_list]
d1={}
#TODOl: make it pythonic list comprehension
for w in w_list:
if w in d:
d1[w] = d[w]
return d1
def read_pairs_d_spcw_file(self, filename, w_list):
        '''reads a file containing all pair distributions and only extracts the pairs having specific words, indicated in w_list, as their first word'''
#d = self.read_pairs_d_file(filename)
d = self.read_words_d_file(filename)
w_l = [w.lower() for w in w_list]
d1={}
#TODOl: make it pythonic list comprehension
for w in w_list:
for k in d:
if k.startswith(w+' '):
d1[k] = d[k]
return d1
def cut_line_words_pos(self,filename_in, filename_out, wordlist, pos):
        '''if a line contains a word of wordlist in the specified position, the line is cut before that word and written to the output file '''
f_in = open(filename_in, 'r')
f_out = open(filename_out, 'w')
selected_lines = []
for l in f_in.readlines():
words_l = l.split()
if len(words_l)>pos:
for spcw in wordlist:
if (words_l[pos] == spcw):
a = ' '.join(words_l[:pos])
f_out.write(a+'\n')
break
def columnSelector (self, filename_in, filename_out, col_width, cond='greater'):
'''the python equivalent to the perl package ColumnSelector.pm
it selects sentences if their number of words are EQUAL/GREATER (cond) than COL_WIDTH
and it only returns the first COL_WIDTH of words of the selected sentence'''
f_in = open(filename_in, 'r')
f_out = open(filename_out, 'w')
for l in f_in.readlines():
words_l = l.split()
sent_sz = len(words_l)
if (cond == 'greater') and (sent_sz >= col_width):
a = ' '.join(words_l[:col_width])
f_out.write(a+'\n')
elif (cond == 'equal') and (sent_sz == col_width):
a = ' '.join(words_l[:col_width])
f_out.write(a+'\n')
f_in.close()
f_out.close()
#*******************************************
class DictPlotter(object):
'''plots dictionaries: values vs. keys'''
def __init__(self):
self.ud = UtilDict()
def plot_xy(self,x,y,fig=None,xlab='X',ylab='Y',title='X vs. Y',linestyle='bo'):
if not fig: fig = plt.figure(1)
width = 0.5
xlocations = np.array(range(len(x)))+width
#print 'xlocation:',xlocations
#TODOl Ali: solve yticks!
#plt.yticks(np.arange(0,0.005 ,0.001))
#if len(labels) < 40:
# plt.xticks(xlocations+ width/2, labels, rotation=70,fontsize=8)
plt.xlim(0, xlocations[-1]+width*2)
#plt.ylim(0, .5)
plt.plot(x,y,linestyle)
plt.xlabel(xlab, fontsize=24)
plt.ylabel(ylab,fontsize=24)
plt.title(title,fontsize=28)
return fig
def plot_d(self,d,sort='key',fig=None,xlab='word',ylab='Probability',title='Words Distribution'):
''' plots one dictionary which might be the distribution of words/pairs'''
if not fig: fig = plt.figure(1)
d_tuple = self.ud.sort_dictionary(d, sort)
print 'd:',d_tuple
labels, data = zip(*d_tuple)
width = 0.5
xlocations = np.array(range(len(data)))+width
#print 'xlocation:',xlocations
#TODOl Ali: solve yticks!
#plt.yticks(np.arange(0,0.005 ,0.001))
#if len(labels) < 40:
# plt.xticks(xlocations+ width/2, labels, rotation=70,fontsize=8)
plt.xticks(xlocations+ width/2, labels, rotation=70,fontsize=8)
plt.xlim(0, xlocations[-1]+width*2)
#plt.ylim(0, .5)
plt.bar(xlocations, data)
plt.xlabel(xlab, fontsize=24)
plt.ylabel(ylab,fontsize=24)
plt.title(title,fontsize=28)
#plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
#plt.axis([40, 160, 0, 0.03])
plt.grid(True)
return fig
def plot_2d(self, d1,d2, fig=None, sort='key',xlab='word', ylab='Probability', title1='Distribution1',title2='Distribution2'):
'''plot two dictionaries, which might be the distributions of words or pairs, on the same figure'''
plt.ion()
fig = plt.figure(1)
#plt.subplot(211)
fig.add_axes([.1,.55,.8,.4]) # [left, bottom, width, height]
ax1=plt.gca()
self.plot_d(d1, sort=sort, fig=fig, xlab=xlab, ylab=ylab, title=title1)
ax1.set_xlabel('')
#plt.subplot(212)
fig.add_axes([.1,.07,.8,.4])
ax2=plt.gca()
self.plot_d(d2, sort=sort, fig=fig, xlab=xlab, ylab=ylab, title=title2)
#plt.show()
return fig
class RegressionAnalysis(object):
'''different regression methods'''
def __init__(self):
pass
    def linreg(self, x, y):
        # the module-level scipy import is commented out above, so import locally
        from scipy import stats, polyval
        (a_s, b_s, r, tt, stderr) = stats.linregress(x, y)
        print('Linear regression using stats.linregress')
        print('regression: a=%.2f b=%.2f, std error= %.3f' % (a_s, b_s, stderr))
        # plot the input data and the fitted line (the original plotted
        # hard-coded demo data instead of its arguments)
        fitted = polyval([a_s, b_s], x)
        plt.plot(x, y, 'bo')
        plt.plot(x, fitted, 'g-')
        plt.legend(['original', 'regression'])
        plt.show()
def avg_len(filename,max_sent_len=100):
    '''average length of sentences in a file, e.g.:
    n_snt_ln_lst = [0, 0, 2, 4, 2, 0, 2]
    p_ln_lst = [0, 0, .2, .4, .2, 0, .2]
    l_avg = 2*.2 + 3*.4 + 4*.2 + 6*.2
    '''
n_snt_ln_lst = get_len_list(filename,max_sent_len)
s = float(sum(n_snt_ln_lst))
p_ln_lst = [i/s for i in n_snt_ln_lst]
l_avg = 0
for idx,v in enumerate(p_ln_lst):
l_avg = l_avg + idx*v
return l_avg
def get_len_list(filename,max_sent_len=100):
'''gets a list having the information about the lengths of sentences in a file: len_list[3] indicates the number of sentences with the length of 3 '''
    n_snt_ln_lst = [0] * (max_sent_len + 1)  # index = sentence length
f = open(filename,'r')
for l in f.readlines():
w_l = len(l.split())
n_snt_ln_lst[w_l] = n_snt_ln_lst[w_l] + 1
return n_snt_ln_lst
|
[
"sahar.pirmoradian@gmail.com"
] |
sahar.pirmoradian@gmail.com
|
bc223b451fec6f8d4802803c50b9f1f0a299af89
|
7912b6cccb6b4d9e7b1c1a4d3f196f8da352ff11
|
/computer_guesses_number.py
|
98b9d7f0e44557a6adf6365deb58b50b1a036b7a
|
[] |
no_license
|
panatale1/teaching
|
22090d4f3226f6d1a06b71458638dced4840af41
|
e25364aa43f31542ab105fdf44781d8da9fd4ff7
|
refs/heads/master
| 2020-03-17T06:50:54.189972
| 2019-07-27T19:45:00
| 2019-07-27T19:45:00
| 133,371,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 559
|
py
|
def guess_number(low, high):
if low == high:
return low
mid = (high + low) / 2
response = raw_input("Is your number {}?(yes/no) ".format(mid))
if response.lower()[0] == 'y':
return mid
response = raw_input("Is your number larger than {}?(yes/no) ".format(mid))
if response.lower()[0] == 'y':
return guess_number(mid+1, high)
return guess_number(low, mid-1)
print("Pick a number between 1 and 1,000,000")
raw_input("Hit Enter when you have it")
print("Your number is: {}".format(guess_number(1, 1000000)))
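# Note: the halving strategy above needs at most ceil(log2(1000000)) = 20
# "larger than" questions before low == high pins down the number.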
|
[
"panatale1@gmail.com"
] |
panatale1@gmail.com
|
145dd83ff13b43d5878b3eb0f4de0a6748d7ed10
|
ac47074bcf749273941ab01213bb6d1f59c40c99
|
/project/multi_factor/alpha_model/exposure/alpha_daily_tsrank9.py
|
5454e25583fa2b1f682220d7cd8d614d685f6e42
|
[] |
no_license
|
xuzhihua95/quant
|
c5561e2b08370610f58662f2871f1f1490681be2
|
c7e312c70d5f400b7e777d2ff4c9f6f223eabfee
|
refs/heads/master
| 2020-05-19T17:04:08.796981
| 2019-04-24T02:50:29
| 2019-04-24T02:50:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,911
|
py
|
import pandas as pd
from quant.stock.date import Date
from quant.stock.stock import Stock
from quant.project.multi_factor.alpha_model.exposure.alpha_factor import AlphaFactor
class AlphaDailyTsRank9(AlphaFactor):
"""
    High-frequency factor:
    ts_rank(rank(low), 9)
    First rank each stock's daily low price within the stock universe, then rank that cross-sectional rank over the most recent 9 days.
"""
def __init__(self):
AlphaFactor.__init__(self)
self.exposure_path = self.data_path
self.raw_factor_name = 'daily_alpha_raw_ts_rank9'
    def cal_factor_exposure(self, beg_date, end_date):
        """ Compute the factor exposure """
term = 9
price_low = Stock().read_factor_h5("Price_Low_Adjust").T
price_low_rank = price_low.rank(axis=1)
date_series = Date().get_trade_date_series(beg_date, end_date)
result = pd.DataFrame([], index=date_series, columns=price_low.columns)
for i_date in range(len(date_series)):
data_end_date = date_series[i_date]
data_beg_date = Date().get_trade_date_offset(data_end_date, - term + 1)
data_period = price_low_rank.loc[data_beg_date: data_end_date, :]
if len(data_period) >= term:
print('Calculating factor %s at date %s' % (self.raw_factor_name, data_end_date))
ts_rank = data_period.rank().loc[data_end_date, :]
result.loc[data_end_date, :] = ts_rank
else:
print('Calculating factor %s at date %s is null' % (self.raw_factor_name, data_end_date))
res = result.dropna(how='all').T
self.save_alpha_factor_exposure(res, self.raw_factor_name)
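    # A vectorized pandas-only sketch of the same ts_rank computation (an
    # illustrative assumption, not the production path of this class):
    #   cs_rank = price_low.rank(axis=1)
    #   ts_rank9 = cs_rank.rolling(9).apply(lambda s: s.rank().iloc[-1], raw=False)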
if __name__ == '__main__':
    beg_date = '20060101'
    end_date = '20100101'
    self = AlphaDailyTsRank9()
    self.cal_factor_exposure(beg_date, end_date)
|
[
"1119332482@qq.com"
] |
1119332482@qq.com
|
e04bd468aa4464b615e74973bec338036d840146
|
6c7b19ad3119ca7f5f7e2e59102900faa06a268d
|
/uncle_lesson_ep6.py
|
22299036b7a251a8ebb065ed43028f107e4c17d1
|
[] |
no_license
|
pui-ml/expense
|
ad2a8f1d9788ff61b4bf7894af30fe6bde3d83a1
|
585f0a92b70a105bc2629e5c3116483b00a0fe41
|
refs/heads/main
| 2023-08-10T23:06:13.045386
| 2021-09-26T15:24:32
| 2021-09-26T15:24:32
| 409,151,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,998
|
py
|
from tkinter import *
from tkinter import ttk, messagebox
import csv
from datetime import datetime
import sys
GUI = Tk()
GUI.title("Expense Tracker")
GUI.geometry("500x500+200+100")
########### menu bar ###############
menu_bar = Menu(GUI)
GUI.config(menu=menu_bar)
# file menu
file_menu = Menu(menu_bar, tearoff=False)
menu_bar.add_cascade(label='File', menu=file_menu)
file_menu.add_command(label='Import CSV')
file_menu.add_command(label='Export CSV')
def About():
print('About Menu')
messagebox.showinfo('About', 'สวัสดี โปรแกรมนี้ทำงานดีมาก\nสนใจบริจาคได้')
# help menu
help_menu = Menu(menu_bar, tearoff=False)
menu_bar.add_cascade(label='Help', menu=help_menu)
help_menu.add_command(label='About', command=About)
# Donate menu
donate_menu = Menu(menu_bar, tearoff=False)
menu_bar.add_cascade(label='Donate', menu=donate_menu)
# Tab ------------------------------
Tab = ttk.Notebook(GUI)
T1 = Frame(Tab)
T2 = Frame(Tab)
Tab.pack(fill=BOTH, expand=1)
expenseicon = PhotoImage(file="wallet-icon.png").subsample(2)
listicon = PhotoImage(file="folder-flower-orange-icon.png").subsample(2)
Tab.add(T1, text=f'{"Add Expense":^50}', image=expenseicon, compound="top")
Tab.add(T2, text=f'{"Expense List":^50s}', image=listicon, compound="top")
# ----------- define frame ----------
F1 = Frame(T1)
# F1.place(x=100,y=20)
F1.pack()
days = {
"Mon": "จันทร์",
"Tue": "อังคาร",
"Wed": "พุธ",
"Thu": "พฤหัส",
"Fri": "ศุกร์",
"Sat": "เสาร์",
"Sun": "อาทิตย์",
}
# -------- define font ---------------
FONT1 = (None, 20)
FONT2 = (None, 10)
# Logo -----------------------------
centerimg = PhotoImage(file="cash-icon.png")
logo = ttk.Label(F1, image=centerimg)
logo.pack()
def new_form():
v_expense.set("")
v_price.set("")
v_amount.set("")
E1.focus()
# ------------ save to file ------------
def Save(event=None):
try:
expense = v_expense.get()
if expense == "":
raise ValueError("")
price = float(v_price.get())
if price <= 0:
raise ValueError("")
amount = float(v_amount.get())
if amount <= 0:
raise ValueError("")
total = amount * price
today = datetime.now().strftime("%a")
this_time = datetime.now().strftime(f"%Y-%m-%d {days[today]} %H:%M:%S")
text = f"รายการ: {expense} - ราคา: {price:.2f} บาท - จำนวน: {amount:.1f} ชิ้น - รวม: {total:.2f} บาท\n- เวลาทำรายการ {this_time}"
v_result.set(text)
# print(f'รายการ: {expense} - ราคา: {price:.2f} - จำนวน: {amount:.1f} - รวม: {total:.2f} - เวลาทำรายการ {this_time}')
new_form()
with open("savedata.csv", "a", encoding="utf-8", newline="") as f:
fw = csv.writer(f)
data = [expense, price, amount, total, this_time]
fw.writerow(data)
except Exception as e:
# messagebox.showerror('Error',e)
# messagebox.showwarning('Error','Please enter new data')
messagebox.showinfo("Error", " Please enter data ")
new_form()
update_table()
def terminate():
sys.exit(1)
GUI.bind("<Return>", Save)
# -------------- text1 start--------------
v_expense = StringVar()
L1 = ttk.Label(F1, text="รายการค่าใช้จ่าย", font=FONT2).pack()
E1 = ttk.Entry(F1, textvariable=v_expense, font=FONT2)
E1.pack()
# -------------- text1 end--------------
# -------------- text2 start--------------
v_price = StringVar()
L2 = ttk.Label(F1, text="ราคา (บาท)", font=FONT2).pack()
E2 = ttk.Entry(F1, textvariable=v_price, font=FONT2)
E2.pack()
# -------------- text2 end--------------
# -------------- text3 start--------------
v_amount = StringVar()
L3 = ttk.Label(F1, text="จำนวน (ชิ้น)", font=FONT2).pack()
E3 = ttk.Entry(F1, textvariable=v_amount, font=FONT2)
E3.pack()
# -------------- text3 end--------------
saveicon = PhotoImage(file="wallet-icon.png").subsample(2)
B1 = ttk.Button(
F1, text="บันทึก", command=Save, image=saveicon, compound="left", width=12
)
B1.pack(ipadx=20, ipady=8, pady=8)
B2 = ttk.Button(F1, text="ล้างข้อมูล", command=new_form, width=18)
B2.pack(ipadx=20, ipady=8, pady=4)
B3 = ttk.Button(F1, text="จบโปรแกรม", command=terminate, width=18)
B3.pack(ipadx=20, ipady=8, pady=4)
v_result = StringVar()
result = ttk.Label(F1, textvariable=v_result, font=FONT2, foreground="green")
result.pack(pady=10)
# tab 2 -------- read_csv
def read_csv():
with open("savedata.csv", newline="", encoding="utf-8") as f:
fr = csv.reader(f)
data = list(fr)
return data
# table ---
L = ttk.Label(T2, text='ตารางผลลัพธ์', font=FONT1).pack(pady=20)
header = ["รายการ", "ค่าใช้จ่าย", "จำนวน", "รวม", "วัน-เวลา"]
header_width = [120, 80, 80, 100, 120]
result_table = ttk.Treeview(T2, columns=header, show="headings", height=10)
result_table.pack(pady=20)
for head, h_width in zip(header, header_width):
result_table.heading(head, text=head)
result_table.column(head, width=h_width)
# result_table.insert('', 'end', value=['วันจันทร์', 30, 40, 50, 60])
def update_table():
    result_table.delete(*result_table.get_children())  # clear old rows before updating
data = read_csv()
for d in data:
result_table.insert('', 0, value=d)
update_table()
GUI.bind("<Tab>", lambda x: E2.focus())
GUI.mainloop()
|
[
"noreply@github.com"
] |
pui-ml.noreply@github.com
|
39d1e88a3987aea0b3a27768b630796c94cb3239
|
e3f4e9e7252bec042fd97076faef0a94251a3201
|
/shopapi/routers/user.py
|
c7a34eac150d43acace6cc6a1b783351ae193ac6
|
[] |
no_license
|
eshopapi/python-api
|
95f64c2777867ce3d246d2e1aa6addd0039953a3
|
0d2cc82dba94384541be2507701b5b9b34634e5f
|
refs/heads/master
| 2023-03-26T11:44:47.631281
| 2021-03-27T00:59:25
| 2021-03-27T00:59:25
| 347,774,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,568
|
py
|
"""User-related API endpoints
"""
import logging
from typing import List
from fastapi import APIRouter, Depends
from shopapi.helpers import dependencies as deps, exceptions
from shopapi.schemas import models, schemas, api
from shopapi import actions
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/user", tags=["Users"])
operator = actions.user.UserOperator()
@router.get("/me", response_model=schemas.UserToken)
async def user_info(user: schemas.UserToken = Depends(deps.get_user)):
"""Return user information"""
return user
@router.get("/role", response_model=schemas.Role, dependencies=[Depends(deps.get_user)])
async def user_role(role: schemas.Role = Depends(deps.get_user_role)):
"""Get user's role and return it"""
return role
@router.put("/role", dependencies=[Depends(deps.get_user)], response_model=api.RoleUpdateOut)
async def user_role_update(role_update: api.RoleUpdateIn, role: schemas.Role = Depends(deps.get_user_role)):
"""Update user's role.
Required permissions:
- `users.write`
- `roles.write`
"""
if not role.users.write or not role.roles.write:
raise exceptions.InsufficientPermissions("users.write, roles.write")
if not await models.Role.get(id=role_update.role_id):
raise exceptions.ResourceNotFound("role", role_update.role_id)
if not (user_db := await models.User.get(id=role_update.user_id)):
raise exceptions.ResourceNotFound("user", role_update.user_id)
user_db = await user_db.update_from_dict({"role_id": role_update.role_id})
await user_db.save()
await user_db.fetch_related("role")
return api.RoleUpdateOut.from_orm(user_db)
@router.get(
"/",
dependencies=[Depends(deps.get_user)],
response_model=List[api.UserUpdateOut],
)
async def user_list(
role: schemas.Role = Depends(deps.get_user_role),
common: deps.QueryParams = Depends(deps.query_params),
):
"""List all users
Required permissions:
- `users.read`
"""
return await operator.list(common, role)
@router.get("/{user_id}", response_model=api.UserUpdateOut)
async def user_get(
user_id: int, user: schemas.UserToken = Depends(deps.get_user), role: schemas.Role = Depends(deps.get_user_role)
):
"""Get user info
Required permissions:
- `users.read`
"""
if user.id == user_id:
return await operator.get(user_id)
return await operator.get(user_id, role)
@router.post("/")
async def user_create(user: api.UserUpdateIn, role: schemas.Role = Depends(deps.get_user_role)):
"""Create a user
Required permissions:
- `users.write`
"""
return await operator.create(user, role)
@router.delete("/{user_id}", dependencies=[Depends(deps.get_user)])
async def user_delete(user_id: int, role: schemas.Role = Depends(deps.get_user_role)):
"""Delete user.
Required permissions:
- `users.delete`
"""
return await operator.delete(user_id, role)
@router.put("/{user_id}", response_model=api.UserUpdateOut)
async def user_update(
user_id: int,
info: api.UserUpdateIn,
user: schemas.UserToken = Depends(deps.get_user),
role: schemas.Role = Depends(deps.get_user_role),
):
"""Update user in database specified with `user_id` with the data in `info`
Required permissions:
- `users.write`
"""
if user.id == user_id:
logger.info("Skipping role check, user editing themselves")
return await operator.update(user_id, info)
return await operator.update(user_id, info, role)
|
[
"info@tomasvotava.eu"
] |
info@tomasvotava.eu
|
6abff99c7fff4207e881dc321edeb8ebe67ca815
|
49eed26edb54813b3dce4dc41a512c57e2822b36
|
/plugins/rex_socketio_chat/ServerEngine/src/setup_engine.py
|
64a28ee202d5d64fe6b3572ac8c21c67b19a9022
|
[] |
no_license
|
JohnnySheffield/C2_Plugins_Rex
|
b41d2620c5fdd870f1f233ed243b3dbf96923070
|
c723550cb36540209fa71415dbfa91be328a139a
|
refs/heads/master
| 2021-01-18T13:29:58.342134
| 2013-04-28T01:01:23
| 2013-04-28T01:01:23
| 9,702,925
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
# setup.py
from distutils.core import setup
import py2exe
# We need to import the glob module to search for all files.
import glob
# Remove the build folder, a bit slower but ensures that build contains the latest
import shutil
shutil.rmtree("build", ignore_errors=True)
# my setup.py is based on one generated with gui2exe, so data_files is done a bit differently
includes = ['tornado',
'tornadio2',
'socket',
'sys',
'threading',
'time',
'math',
'random',
#'glob',
'zipfile',
'Queue',
'optparse',
're',
'xml.etree.ElementTree',
'urllib2',
'urlparse',
]
setup(console=[{"script": "AppLoader.py"}],
options = {
'py2exe' : {
'includes' : includes,
'bundle_files' : 1,
}
},
zipfile = None,
)
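# Note: bundle_files=1 asks py2exe to pack everything, including the Python
# interpreter DLL, into the executable, and zipfile=None inlines the library
# archive as well, so the build yields a single self-contained .exe.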
|
[
"binhua.tein@gmail.com"
] |
binhua.tein@gmail.com
|
d0a03cd0e5641548a9237b8af49e0d2aed2252f2
|
6ba4546aaad6e468b86244d51619aabce58e3c63
|
/yqpass-v1.5/backend/resmanage/models.py
|
b6786510de58b2c2ba25cb218d8a8f483c25c201
|
[] |
no_license
|
LezBaishi/yqpass
|
3840d9f48f2809e8617002e5a8ff9492f10ec202
|
7131e418c01095ec0248a28afc06998cd2d7505c
|
refs/heads/master
| 2022-10-31T18:24:15.278290
| 2018-10-23T12:05:44
| 2018-10-23T12:05:44
| 140,539,352
| 1
| 1
| null | 2022-10-10T23:01:09
| 2018-07-11T07:39:25
|
Vue
|
UTF-8
|
Python
| false
| false
| 8,349
|
py
|
from django.db import models
__author__ = 'mc'
# Create your models here.
class BuildingInfo(models.Model):
"""
    Building information table
"""
building = models.CharField("楼栋", max_length=50)
creator = models.CharField('创建人', max_length=50, default='admin')
gmt_created = models.DateTimeField('创建时间', auto_now_add=True)
modifier = models.CharField('修改人', max_length=50, default='admin')
gmt_modified = models.DateTimeField('更新时间', auto_now=True)
is_deleted = models.BooleanField("已删除", default=False)
def __str__(self):
return self.building
class Meta:
verbose_name = "楼栋信息"
verbose_name_plural = "楼栋信息"
class BuildingRoomInfo(models.Model):
"""
    Building equipment-room information table
"""
building_info = models.ForeignKey('BuildingInfo', on_delete=models.CASCADE, related_name='BuildingRoomInfos',
help_text='所在楼栋')
room = models.CharField("机房号", max_length=50, help_text='F表示楼层,R表示弱电间,S表示南边,N表示北边,-1表示负一层')
description = models.CharField("备注", max_length=100, help_text='有英文标注请详细说明')
creator = models.CharField('创建人', max_length=50, default='admin')
gmt_created = models.DateTimeField('创建时间', auto_now_add=True)
modifier = models.CharField('修改人', max_length=50, default='admin')
gmt_modified = models.DateTimeField('更新时间', auto_now=True)
is_deleted = models.BooleanField("已删除", default=False)
def __str__(self):
building_name = self.building_info.building
return building_name + '_' + self.room
class Meta:
verbose_name = "楼栋机房信息"
verbose_name_plural = "楼栋机房信息"
class OcableSection(models.Model):
"""
    Optical cable section table
"""
bri_A = models.ForeignKey('BuildingRoomInfo', on_delete=models.CASCADE, related_name='OcableSectionsA',
help_text='A端楼栋机房号,A端排序规则:(1)不同楼宇:3.1--2.3--2.2--1.3--1.2--1.1--2.1--6.1--6.2--4.1--4.2--4.3--4.4.1--4.4.2--4.5.1--4.5.2--园内(指挥部/保安亭/变电站/足球场)--出局;'
+ '(2)相同楼宇:底层机房号数小的为A端,高层机房号数大为Z端。F表示楼层,R表示弱电间,S表示南边,N表示北边,-1表示负一。')
name_A = models.CharField('A端名称', max_length=50, unique=True,
help_text='命名格式参考:3.1栋101数据机房路由1-1或1.1栋1楼南塔弱电间')
bri_Z = models.ForeignKey('BuildingRoomInfo', on_delete=models.CASCADE, related_name='OcableSectionsZ',
help_text='Z端楼栋机房号,A端排序规则:(1)不同楼宇:3.1--2.3--2.2--1.3--1.2--1.1--2.1--6.1--6.2--4.1--4.2--4.3--4.4.1--4.4.2--4.5.1--4.5.2--园内(指挥部/保安亭/变电站/足球场)--出局;'
+ '(2)相同楼宇:底层机房号数小的为A端,高层机房号数大为Z端。F表示楼层,R表示弱电间,S表示南边,N表示北边,-1表示负一。')
name_Z = models.CharField('Z端名称', max_length=50, unique=True,
help_text='命名格式参考:3.1栋101数据机房路由1-1或1.1栋1楼南塔弱电间')
ocable_name = models.CharField('光缆段名称', max_length=100)
core_num = models.PositiveSmallIntegerField('纤芯数')
ocable_length = models.DecimalField('光缆长度(皮长公里)', max_digits=5, decimal_places=2)
SINGLE_MODE = '单模'
MULTIMODE = '多模'
OCABLE_TYPE_CHOICES = (
(SINGLE_MODE, '单模'),
(MULTIMODE, '多模'),
)
ocable_type = models.CharField('光缆类型', max_length=10, choices=OCABLE_TYPE_CHOICES, default=SINGLE_MODE)
GANXIAN = '干线'
BENDIWANG = '广州本地网'
YUANQU = '南方基地园区'
YUANQU2 = '南方基地园区(CMNET网络成端)'
OCABLE_LEVEL_CHOICES = (
(GANXIAN, '干线'),
(BENDIWANG, '广州本地网'),
(YUANQU, '南方基地园区'),
(YUANQU2, '南方基地园区(CMNET网络成端)'),
)
ocable_level = models.CharField('光缆等级', max_length=20, choices=OCABLE_LEVEL_CHOICES)
OFIBER_TYPE1 = 'G652'
OFIBER_TYPE2 = 'G655'
OFIBER_TYPE_CHOICES = (
(OFIBER_TYPE1, 'G652'),
(OFIBER_TYPE2, 'G655'),
)
ofiber_type = models.CharField('光纤型号', max_length=10, choices=OFIBER_TYPE_CHOICES)
notes = models.TextField('备注', null=True, blank=True)
creator = models.CharField('创建人', max_length=50, default='admin')
gmt_created = models.DateTimeField('创建时间', auto_now_add=True)
modifier = models.CharField('修改人', max_length=50, default='admin')
gmt_modified = models.DateTimeField('更新时间', auto_now=True)
is_deleted = models.BooleanField("已删除", default=False)
def __str__(self):
return self.ocable_name
class Meta:
verbose_name = "光缆段"
verbose_name_plural = "光缆段"
class OfiberCore(models.Model):
"""
    Optical fiber core table
"""
core_no = models.PositiveSmallIntegerField('纤芯序号', help_text='在该光缆段中的纤芯序号')
ocable_section = models.ForeignKey('OcableSection', on_delete=models.CASCADE, related_name='OfiberCores',
help_text='对应光缆段名称')
ocable_odf_A = models.CharField('A端光缆ODF', max_length=50,
help_text='命名格式参考:1.1栋负1楼ODF01-1-A1或3.1栋103机房G101-1-A1')
ocable_odf_Z = models.CharField('Z端光缆ODF', max_length=50,
help_text='命名格式参考:1.1栋负1楼ODF01-1-A1或3.1栋103机房G101-1-A1')
ocable_cor = models.CharField('光缆对应', max_length=100)
core_quality = models.CharField('纤芯质量', max_length=50, null=True, blank=True)
notes = models.TextField('备注', null=True, blank=True)
creator = models.CharField('创建人', max_length=50, default='admin')
gmt_created = models.DateTimeField('创建时间', auto_now_add=True)
modifier = models.CharField('修改人', max_length=50, default='admin')
gmt_modified = models.DateTimeField('更新时间', auto_now=True)
is_deleted = models.BooleanField("已删除", default=False)
def __str__(self):
return self.ocable_cor
class Meta:
verbose_name = "纤芯"
verbose_name_plural = "纤芯"
class RouteInfo(models.Model):
"""
    Route information table
    Generates the circuit-route-to-fiber-core relation from the service equipment information at both ends
"""
circuit_num = models.CharField('电路编号', max_length=10, help_text='命名格式:F+四位数字')
ofiber_core = models.ForeignKey('OfiberCore', on_delete=models.CASCADE, related_name='oc_routeinfos',
help_text='对应光缆纤芯')
application_detail = models.ForeignKey('fielddiy.ApplicationDetail', on_delete=models.CASCADE,
related_name='ad_routeinfos', help_text='工单明细')
ONE = 'first'
TWO = 'second'
ROUTE_NO_CHOICES = (
(ONE, '1'),
(TWO, '2'),
)
route_no = models.CharField('路由序号(1或2)', max_length=10, choices=ROUTE_NO_CHOICES)
route_where = models.PositiveSmallIntegerField('路由位置')
ON = '在用'
OFF = '停闭'
STATE_CHOICES = (
(ON, '在用'),
(OFF, '停闭'),
)
state = models.CharField('状态', max_length=10, choices=STATE_CHOICES, default=ON)
creator = models.CharField('创建人', max_length=50, default='admin')
gmt_created = models.DateTimeField('创建时间', auto_now_add=True)
modifier = models.CharField('修改人', max_length=50, default='admin')
gmt_modified = models.DateTimeField('更新时间', auto_now=True)
is_deleted = models.BooleanField("已删除", default=False)
def __str__(self):
return self.circuit_num + '-' + str(self.ofiber_core)
class Meta:
verbose_name = "路由信息"
verbose_name_plural = "路由信息"
|
[
"645034308@qq.com"
] |
645034308@qq.com
|
4ceb1169b17f5ecb87cbe8ecf513094aca5757fb
|
9bcba8f3162eacea872dbadc9990a164f945f70a
|
/Packages/comandos/less_compile.py
|
ac8f39c84ef170e6388339bb635164f8cb30228a
|
[] |
no_license
|
programadorsito/Packages
|
a4cb568219dbc10a69e15a2832ef52d19eb83061
|
af748327f128ed90bb146dc12bb53b76ccb609fd
|
refs/heads/master
| 2021-01-10T04:41:38.676159
| 2016-04-06T07:52:45
| 2016-04-06T07:52:45
| 55,560,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
import sublime, os, platform
import sublime_plugin
import subprocess
import webbrowser
from subprocess import PIPE, Popen
class LessCompileCommand(sublime_plugin.TextCommand):
def run(self, edit):
window=sublime.active_window()
view=window.active_view()
view.run_command("ejecutar_comando_silencioso", {"comando":"lessc $file>$filename.css"})
|
[
"Mac@MacBook-de-Mac.local"
] |
Mac@MacBook-de-Mac.local
|
f5a916d3b329f603a68d398034bffd51f8825d94
|
d14c8419786129124c4a81a9f2f6ba963ada0f0b
|
/labs/lab05/tests/q2_4_3b.py
|
60ed30a13337d549a04a972bee75259718fe307b
|
[] |
no_license
|
ucsb-ds/ds1-w21-content
|
5ad4421ae106de2c2e3d9a9a22f3580cd26757b5
|
1ab3d43d3b66d7eb5774dab3ff23e0bc848241bd
|
refs/heads/main
| 2023-03-13T19:11:14.271885
| 2021-03-08T17:48:22
| 2021-03-08T17:48:22
| 329,216,349
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
test = { 'name': 'q2_4_3b',
'points': 1,
'suites': [ { 'cases': [{'code': '>>> (gave_away_all_guys > 0) and (gave_away_all_guys < 5)\nTrue', 'hidden': False, 'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
|
[
"swsolis@ucsb.edu"
] |
swsolis@ucsb.edu
|
717ab242e9964182526ed036becb8eea6cc752fe
|
63deaf056c59db72a1df338fddaa99453a4fbb6f
|
/test/test_connector.py
|
494b213949352d927cf752325ead0afd05b1498f
|
[
"Apache-2.0"
] |
permissive
|
aantti/haystack
|
60efb46b090fdaf3b00e14e9b16afb617107a7fe
|
6f6f2357fdef9e98850a7ffcd80f18b05aa7cfaf
|
refs/heads/master
| 2023-08-20T23:41:10.416841
| 2021-10-26T09:47:25
| 2021-10-26T09:47:25
| 339,123,743
| 0
| 1
|
Apache-2.0
| 2021-02-16T15:15:18
| 2021-02-15T15:39:59
| null |
UTF-8
|
Python
| false
| false
| 4,525
|
py
|
import json
from pathlib import Path
from re import search
import pytest
from haystack.connector import Crawler
def test_crawler_url_none_exception(tmp_path):
tmp_dir = tmp_path / "crawled_files"
with pytest.raises(ValueError):
Crawler(tmp_dir).crawl()
def test_crawler_depth(tmp_path):
tmp_dir = tmp_path / "crawled_files"
_url = ["https://haystack.deepset.ai/overview/get-started"]
crawler = Crawler(output_dir=tmp_dir)
doc_path = crawler.crawl(urls=_url, crawler_depth=0)
assert len(doc_path) == 1
_urls = [
"https://haystack.deepset.ai/overview/v0.8.0/get-started",
"https://haystack.deepset.ai/overview/v0.7.0/get-started",
"https://haystack.deepset.ai/overview/v0.6.0/get-started",
]
doc_path = crawler.crawl(urls=_urls, crawler_depth=0)
assert len(doc_path) == 3
doc_path = crawler.crawl(urls=_url, crawler_depth=1)
assert len(doc_path) > 1
for json_file in doc_path:
assert isinstance(json_file, Path)
with open(json_file.absolute(), "r") as read_file:
data = json.load(read_file)
assert 'content' in data
assert 'meta' in data
assert isinstance(data['content'], str)
assert len(data['content'].split()) > 2
def test_crawler_filter_urls(tmp_path):
tmp_dir = tmp_path / "crawled_files"
_url = ["https://haystack.deepset.ai/overview/v0.8.0/"]
crawler = Crawler(output_dir=tmp_dir)
    doc_path = crawler.crawl(urls=_url, filter_urls=[r"haystack\.deepset\.ai\/overview\/v0\.9\.0\/"])
assert len(doc_path) == 0
    doc_path = crawler.crawl(urls=_url, filter_urls=[r"haystack\.deepset\.ai\/overview\/v0\.8\.0\/"])
assert len(doc_path) > 0
    doc_path = crawler.crawl(urls=_url, filter_urls=[r"google\.com"])
assert len(doc_path) == 0
def test_crawler_content(tmp_path):
tmp_dir = tmp_path / "crawled_files"
partial_content_match: list = [
{"url": "https://haystack.deepset.ai/overview/v0.7.0/intro",
"partial_content": ["What is Haystack",
"Utilize all transformer based models",
"a Retriever-Reader pipeline in order",
"Passing on only a small candidate set",
"fast indexing and querying",
"Fine-tune models to your own domain",
"smoothly switch when new ones get published"]},
{"url": "https://haystack.deepset.ai/overview/v0.7.0/use-cases",
"partial_content": ["Semantic Search System",
"Store your documents in the database of ",
"results are chosen based on compatibility in",
"Apply a set of standard questions to each document",
"Return a NO_ANSWER if a given document",
"like what is the revenue forecast for 2020?",
"overview of academic papers and internal business"]}]
crawler = Crawler(output_dir=tmp_dir)
for _dict in partial_content_match:
url: str = _dict['url']
partial_content: list = _dict['partial_content']
doc_path = crawler.crawl(urls=[url], crawler_depth=0)
assert len(doc_path) == 1
for json_file in doc_path:
assert isinstance(json_file, Path)
with open(json_file.absolute(), "r") as read_file:
content = json.load(read_file)
assert isinstance(content['content'], str)
for partial_line in partial_content:
assert search(partial_line, content['content'])
assert partial_line in content['content']
def test_crawler_return_document(tmp_path):
tmp_dir = tmp_path / "crawled_files"
_url = ["https://haystack.deepset.ai/docs/v0.5.0/intromd"]
crawler = Crawler(output_dir=tmp_dir)
docs_path = crawler.crawl(urls=_url, crawler_depth=1)
results, _ = crawler.run(urls=_url, crawler_depth=1, return_documents=True)
documents = results['documents']
for json_file, document in zip(docs_path, documents):
assert isinstance(json_file, Path)
assert isinstance(document, dict)
with open(json_file.absolute(), "r") as read_file:
file_content = json.load(read_file)
assert file_content['meta'] == document['meta']
assert file_content['content'] == document['content']
|
[
"noreply@github.com"
] |
aantti.noreply@github.com
|
c7d1a3d4c7e7cae5223344a1b29d7b2c57fc9ff2
|
23cda8710dc83ca1d26e8f9160e5c537871ef532
|
/blend.py
|
9c30792ef264212be4d5b698dc31313a6dd45d16
|
[] |
no_license
|
neeraj71/Recommender-system-EPFL
|
676f352ef6557609f637c9d122f36d53c3f97d64
|
ee930555e93ae7014485df5b97fd179f41bd2b42
|
refs/heads/master
| 2020-04-26T23:58:52.324859
| 2019-03-05T14:31:24
| 2019-03-05T14:31:24
| 173,920,380
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,278
|
py
|
import numpy as np
import os
import pickle
def blend_train(als=None, sgd=None, user__mean=None, knn_user=None, knn_item=None, svd=None, svdpp=None, nnmf=None, alpha=None, label=None):
    """
    Train a ridge-regression blending model over the predictions of the individual algorithms
    :param als: prediction array of the ALS algorithm with shape n*1
    :param sgd: prediction array of the SGD algorithm with shape n*1
    :param user__mean: prediction array of the user-mean baseline with shape n*1
    :param knn_user: prediction array of the user-based KNN algorithm with shape n*1
    :param knn_item: prediction array of the item-based KNN algorithm with shape n*1
    :param svd: prediction array of the SVD algorithm with shape n*1
    :param svdpp: prediction array of the SVD++ algorithm with shape n*1
    :param nnmf: prediction array of the NNMF algorithm with shape n*1
    :param alpha: the ridge regularization parameter
    :param label: the true ratings
    :return: weight vector w and the training RMSE of the blend
    """
    m_train = np.concatenate((als, sgd, user__mean, knn_user, knn_item, svd, svdpp, nnmf), axis=1)
    y_train = label
# Ridge Regression
w = np.linalg.solve(m_train.T.dot(m_train) + alpha * np.eye(m_train.shape[1]), m_train.T.dot(y_train))
y_predict_train = m_train.dot(w)
# Cut predictions that are too high and too low
for i in range(len(y_predict_train)):
y_predict_train[i] = min(5, np.round(y_predict_train[i]))
y_predict_train[i] = max(1, np.round(y_predict_train[i]))
return w, np.sqrt(np.mean((y_train - y_predict_train) ** 2))
# with open('../data/predictions/merge_prediction.csv', 'w') as output:
# output.write('Id,Prediction\n')
# for i, (row, col) in enumerate(nnz_test):
# output.write('r{}_c{},{}\n'.format(row + 1, col + 1, y_predict_test[i]))
def blend_pred(als, nnmf, w, label=None, predict=False):
    m_train = np.concatenate((als, nnmf), axis=1)
    y_train = label
y_predict_train = m_train.dot(w)
# Cut predictions that are too high and too low
for i in range(len(y_predict_train)):
y_predict_train[i] = min(5, np.round(y_predict_train[i]))
y_predict_train[i] = max(1, np.round(y_predict_train[i]))
return y_predict_train
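if __name__ == '__main__':
    # Quick synthetic smoke test (illustrative only: real inputs come from
    # the individual recommender models, not random numbers).
    rng = np.random.RandomState(0)
    n = 100
    preds = [rng.uniform(1, 5, size=(n, 1)) for _ in range(8)]
    ratings = rng.randint(1, 6, size=n).astype(float)
    w, rmse = blend_train(*preds, alpha=0.1, label=ratings)
    print('blend weights:', w, '- train RMSE:', rmse)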
|
[
"neeraj.yadav@epfl.ch"
] |
neeraj.yadav@epfl.ch
|
356e9ad348952f8b101d113adef955cd75ab0c98
|
c7e67a9c5e7b420adec05469945d523489cf36bc
|
/CalculIndicateurs.py
|
18e804871ebd2bf36f56cf4bf6d3f316443c9b03
|
[] |
no_license
|
nicolasjammaron/skyhawk
|
b8669950e7a8b207c61b8018c30eac37de583f30
|
c75bfc49730ed913d03ef03c9812c888d772efd9
|
refs/heads/master
| 2021-01-21T08:32:52.991034
| 2017-06-09T15:07:38
| 2017-06-09T15:07:38
| 91,631,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
import talib
class CalculIndicateurs:
    def __init__(self):
        super().__init__()
        self.emaNumbers = [5, 8, 13]
        self.data = None  # price series; was referenced without being assigned
        self.emaDico = {'5': None, '8': None, '13': None}
        self.macd = {'macd': None, 'signal': None, 'hist': None}

    def updateMACD(self):
        macdvalues = talib.MACD(self.data)
        self.macd['macd'] = macdvalues[0]
        self.macd['signal'] = macdvalues[1]
        self.macd['hist'] = macdvalues[2]

    def updateIndicators(self):
        for key in self.emaDico.keys():
            # talib.EMA expects an integer period; the dict keys are strings
            self.emaDico[key] = talib.EMA(self.data, timeperiod=int(key))
        self.updateMACD()
def printEMA(self):
print("EMAs : ")
for ema in self.emaDico:
print(ema)
def printMACD(self):
print("MACD :")
print(self.macd)
def updateData(self,Data):
self.data = Data
self.updateIndicators()
self.printEMA()
self.printMACD()
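# Illustrative usage (an assumption, not part of the original file: `closes`
# would be a 1-D numpy float array of closing prices, long enough for the
# slowest EMA and the MACD to have history):
#   import numpy as np
#   ci = CalculIndicateurs()
#   ci.updateData(np.random.random(100) * 10 + 100)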
|
[
"nicolas.jammaron@ensimag.grenoble-inp.fr"
] |
nicolas.jammaron@ensimag.grenoble-inp.fr
|
4c83e71c46773026b582981edca312904a019d88
|
72c5105f2ba72d06899db5a557027daee7273f9b
|
/aproxy/__init__.py
|
6cefd260df678679a1e7767f67b70bf3d72d6a13
|
[
"MIT"
] |
permissive
|
FrodeHus/aproxy
|
2e8d3ee4e870256ac21087cc9edef54949d153f4
|
bcb7041f445a6ad8f32cddc56604661fbe4e0b46
|
refs/heads/master
| 2021-04-15T01:45:45.732899
| 2020-05-20T07:11:32
| 2020-05-20T07:11:32
| 249,284,213
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 63
|
py
|
import aproxy.version
__version__ = aproxy.version.__version__
|
[
"frode.hus@outlook.com"
] |
frode.hus@outlook.com
|
33982be657ad93c9136ef94cb9ff6b50e2c5d92d
|
ff6248be9573caec94bea0fa2b1e4b6bf0aa682b
|
/StudentProblem/10.21.9.51/3/1569573233.py
|
bec2553682ba5fd391a6449b21a128a3f4e8303c
|
[] |
no_license
|
LennartElbe/codeEvo
|
0e41b1a7705204e934ef71a5a28c047366c10f71
|
e89b329bc9edd37d5d9986f07ca8a63d50686882
|
refs/heads/master
| 2020-12-21T17:28:25.150352
| 2020-03-26T10:22:35
| 2020-03-26T10:22:35
| 236,498,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
import functools
import typing
import string
import random
import pytest
def schaltjahr(x: int):
    """Return whether the given year is a leap year.

    Args:
        x: the year

    Returns:
        True if it is a leap year, otherwise False
    """
    if x % 4 == 0 and (x % 100 != 0 or x % 400 == 0):
        return True
    else:
        return False
######################################################################
## Solution Part 2 (Tests)
def test_schaltjahr():
    assert schaltjahr(2000) == True
    assert schaltjahr(2004) == True
    assert schaltjahr(2007) == False
######################################################################
|
[
"lenni.elbe@gmail.com"
] |
lenni.elbe@gmail.com
|
6ee5673a5b8a8462eae94ba73cee74e3b013a4a5
|
810c20e76f6e5aabce8046379178bb8bf4b6252f
|
/shoeapp/migrations/0001_initial.py
|
838e8358e582af527ca89657255020cff78b7144
|
[
"MIT"
] |
permissive
|
pbuzzo/shoestore
|
ac8d96abc824c3656de6dc59131b5a648cd884bf
|
58d95f9eaa8452f051a72f407f0637ebd96a4ec6
|
refs/heads/master
| 2022-11-14T04:39:52.858524
| 2020-07-02T15:15:55
| 2020-07-02T15:15:55
| 272,740,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,250
|
py
|
# Generated by Django 3.0.7 on 2020-06-19 00:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Manufacturer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('website', models.URLField()),
],
),
migrations.CreateModel(
name='ShoeColor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('color_name', models.CharField(choices=[('red', 'red'), ('orange', 'orange'), ('yellow', 'yellow'), ('green', 'green'), ('blue', 'blue'), ('indigo', 'indigo'), ('violet', 'violet'), ('white', 'white'), ('black', 'black')], default='white', max_length=6)),
],
),
migrations.CreateModel(
name='ShoeType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('style', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Shoe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='unknown', max_length=50)),
('size', models.IntegerField()),
('brand_name', models.CharField(max_length=30)),
('material', models.CharField(max_length=30)),
('fasten_type', models.CharField(max_length=30)),
('color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shoeapp.ShoeColor')),
('manufacturer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shoeapp.Manufacturer')),
('shoe_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shoeapp.ShoeType')),
],
),
]
|
[
"pbuzzo04@gmail.com"
] |
pbuzzo04@gmail.com
|
f605f8081c067463f69d9c75df0f55e1116d7458
|
0aea5cbe160131697297415ac526826f68d77b25
|
/python/tutorials/lists/lists_intro.py
|
324dba22fd3a6b0ff88b0a517dea3e6919ee50d6
|
[] |
no_license
|
Xenolithes/Algorithims
|
61df1ac4dbb2480c3d9d9423be7bad64cf7b4575
|
f4f9898e8e13c8e62328c504bc236f33c431e38c
|
refs/heads/master
| 2023-03-15T13:15:11.756664
| 2021-11-28T17:31:26
| 2021-11-28T17:31:26
| 228,446,345
| 0
| 0
| null | 2023-03-06T12:35:23
| 2019-12-16T18:08:37
|
Python
|
UTF-8
|
Python
| false
| false
| 404
|
py
|
computer_parts = ["computer", "monitor", "keyboard", "mouse", "mouse mat"]
for part in computer_parts:
print(part)
print(computer_parts[2])
print(computer_parts[:3])
print(computer_parts[-1])
# Strings and Lists are both sequences
# The big differeneces between strings and lists is
# Strings are immutable meaning they cannot be changed
# Lists are mutable, you can change the contents of a list
|
[
"evan.schweitzer.hratx44@gmail.com"
] |
evan.schweitzer.hratx44@gmail.com
|
ff8d003eb83c85ec20fd29307cfa44507cee252a
|
b6e569f75870c331f69b7d55ee8f8e3e4355f51c
|
/account/views.py
|
914f15b982625e3dd870db5b5671c177d0059982
|
[] |
no_license
|
begimai123/MYBLOG
|
e390b62efaa0e78060babdb3ce4c1fca95109dde
|
3ae47766af8e0bfdfeadf209c9a7902392cc206c
|
refs/heads/master
| 2023-04-13T02:13:21.094426
| 2021-04-27T04:53:37
| 2021-04-27T04:53:37
| 361,983,958
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,138
|
py
|
from django.shortcuts import render
from rest_framework import status
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.views import APIView
from .models import MyUser
from .serializers import RegistrationSerializer, CustomLoginSerializer, CreateNewPasswordSerializer
from .utils import send_activation_email
class RegistrationView(APIView):
def post(self, request):
data = request.data
serializer = RegistrationSerializer(data=data)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response('Successfully registered', status=status.HTTP_201_CREATED )
return Response('Not valid', status=status.HTTP_400_BAD_REQUEST)
class ActivationView(APIView):
def get(self, request, activation_code):
user = get_object_or_404(MyUser,activation_code=activation_code)
user.is_active = True
user.activation_code = ''
user.save()
return Response('Successfully activated', status=status.HTTP_200_OK)
class LoginView(ObtainAuthToken):
serializer_class = CustomLoginSerializer
# api/v1/account/forgot_password/?email=jannelya@gmail.com
class ForgotPassword(APIView):
def get(self, request):
email = request.query_params.get('email')
print(email)
user = get_object_or_404(MyUser, email=email)
user.is_active = False
user.create_activation_code()
user.save()
send_activation_email(email=email, activation_code=user.activation_code, is_password=True)
        return Response('An activation code has been sent to your email', status=status.HTTP_200_OK)
class ForgotPasswordComplete(APIView):
def post(self, request):
data = request.data
serializer = CreateNewPasswordSerializer(data=data)
if serializer.is_valid(raise_exception=True):
serializer.save()
        return Response('Your password has been successfully reset', status=status.HTTP_200_OK)
|
[
"begimaidosieva1@gmail.com"
] |
begimaidosieva1@gmail.com
|
533dcf1322f8166627a2158712fc9355dd632898
|
d087f90c6e64cb74c9f61a1dbc807662896e7ce9
|
/Domains/Algorithms/sWAP cASE/main.py
|
4f612d828d504e95cd9361bfcd55369b65995e8f
|
[] |
no_license
|
youfeng243/hackerrank
|
e7c26e56f30731ee27ac17b19712362c53a3bb94
|
913ec6db9a861c9f97726dde24488c10db63d23c
|
refs/heads/master
| 2021-01-10T01:08:38.413900
| 2016-02-29T14:24:25
| 2016-02-29T14:24:25
| 49,575,420
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
'''
from __future__ import print_function
text = raw_input()
textlen = len(text)
for i in xrange(textlen):
if text[i].islower():
print (text[i].upper(),sep="",end="")
else:
print (text[i].lower(),sep="",end="")
for i in text:
if i.islower():
print (i.upper(),sep="",end="")
else:
print (i.lower(),sep="",end="")
'''
print (''.join([ i.lower() if i.isupper() else i.upper() for i in raw_input() ]))
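# Sample run (HackerRank's example): the input 'HackerRank.com presents "Pythonist 2".'
# produces 'hACKERrANK.COM PRESENTS "pYTHONIST 2".'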
|
[
"youfeng243@163.com"
] |
youfeng243@163.com
|
83195e00d218aa30e013968d9740532fab773cf2
|
d30b45d4f5abc90434970d3280cd70a3c8a70690
|
/bin/pip
|
7c804f5eb1f36b261b935011acbfd931893b3173
|
[] |
no_license
|
ivan2375/venv01blog
|
9883a5f796a9cc0e85368b78be5fd73cf86b6759
|
b696ace9310a08380e53ef82ced3c8eca7df1d9d
|
refs/heads/master
| 2021-01-19T17:49:57.767251
| 2017-08-22T17:48:08
| 2017-08-22T17:48:08
| 101,089,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
#!/Users/ivan.hsu/Documents/ProgrammingProjects/Python/Venv01/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"ivan.hsu@IvanMBP15.local"
] |
ivan.hsu@IvanMBP15.local
|
|
e7e466f95b12b358ad6e38e3f1a752e07fb850ae
|
6bcd16a2468ccaa6f0abe2d534094727258e7023
|
/dateevent_daemon
|
46f289359df7cb8e42acb789cac700051a56ad6e
|
[] |
no_license
|
Cermit/DateEvent
|
939112de3293274e221f84a8d2b578e7ea2464cb
|
6111e259eecbd9719ad2893d9684177b61d7c3e5
|
refs/heads/master
| 2020-06-03T13:41:50.549888
| 2012-03-05T18:55:32
| 2012-03-05T18:55:32
| 2,715,796
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,800
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import os
import sys
from traceback import print_exc
if os.path.exists('/usr/share/dateevent'):
sys.path.append("/usr/share/dateevent")
import ConfigParser
import subprocess
import dbus
import dbus.mainloop.glib
from sqlite3 import *
from time import time
from datetime import datetime
from eventfeed import EventFeedService, EventFeedItem
from PySide import QtCore
from PySide import QtGui
from daemon import Daemon
class CalEventDaemon(QtCore.QObject):
def __init__(self):
QtCore.QObject.__init__(self)
# set data
self.all_calendars = self.get_calendars()
self.calendar_names = self.get_calendar_names(self.all_calendars)
self.calendar_ids = self.get_calendar_ids(self.all_calendars)
self.choice_days_ahead = ['1','2','3','4','5','6','7','14','30']
self.choice_show_max_events = ['1','2','3','4','5','7','10']
# Config stuff
self.config = ConfigParser.ConfigParser()
if os.path.exists(os.path.expanduser('~/.config/dateevent.cfg')):
self.readconf()
else:
self.defaultconf()
self.readconf()
self.timer = QtCore.QTimer()
self.timer.setSingleShot(True)
#dbus
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
self.bus = dbus.SessionBus()
try:
# Get the remote object
self.remote_object = self.bus.get_object("org.freedesktop.Tracker1",
"/org/freedesktop/Tracker1/Resources")
# Get the remote interface for the remote object
self.iface = dbus.Interface(self.remote_object, "org.freedesktop.Tracker1.Resources")
except dbus.DBusException:
print_exc()
sys.exit(1)
self.iface.connect_to_signal("GraphUpdated", self.calendar_db_changed)
self.timer.timeout.connect(self.update_neu)
self.update_feed(self.selected_dayamount)
#-------------------------------------------------------------------------
# config-methods
def defaultconf(self):
self.config.add_section('General')
self.config.set('General', 'selected_dayamount', '2')
self.config.set('General', 'selected_calendars', [])
self.config.set('General', 'next_event_on_top', 'True')
self.config.set('General', 'show_events_max', '5')
self.save_config()
def readconf(self):
self.config.readfp(open(os.path.expanduser(
'~/.config/dateevent.cfg'), 'rb'))
self.selected_dayamount = int(self.config.get('General',
'selected_dayamount'))
self.selected_calendars = list(eval(self.config.get('General',
'selected_calendars')))
self.next_event_on_top = eval(self.config.get('General',
'next_event_on_top'))
self.show_events_max = int(self.config.get('General',
'show_events_max'))
def save_config(self):
with open(os.path.expanduser('~/.config/dateevent.cfg'), 'wb') as configfile:
self.config.write(configfile)
#----------------------------------------------------------------------------
def calendar_db_changed(self, arg1, arg2, arg3):
if arg1 =='http://www.semanticdesktop.org/ontologies/2007/04/02/ncal#Event':
print "kalender-db verändert, update event-screen"
self.update_feed(self.selected_dayamount)
def start(self, new_dayamount):
self.get_events(new_dayamount, self.selected_calendars, self.show_events_max)
def update_neu(self):
self.update_feed(self.selected_dayamount)
def get_calendars(self):
# connect to the sqlite db
conn = connect("/home/user/.calendar/db")
curs = conn.cursor()
# SQL query for all calendars
query_calendars = "SELECT CalendarId,Name,Color\
FROM Calendars\
WHERE modifiedDate > 1306879230"
# run the SQL query and fetch the results
curs.execute(query_calendars)
all_calendars = curs.fetchall()
conn.close()
return all_calendars
def get_calendar_names(self,calendarlist):
calendar_names = []
for calId, name, color in calendarlist:
calendar_names.append(name)
return calendar_names
def get_calendar_ids(self,calendarlist):
calendar_ids = []
for calId, name, color in calendarlist:
calendar_ids.append(calId)
return calendar_ids
def get_events(self, dayamount, calendar_ids, show_events_max):
# connect to the sqlite db
conn = connect("/home/user/.calendar/db")
curs = conn.cursor()
# number of upcoming days to query
days_ahead = int(self.choice_days_ahead[int(dayamount)])
#print show_events_max
# calendars
selected_calender = [self.all_calendars[i][0] for i in calendar_ids]
selected_calender = unicode("','".join(selected_calender))
# unix time of now + 5 seconds because of the daemon's timer
unixtime_now = int(time()) + 5
print "in get_events:", unixtime_now
unixtime_in_days_ahead = unixtime_now + days_ahead*86400
# SQL query for the events, restricted to the selected calendars
# DateStart is selected a second time to be used as the faketime
query_events = "SELECT Summary, Location, DateStart, Notebook, DateStart FROM Components \
WHERE DateStart BETWEEN {0} AND {1}\
AND Notebook in ('{2}')\
AND DateDeleted = '0'".format(unixtime_now,
unixtime_in_days_ahead,
selected_calender)
curs.execute(query_events)
self.all_events = []
self.all_events = curs.fetchall()
self.all_events = sorted(self.all_events, key = lambda event: event[2])
print self.all_events
if self.next_event_on_top:
self.all_events = self.change_events_timeline(self.all_events)
for summary, location, datestart, cal, faketime in self.all_events[:self.show_events_max]:
calId = self.calendar_ids.index(cal)
faketime = datetime.fromtimestamp(faketime)
datestart = datetime.fromtimestamp(datestart)
day = int(str(datestart)[8:10])
self.feeder(summary, location, faketime, day, calId)
# close the DB connection
conn.close()
def change_events_timeline(self, events):
if events:
time_last_event = events[-1][2]
for i, event in enumerate(events):
events[i] = list(event)
time_event = event[2]
days_delta = (time_last_event - time_event)/86400
faketime = time_event + days_delta * 86400 + (len(events)-i)*86400
events[i][4] = faketime
return events
#--------------------------------------------------------------------------------------------------
def feeder(self, summary, location, datestart, day, calId):
calendarname = self.all_calendars[calId][1]
calendarcolor = self.all_calendars[calId][2]
service = EventFeedService('dateevent', 'DateEvent')
if os.path.exists('/usr/share/dateevent/img'):
icon = '/usr/share/dateevent/img/icon-l-calendar-{0}.png'.format(day)
else:
icon = '/home/user/MyDocs/dateevent/img/icon-l-calendar-{0}.png'.format(day)
item = EventFeedItem(icon,
u'<font color="{0}" size="-3">{1}</font>'.format(calendarcolor,calendarname),
datestart)
# valid timestamp: datetime.datetime(2011, 11, 2, 8, 30, 0, 0)
item.set_body(u'<font size="+1">{0}</font>'.format(summary))
item.set_footer(location)
item.set_custom_action(self.on_item_clicked)
service.add_item(item)
#--------------------------------------------------------------------------------------------------
def update_feed(self, dayamount):
print "machen wir mal ein Update!"
service = EventFeedService('dateevent', 'DateEvent')
service.remove_items()
self.readconf()
self.get_events(dayamount,self.selected_calendars, self.show_events_max)
print "Zeit Jetzt:", time()
print "Zeit nächster Termin:", self.all_events[0][2]
print "Zeit bis zum nächsten Termin:" , self.all_events[0][2]-time()
if self.timer.isActive():
self.timer.stop()
print "bin hier"
self.timer.setInterval((self.all_events[0][2]-time())*1000)
self.timer.start()
#--------------------------------------------------------------------------------------------------
def on_item_clicked(self):
print 'the user clicked the item'
subprocess.Popen(['/usr/bin/organiser'])
#--------------------------------------------------------------------------------------------------
class MyDaemon(Daemon):
def run(self):
app = QtCore.QCoreApplication(sys.argv)
start = CalEventDaemon()
sys.exit(app.exec_())
if __name__ == "__main__":
daemon = MyDaemon('/tmp/daemon-dateevent.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
|
[
"boris@pohlers-web.de"
] |
boris@pohlers-web.de
|
|
95faae4c3d99f9fe7b0b38261ca6ed324dd109f9
|
4bae4a5ff3089ea478a536c6f9d10803f0a1058b
|
/poste/views.py
|
2a5ac69fb743f7c35c7969b6bd6486ebd002865a
|
[] |
no_license
|
mohamed-amine-maaroufi/social-network-django
|
db6a36bbb6519361ed5b44f5f91ec1f6615a9970
|
4c8fe63a7060427c172b7cede4c5a44258de10d1
|
refs/heads/master
| 2021-08-28T18:26:05.297170
| 2017-12-12T23:23:17
| 2017-12-12T23:23:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,773
|
py
|
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.utils import timezone
from django.shortcuts import render, get_object_or_404, redirect
# Create your views here.
from django.views.decorators.http import require_POST
from poste.forms import PostForm, CommentForm
from poste.models import Post
def post_list(request):
posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
return render(request, 'poste/post_list.html', {'section': 'posts','posts': posts})
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
return render(request, 'poste/post_detail.html', {'post': post})
def post_new(request):
if request.method == "POST":
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm()
return render(request, 'poste/post_edit.html', {'form': form})
def post_edit(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = PostForm(request.POST, instance=post)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm(instance=post)
return render(request, 'poste/post_edit.html', {'form': form})
def post_remove(request, pk):
post = get_object_or_404(Post, pk=pk)
post.delete()
return redirect('dashboard')
def add_comment_to_post(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.post = post
comment.save()
return redirect('post_detail', pk=post.pk)
else:
form = CommentForm()
return render(request, 'poste/add_comment_to_post.html', {'form': form})
@login_required
@require_POST
def post_like(request):
post_id = request.POST.get('id')
action = request.POST.get('action')
if post_id and action:
try:
post = Post.objects.get(id=post_id)
if action == 'like':
post.users_like.add(request.user)
else:
post.users_like.remove(request.user)
return JsonResponse({'status':'ok'})
except Exception:
pass
return JsonResponse({'status':'ko'})
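# Hedged usage sketch: the URL below is hypothetical and depends on urls.py;
# only the POST parameter names ('id', 'action') and the JSON responses come
# from the view above.
#
#   from django.test import Client
#   client = Client()
#   client.login(username='alice', password='secret')   # hypothetical user
#   resp = client.post('/poste/like/', {'id': 1, 'action': 'like'})
#   resp.json()   # {'status': 'ok'} on success, {'status': 'ko'} otherwise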
|
[
"aminemaaroufi40@gmail.com"
] |
aminemaaroufi40@gmail.com
|
e6494ca65bec7c312419a5a0650ba71912aa229e
|
84e661d5d293ec0c544fedab7727767f01e7ddcf
|
/security/utils.py
|
38eb674978ff268071338f7811f0de31645f8423
|
[
"BSD-3-Clause"
] |
permissive
|
groundupnews/gu
|
ea6734fcb9509efc407061e35724dfe8ba056044
|
4c036e79fd735dcb1e5a4f15322cdf87dc015a42
|
refs/heads/master
| 2023-08-31T13:13:47.178119
| 2023-08-18T11:42:58
| 2023-08-18T11:42:58
| 48,944,009
| 21
| 23
|
BSD-3-Clause
| 2023-09-14T13:06:42
| 2016-01-03T11:56:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 698
|
py
|
from django.contrib.auth.password_validation import MinimumLengthValidator
class StaffMinimumLengthValidator(MinimumLengthValidator):
def __init__(self, staff_min_length=14,
other_min_length=9):
self.staff_min_length = staff_min_length
self.other_min_length = other_min_length
super().__init__(self.staff_min_length)
def validate(self, password, user=None):
if user and user.is_staff is True:
self.min_length = self.staff_min_length
else:
self.min_length = self.other_min_length
super().validate(password, user)
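# Hedged wiring sketch: Django loads validators from AUTH_PASSWORD_VALIDATORS
# in settings.py and passes OPTIONS as keyword arguments to __init__. The
# dotted path assumes this module lives at security/utils.py, as in this repo.
#
# AUTH_PASSWORD_VALIDATORS = [
#     {
#         'NAME': 'security.utils.StaffMinimumLengthValidator',
#         'OPTIONS': {'staff_min_length': 14, 'other_min_length': 9},
#     },
# ]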
|
[
"nathangeffen@gmail.com"
] |
nathangeffen@gmail.com
|
7f5235ef055eb553258319be0bcf2c6a64bb75c9
|
c5d1c3754b46b1509177add5ac1a532832bd6322
|
/quickstart.py
|
d356df68cc8d27f748adcade53ef5991637d7563
|
[] |
no_license
|
aylanismello/boost-instagram
|
f2d3ee011a63bcec154b63d1e89d10542b4b0e12
|
bec82f95edcf6458cf8e7bfe5a19f5fd98807971
|
refs/heads/master
| 2022-07-16T13:33:05.655302
| 2020-05-12T02:11:55
| 2020-05-12T02:11:55
| 263,208,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,184
|
py
|
# https://github.com/timgrossmann/InstaPy/blob/master/DOCUMENTATION.md
# https://github.com/InstaPy/instapy-quickstart/tree/master/quickstart_templates
# Blocking likes issue: https://github.com/timgrossmann/InstaPy/issues/4609
import csv
import random, os, sys
from dotenv import load_dotenv
# imports
from instapy import InstaPy
from instapy import smart_run
from instapy import set_workspace
load_dotenv()
followlist = []
with open('los_angles_users_with_followings.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
new_follower = row[5]
followlist.append(new_follower)
followlist = followlist[::-1]
try:
user = sys.argv[1]
except:
print('Pass which user you want to run this on')
sys.exit()
headless_browser = True
print(f'headless browser? -> {headless_browser}')
# follow_list
insta_username = os.getenv(
"HY_USERNAME") if user == 'hy' else os.getenv("BC_USERNAME")
insta_password = os.getenv(
"HY_PASSWORD") if user == 'hy' else os.getenv("BC_PASSWORD")
cookie_path = os.getenv(
"HY_COOKIE_PATH") if user == 'hy' else os.getenv("BC_COOKIE_PATH")
if os.path.exists(cookie_path):
# removing the file using the os.remove() method
os.remove(cookie_path)
print(f'Removed cookie at {cookie_path}')
else:
# file not found message
print(f'File {cookie_path} not found in the directory')
disable_image_load = True
max_followers = 3000
min_followers = 50
min_following = 80
comments = []
burn_tags = [
"bossanova", "latintrap", "jazz",
"futurebeats", "futurejazz", "futurebaile", "ableton",
'mpb', 'chillbaile', 'funk150',
'beats', 'soulection', 'bailefunk', 'jdilla', 'producer', 'chillhop',
'afrobeat', 'kuduro', 'moombahton', 'globalclub'
]
secondary_tags = [ "pickupjazz", "pickupbeats", "bossanova", "latintrap", "jazz",
"futurebeats", "futurejazz", "futurebaile", "ableton", "djmix",
'mpb', 'chillbaile', 'funk150',
"djset", "soundcloud", "djproducer", 'bedroomproducer', 'producerlife'
'beats', 'soulection', 'bailefunk', 'ukg', 'jdilla', 'producer', 'chillhop',
'afrobeat', 'kuduro' ]
hy_tags = [
'abletonpush', 'abletonbeats', 'abletonpush2', 'bossanova', 'mpb', 'pickupjazz', 'pickupbeats', 'futurebeats', 'futuremusic', 'soulection', 'bailefunk',
'chillbaile', 'chillhop', 'electronica', 'lamusic', 'soundcloud', 'latinhouse', 'musicproducer', 'abletonlive', 'studiolife', 'beatmakers', 'electrocumbia'
]
tags = hy_tags if user == 'hy' else burn_tags
random.shuffle(tags)
dont_like_tags = [
'yoga', 'vegan', 'workout', 'sexy', 'nazi', 'whitesupremacy',
'kkk', 'exercise', 'food', 'techno', 'trance', 'confederate',
'psytrance', 'trancemusic', 'housemix',
'learnguitar', 'lessons'
]
likes_per_tag = 5
locations = ['212999109/los-angeles-california/']
# get an InstaPy session!
# set headless_browser=True to run InstaPy in the background
session = InstaPy(username=insta_username,
password=insta_password,
disable_image_load=disable_image_load,
headless_browser=headless_browser)
with smart_run(session):
# GENERAL SETTINGS
session.set_dont_like(dont_like_tags)
session.set_relationship_bounds(enabled=True,
delimit_by_numbers=True,
max_followers=max_followers,
min_followers=min_followers,
min_following=min_following)
session.set_delimit_liking(enabled=True, max_likes=500)
# Look around this user's profile and do shit
session.set_user_interact(amount=3, randomize=True,
percentage=69, media='Photo')
session.set_do_like(enabled=True, percentage=69)
# ACTIVITY
# START!
session.like_by_tags(tags, amount=likes_per_tag, interact=True)
# session.follow_by_list(followlist=followlist,
# times=1, sleep_delay=600, interact=True)
# session.like_by_feed(amount=100, randomize=True,
# unfollow=False, interact=True)
# session.like_by_locations(locations, amount=100)
|
[
"aylanismello@gmail.com"
] |
aylanismello@gmail.com
|
eef72f6b51880660e4b1ce6582030f3c8b68cbff
|
235746e7bdb0cc4b11e9eda6878a85828853b7c7
|
/PlayAssetDelivery/BundletoolScriptSample/generate_asset_pack.py
|
27e7f8222032aca7b08a7536af942d4b0effce68
|
[
"Apache-2.0"
] |
permissive
|
android/app-bundle-samples
|
0a156df874331f2588f867050c439c5f7808e7ee
|
a72d67aa462b3f081cc101437110ae9f91b403cc
|
refs/heads/main
| 2023-08-16T00:25:19.290817
| 2023-03-22T14:14:32
| 2023-03-22T14:17:46
| 206,879,574
| 710
| 360
|
Apache-2.0
| 2023-03-22T14:17:48
| 2019-09-06T21:54:59
|
C++
|
UTF-8
|
Python
| false
| false
| 5,775
|
py
|
#!/usr/bin/env python3
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to generate a valid asset pack from a given assets folder.
Instant delivery is not supported.
"""
import argparse
import distutils.dir_util
import os
import shutil
import sys
import tempfile
manifest_template = """<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:dist="http://schemas.android.com/apk/distribution" package="{package_name}" split="{asset_pack_name}">
<dist:module dist:type="asset-pack">
<dist:delivery>
<dist:{delivery_mode}/>
</dist:delivery>
<dist:fusing dist:include="true"/>
</dist:module>
</manifest>
"""
def parse_args() -> argparse.Namespace:
"""Parse input arguments."""
parser = argparse.ArgumentParser(
description="Generates a valid asset pack from a given assets folder",
add_help=True)
parser.add_argument(
"--packagename", required=True, help="Package name of the app")
parser.add_argument(
"--assetpackname", required=True, help="Name of the asset pack module")
parser.add_argument(
"--deliverymode",
required=True,
choices=["install-time", "fast-follow", "on-demand"],
help="Delivery mode of the asset pack module")
parser.add_argument(
"--assetsdir", required=True, help="Folder to read assets from")
parser.add_argument("--outdir", required=True, help="Output folder")
parser.add_argument(
"--overwrite",
required=False,
action="store_true",
help="Overwrite existing files")
return parser.parse_args()
def abs_expand_all(path: str) -> str:
return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))
def get_assets_dir(args: argparse.Namespace) -> str:
assets_dir = abs_expand_all(args.assetsdir)
if (not (os.path.isdir(assets_dir) and os.access(assets_dir, os.X_OK) and
os.access(assets_dir, os.R_OK))):
print(
"Assets folder ({assets_dir}) is not accessible. Check permissions."
.format(assets_dir=assets_dir),
file=sys.stderr)
sys.exit(-1)
return assets_dir
def create_output_dir(args: argparse.Namespace) -> str:
"""Get the output directory."""
output_dir = abs_expand_all(args.outdir)
if not os.path.isdir(output_dir):
try:
os.makedirs(output_dir)
except OSError as e:
print(e, file=sys.stderr)
sys.exit(-1)
if (not (os.path.isdir(output_dir) and os.access(output_dir, os.X_OK) and
os.access(output_dir, os.W_OK))):
print(
"Output folder ({output_dir}) is not accessible. Check permissions."
.format(output_dir=output_dir),
file=sys.stderr)
sys.exit(-1)
return output_dir
def get_output_file_name(output_dir: str, args: argparse.Namespace) -> str:
output_file_name = os.path.join(output_dir, args.assetpackname)
if os.path.exists(output_file_name) and not args.overwrite:
print(
"Output file {output_file_name} exists. Specify --overwrite to bypass. Exiting."
.format(output_file_name=output_file_name))
sys.exit(-1)
return output_file_name
def make_manifest(package_name: str, asset_pack_name: str, delivery_mode: str,
pack_directory: str) -> None:
"""Generate the Android Manifest file for the pack."""
manifest = manifest_template.format(
package_name=package_name,
asset_pack_name=asset_pack_name,
delivery_mode=delivery_mode)
manifest_folder = os.path.join(pack_directory, "manifest")
try:
os.makedirs(manifest_folder)
except OSError as e:
print("Cannot create manifest folder. {e}".format(e=e), file=sys.stderr)
sys.exit(-1)
manifest_file_name = os.path.join(manifest_folder, "AndroidManifest.xml")
manifest_file = open(manifest_file_name, "w")
print(manifest, file=manifest_file)
manifest_file.close()
print("Generated {manifest}".format(manifest=manifest_file_name))
def copy_assets(src: str, dest: str) -> None:
"""Copy assets from one folder to another."""
assets_folder = os.path.join(dest, "assets")
try:
os.makedirs(assets_folder)
except OSError as e:
print("Cannot create assets folder. {e}".format(e=e), file=sys.stderr)
sys.exit(-1)
try:
distutils.dir_util.copy_tree(src, assets_folder)
except FileNotFoundError as e:
print(
"Cannot copy assets folder into temporary folder. {e}".format(e=e),
file=sys.stderr)
sys.exit(-1)
print(
"Copied assets into {assets_folder}".format(assets_folder=assets_folder))
def main():
args = parse_args()
assets_dir = get_assets_dir(args)
output_dir = create_output_dir(args)
output_file_name = get_output_file_name(output_dir, args)
with tempfile.TemporaryDirectory(dir=output_dir) as pack_dir:
print("Created temporary working folder: {pack_dir}".format(
pack_dir=pack_dir))
make_manifest(args.packagename, args.assetpackname, args.deliverymode,
pack_dir)
copy_assets(assets_dir, pack_dir)
output_pack_path = shutil.make_archive(
os.path.join(output_dir, output_file_name), "zip", pack_dir)
print("Asset pack is generated at {output_pack_path}.\nDone.".format(
output_pack_path=output_pack_path))
if __name__ == "__main__":
main()
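# Example invocation (directory paths are hypothetical; the flags mirror the
# argparse definitions above):
#
#   python3 generate_asset_pack.py \
#       --packagename com.example.app \
#       --assetpackname level_one_assets \
#       --deliverymode on-demand \
#       --assetsdir ./assets \
#       --outdir ./out \
#       --overwrite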
|
[
"agiuglia@google.com"
] |
agiuglia@google.com
|
2756de0d21cdc922c2a96026b315a55640b2ece7
|
38941ac80bb2473baa8558d870daa53459688f35
|
/utils/config.py
|
63264514746eb4a8e452699b4875471da5b6c57a
|
[] |
no_license
|
waxiao1214/flask
|
15ce2a710322e4b288c444bef9e365492fdbb626
|
f2f8aa551a9236ed9c64bc27d62741b363c043a8
|
refs/heads/master
| 2022-04-06T00:28:58.500293
| 2020-02-05T17:47:34
| 2020-02-05T17:47:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,701
|
py
|
from solariat_bottle import settings
class SettingsProxy(object):
""" Simple proxy to combine solariat_bottle.settings with Flask.config
"""
def __init__(self, config):
self._config = config
def __getitem__(self, name):
null = object()
item = settings.get_var(name, null)
if item is null:
item = self._config[name]
return item
def __setitem__(self, name, item):
setattr(settings, name, item)
self._config[name] = item
def get(self, name, default=None):
null = object()
item = settings.get_var(name, null)
if item is null:
item = self._config.get(name, default)
return item
def iteritems(self):
keys = set(vars(settings)) | set(vars(self._config))
return ((key, self.get(key)) for key in keys)
def iterkeys(self):
return (key for key,_ in self.iteritems())
def itervalues(self):
return (val for _, val in self.iteritems())
def sync_with_keyserver():
from solariat.cipher.keystore import secure_storage, RemoteKeyServerError
get_var = settings.get_var
secure_storage.configure(
storage_proxy=SettingsProxy({}),
key_field='AES_KEY_256',
keyserver_config=get_var('KEYSERVER_CONFIG'))
try:
secure_storage.keyserver_sync()
except RemoteKeyServerError as e:
settings.LOGGER.critical(unicode(e), exc_info=True)
raise
if get_var('APP_MODE') == 'prod' \
and get_var('KEYSERVER_CONFIG', {}).get('remote_sync_required', True):
assert get_var('AES_KEY_256') != get_var('TEST_AES_KEY_256'), \
"The key was not updated"
|
[
"henryza1994@outlook.com"
] |
henryza1994@outlook.com
|
cde8bc25a08de424823ffe9ff194c2c48cdbd611
|
a9192511e74c524f1b8f16b186e683fd8e849e95
|
/4.2 shopping/shopping.py
|
e78ac9e2cb7c85bebe4984ed9fd277c26319823d
|
[] |
no_license
|
NguyenLeVo/cs80
|
32fd212b2c9ba7a421a2977e390ef5b89f83a9c4
|
677ae97e00040631f6954dca36e3e6f35d415572
|
refs/heads/master
| 2022-11-10T01:21:58.790057
| 2020-06-28T20:07:46
| 2020-06-28T20:07:46
| 275,659,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,637
|
py
|
# Write an AI to predict whether online shopping customers will complete a purchase.
# Supervised Learning - Classification
import csv
import sys
import calendar
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import Perceptron # Perceptron classification model
from sklearn import svm # Support Vector Machine, a classification model
TEST_SIZE = 0.4
def main():
# Check command-line arguments
if len(sys.argv) != 2:
sys.exit("Usage: python shopping.py data")
# Load data from spreadsheet and split into train and test sets
evidence, labels = load_data(sys.argv[1])
X_train, X_test, y_train, y_test = train_test_split(
evidence, labels, test_size=TEST_SIZE
)
# Train model and make predictions
model = train_model(X_train, y_train)
predictions = model.predict(X_test)
sensitivity, specificity = evaluate(y_test, predictions)
# Print results
print(f"Correct: {(y_test == predictions).sum()}")
print(f"Incorrect: {(y_test != predictions).sum()}")
print(f"True Positive Rate: {100 * sensitivity:.2f}%")
print(f"True Negative Rate: {100 * specificity:.2f}%")
def load_data(filename):
"""
Load shopping data from a CSV file `filename` and convert into a list of
evidence lists and a list of labels. Return a tuple (evidence, labels).
evidence should be a list of lists, where each list contains the
following values, in order:
- Administrative, an integer
- Administrative_Duration, a floating point number
- Informational, an integer
- Informational_Duration, a floating point number
- ProductRelated, an integer
- ProductRelated_Duration, a floating point number
- BounceRates, a floating point number
- ExitRates, a floating point number
- PageValues, a floating point number
- SpecialDay, a floating point number
- Month, an index from 0 (January) to 11 (December)
- OperatingSystems, an integer
- Browser, an integer
- Region, an integer
- TrafficType, an integer
- VisitorType, an integer 0 (not returning) or 1 (returning)
- Weekend, an integer 0 (if false) or 1 (if true)
labels should be the corresponding list of labels, where each label
is 1 if Revenue is true, and 0 otherwise.
"""
# Open the file
with open("shopping.csv") as file:
reader = csv.reader(file)
# Skip the header
next(reader)
# Load shopping data
data = []
for row in reader:
'''
You have an abbreviated month name, so use %b:
from datetime import datetime
datetime.strptime('Jan', '%b').month
>>> 1
or
import calendar
abbr_to_num = {name: num for num, name in enumerate(calendar.month_abbr) if num}
abbr_to_num['Jan']
>>> 1
'''
# Convert month (11th column) to number:
# In the dataset, every month is abbreviated, except for June. So manually change June
if row[10] == 'June':
row[10] = 5
else:
abbr_to_num = {name: num for num, name in enumerate(calendar.month_abbr) if num}
# calendar.month_abbr has length 13 with element 0 empty; we want Jan to be 0 (not 1), so subtract 1
row[10] = abbr_to_num[row[10]] - 1
# Convert VisitorType (16th column)
if row[15] == 'Returning_Visitor':
row[15] = 1
elif row[15] == 'New_Visitor':
row[15] = 0
# 'Other' case
else:
row[15] = 0.5
# Convert Weekend (17th column)
'''
Shorter
row[16] = int(row[16] == 'TRUE')
row[16] = int(row[16] == 'FALSE')
'''
if row[16].upper() == 'TRUE':
row[16] = 1
elif row[16].upper() == 'FALSE':
row[16] = 0
# Convert Revenue (18th column)
'''
row[17] = int(row[17] == 'TRUE')
row[17] = int(row[17] == 'FALSE')
'''
if row[17].upper() == 'TRUE':
row[17] = 1
elif row[17].upper() == 'FALSE':
row[17] = 0
# Append these values into database
data.append({
# 1st key: Evidence: a list of lists of the 1st through 17th data values
"evidence": [float(cell) for cell in row[:17]],
# 2nd key: Labels: a list of labels of whether there was revenue (18th column)
# If there's revenue, return 1. Else return 0
"label": 1 if row[17] == 1 else 0
})
# Set up the evidence and labels elements of the tuple
evidence = [row["evidence"] for row in data]
labels = [row["label"] for row in data]
return evidence, labels
def train_model(evidence, labels):
"""
Given a list of evidence lists and a list of labels, return a
fitted k-nearest neighbor model (k=1) trained on the data.
"""
model = KNeighborsClassifier(n_neighbors=1)
model.fit(evidence, labels)
return model
# Reuse y_test to save time; no need to re-split the evidence and labels into training and testing sets
def evaluate(labels, predictions):
"""
Given a list of actual labels and a list of predicted labels,
return a tuple (sensitivity, specificity).
Assume each label is either a 1 (positive) or 0 (negative).
`sensitivity` should be a floating-point value from 0 to 1
representing the "true positive rate": the proportion of
actual positive labels that were accurately identified.
`specificity` should be a floating-point value from 0 to 1
representing the "true negative rate": the proportion of
actual negative labels that were accurately identified.
"""
'''
This is to predict whether the program's prediction matches the actual result (+ vs + and - vs -)
correct = (labels == predictions).sum()
incorrect = (labels != predictions).sum()
'''
'''
To check sensivity and specificity, we need to:
If actual result is true for revenue, we add it to the positive sum counter
If the predicted result is positive (true for revenue), and it matches the actual result, we add it to the
positive counter.
Vice versa with the negative.
'''
pos = 0
neg = 0
sum_pos = 0
sum_neg = 0
for actual, predicted in zip(labels, predictions):
if actual == 1:
sum_pos += 1
if actual == predicted:
pos += 1
else:
sum_neg += 1
if actual == predicted:
neg += 1
sensitivity = pos / sum_pos
specificity = neg / sum_neg
return sensitivity, specificity
if __name__ == "__main__":
main()
'''
Perceptron
Correct: 4319
Incorrect: 613
True Positive Rate: 21.21%
True Negative Rate: 99.38%
Support Vector Machine
Correct: 4163
Incorrect: 769
True Positive Rate: 1.54%
True Negative Rate: 100.00%
KNeighbor K=3
Correct: 4217
Incorrect: 715
True Positive Rate: 32.35%
True Negative Rate: 95.47%
KNeighbor K=1
Correct: 4073
Incorrect: 859
True Positive Rate: 39.97%
True Negative Rate: 90.20%
'''
|
[
"NguyenLeVo@github.com"
] |
NguyenLeVo@github.com
|
f59fbe13735d62fa7d62c13cfbf744ecdd25f38e
|
c2cbbbb5157cd780d6a2ceecd53f2fdf7b5726c9
|
/portfolio/models.py
|
0e8aa06f5b4773ba74c0f94e50956c408497d5b4
|
[] |
no_license
|
Efukuda/efsREST
|
d588151d99ca0292f19319f1a330e5d6503e52eb
|
6dda452fe78ad6e4a346ba79bee819175ed21846
|
refs/heads/master
| 2020-09-10T17:56:18.908468
| 2019-11-14T21:10:30
| 2019-11-14T21:10:30
| 221,787,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,366
|
py
|
from django.db import models
from django.utils import timezone
# Create your models here.
class Customer(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=200)
cust_number = models.IntegerField(blank=False, null=False)
city = models.CharField(max_length=50)
state = models.CharField(max_length=50)
zipcode = models.CharField(max_length=10)
email = models.EmailField(max_length=200)
cell_phone = models.CharField(max_length=50)
created_date = models.DateTimeField(
default=timezone.now)
updated_date = models.DateTimeField(auto_now_add=True)
def created(self):
self.created_date = timezone.now()
self.save()
def updated(self):
self.updated_date = timezone.now()
self.save()
def __str__(self):
return str(self.cust_number)
class Investment(models.Model):
customer = models.ForeignKey(Customer, on_delete=models.CASCADE, related_name='investments')
category = models.CharField(max_length=50)
description = models.CharField(max_length=200)
acquired_value = models.DecimalField(max_digits=10, decimal_places=2)
acquired_date = models.DateField(default=timezone.now)
recent_value = models.DecimalField(max_digits=10, decimal_places=2)
recent_date = models.DateField(default=timezone.now, blank=True, null=True)
def created(self):
self.acquired_date = timezone.now()
self.save()
def updated(self):
self.recent_date = timezone.now()
self.save()
def __str__(self):
return str(self.customer)
def results_by_investment(self):
return self.recent_value - self.acquired_value
class Stock(models.Model):
customer = models.ForeignKey(Customer, on_delete=models.CASCADE, related_name='stocks')
symbol = models.CharField(max_length=10)
name = models.CharField(max_length=50)
shares = models.DecimalField(max_digits=10, decimal_places=1)
purchase_price = models.DecimalField(max_digits=10, decimal_places=2)
purchase_date = models.DateField(default=timezone.now, blank=True, null=True)
def created(self):
self.recent_date = timezone.now()
self.save()
def __str__(self):
return str(self.customer)
def initial_stock_value(self):
return self.shares * self.purchase_price
|
[
"efukuda@unomaha.edu"
] |
efukuda@unomaha.edu
|
716cb71d2926d73b6c87c5c97ecc84f2d03c90c9
|
474b327e28a0aebaff65b2d8156cd2fa83b2aad1
|
/python/index.py
|
4f8ce5d2306234626b017badfb26b107034ba3de
|
[] |
no_license
|
harishvenkatram/demo1
|
0e2889c7b132d880f1ef108078b0ba979999fa4d
|
4d1438d933a31b92b42d471d8c53f9c682797451
|
refs/heads/master
| 2022-12-04T02:44:35.926423
| 2020-08-24T03:29:06
| 2020-08-24T03:29:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 118
|
py
|
#!C:\Users\UMANG KEJRIWAL\AppData\Local\Programs\Python\Python38-32\python
print ()
import cgi
print("Umang")
|
[
"noreply@github.com"
] |
harishvenkatram.noreply@github.com
|
b120efee5779518f4a6c6d97bfd7c808094ce080
|
419a5e8678eb1788d026cde793d108010a6d620e
|
/Project1/fresh_tomatoes.py
|
9c6747a766fdc9dd3582487b5e41da7de4b3b365
|
[] |
no_license
|
thuzarwin/Udacity-Full-Stack-Web-Developer-Nanodegree
|
67065b9b8983f1ad39c9d2d612d54fcb83b8f1a5
|
7dd1409c6e0f5f7542a103cea062b76966482279
|
refs/heads/master
| 2021-06-08T05:15:24.236832
| 2016-10-04T04:53:35
| 2016-10-04T04:54:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,696
|
py
|
import webbrowser
import os
import re
import entertainment_center
# Styles and scripting for the page
main_page_head = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Fresh Tomatoes!</title>
<!-- Bootstrap 3 -->
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap.min.css">
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap-theme.min.css">
<script src="http://code.jquery.com/jquery-1.10.1.min.js"></script>
<script src="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/js/bootstrap.min.js"></script>
<style type="text/css" media="screen">
body {
padding-top: 80px;
}
#trailer .modal-dialog {
margin-top: 200px;
width: 640px;
height: 480px;
}
.hanging-close {
position: absolute;
top: -12px;
right: -12px;
z-index: 9001;
}
#trailer-video {
width: 100%;
height: 100%;
}
.movie-tile {
margin-bottom: 20px;
padding-top: 20px;
}
.movie-tile:hover {
background-color: #EEE;
cursor: pointer;
}
.scale-media {
padding-bottom: 56.25%;
position: relative;
}
.scale-media iframe {
border: none;
height: 100%;
position: absolute;
width: 100%;
left: 0;
top: 0;
background-color: white;
}
</style>
<script type="text/javascript" charset="utf-8">
// Pause the video when the modal is closed
$(document).on('click', '.hanging-close, .modal-backdrop, .modal', function (event) {
// Remove the src so the player itself gets removed, as this is the only
// reliable way to ensure the video stops playing in IE
$("#trailer-video-container").empty();
});
// Start playing the video whenever the trailer modal is opened
$(document).on('click', '.movie-tile', function (event) {
var trailerYouTubeId = $(this).attr('data-trailer-youtube-id')
var sourceUrl = 'http://www.youtube.com/embed/' + trailerYouTubeId + '?autoplay=1&html5=1';
$("#trailer-video-container").empty().append($("<iframe></iframe>", {
'id': 'trailer-video',
'type': 'text-html',
'src': sourceUrl,
'frameborder': 0
}));
});
// Animate in the movies when the page loads
$(document).ready(function () {
$('.movie-tile').hide().first().show("fast", function showNext() {
$(this).next("div").show("fast", showNext);
});
});
</script>
</head>
'''
# The main page layout and title bar
main_page_content = '''
<body>
<!-- Trailer Video Modal -->
<div class="modal" id="trailer">
<div class="modal-dialog">
<div class="modal-content">
<a href="#" class="hanging-close" data-dismiss="modal" aria-hidden="true">
<img src="https://lh5.ggpht.com/v4-628SilF0HtHuHdu5EzxD7WRqOrrTIDi_MhEG6_qkNtUK5Wg7KPkofp_VJoF7RS2LhxwEFCO1ICHZlc-o_=s0#w=24&h=24"/>
</a>
<div class="scale-media" id="trailer-video-container">
</div>
</div>
</div>
</div>
<!-- Main Page Content -->
<div class="container">
<div class="navbar navbar-inverse navbar-fixed-top" role="navigation">
<div class="container">
<div class="navbar-header">
<a class="navbar-brand" href="#">Fresh Tomatoes Movie Trailers</a>
</div>
</div>
</div>
</div>
<div class="container">
{movie_tiles}
</div>
</body>
</html>
'''
# A single movie entry html template
movie_tile_content = '''
<div class="col-md-6 col-lg-4 movie-tile text-center" data-trailer-youtube-id="{trailer_youtube_id}" data-toggle="modal" data-target="#trailer">
<img src="{poster_image_url}" width="220" height="342">
<h2>{movie_title}</h2>
</div>
'''
def create_movie_tiles_content(movies):
# The HTML content for this section of the page
content = ''
for movie in movies:
# Extract the youtube ID from the url
youtube_id_match = re.search(
r'(?<=v=)[^&#]+', movie.trailer_youtube_url)
youtube_id_match = youtube_id_match or re.search(
r'(?<=be/)[^&#]+', movie.trailer_youtube_url)
trailer_youtube_id = (youtube_id_match.group(0) if youtube_id_match
else None)
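# e.g. both 'http://www.youtube.com/watch?v=abc123' and 'http://youtu.be/abc123'
# yield the id 'abc123' ('abc123' is a hypothetical id, for illustration)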
# Append the tile for the movie with its content filled in
content += movie_tile_content.format(
movie_title=movie.title,
poster_image_url=movie.poster_image_url,
trailer_youtube_id=trailer_youtube_id
)
return content
def open_movies_page(movies):
# Create or overwrite the output file
output_file = open('fresh_tomatoes.html', 'w')
# Replace the movie tiles placeholder with the generated content
rendered_content = main_page_content.format(
movie_tiles=create_movie_tiles_content(movies))
# Output the file
output_file.write(main_page_head + rendered_content)
output_file.close()
# open the output file in the browser (in a new tab, if possible)
url = os.path.abspath(output_file.name)
webbrowser.open('file://' + url, new=2)
# Create movies with pre-defined movies inside entertainment_center.py.
my_movies = entertainment_center.entertainment_center()
open_movies_page(my_movies)
|
[
"gerrywu@utexas.edu"
] |
gerrywu@utexas.edu
|
d9c4f7f505a201081de9b7f4da64e67fe884cecb
|
2231edd55bb0e23471ccd2f4c543e0b67f1aa678
|
/src/C3/3-8.py
|
9e9f1786cdc82fa6e4e0e97642f4b5c3b4fad3f7
|
[] |
no_license
|
johnhuzhy/StellaPythonExam
|
665d8f906be7cc973b66bc8a8f84d177ed87b178
|
52d4fb09da1e19fc825a59de1f9ab23c4dbe2409
|
refs/heads/master
| 2022-11-10T22:02:04.298075
| 2020-06-25T11:10:30
| 2020-06-25T11:10:30
| 265,241,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
# This break only ends the inner (second-level) loop, so the outer loop still goes on to process 1, 2, 3
# The else clause is paired with the outer for loop, so the content inside the else is printed
b=[['apple','orange','banana','grape'],(1,2,3)]
for y in b:
for z in y:
if z=='orange':
break
print(z)
else:
print('fruit is gone')
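# Expected output:
#   apple
#   1
#   2
#   3
#   fruit is gone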
|
[
"johnho560@gmail.com"
] |
johnho560@gmail.com
|
af5e2fb5643a8c3f17b041036b893ffe89a19e84
|
1e9e5f745e34b4e490a090fa658ecc9084b2142e
|
/cancertype_prediction/learn_and_predict_tcga_split_cancertypes_optim.py
|
f40ba083bb5062ae7661fd395cf65cde2feb74ef
|
[
"MIT"
] |
permissive
|
DPBayes/dp-representation-transfer
|
588ccf78c49dbcdfc941054254716cf120c6c66d
|
0c8389cc36659a7606bceac2531eaef7663ac49c
|
refs/heads/master
| 2020-04-30T21:11:51.418433
| 2019-03-22T13:48:19
| 2019-03-22T13:48:19
| 177,088,165
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,755
|
py
|
'''
Representation learning testing for TCGA data.
Set all parameters in this script & run with parameter "batch" or "local".
'''
import numpy as np
import logging
import datetime
import time
import math
from common import expInterp, ensure_dir_exists, pretty_duration
import batch
from types import SimpleNamespace
######################################################################
# SETUP
######################################################################
# set the following parameter values
# logging configuration
logging.basicConfig(level=logging.INFO)
# input dataset
#data_set = "TCGA_split_pub_geneexpr"
data_set = "TCGA_geneexpr"
data_type = 'rnaseq_tpm_rle_log2_gene_expressions'
target_set = "TCGA_cancertype"
target_type = 'cancer_types'
# list of cancer type pairs that will for the private dataset
# (and will be predicted/classified)
#priv_splits = [
# ["brain lower grade glioma", "glioblastoma multiforme"], # brain
# ["skin cutaneous melanoma", "uveal melanoma"], # melanoma (skin, eye)
# ["kidney chromophobe", "kidney clear cell carcinoma", "kidney papillary cell carcinoma"], # kidney
# ["lung adenocarcinoma", "lung squamous cell carcinoma"], # lung
#]
priv_splits = [
("lung squamous cell carcinoma", "head & neck squamous cell carcinoma"),
# ("kidney clear cell carcinoma", "kidney papillary cell carcinoma"),
# ("lung adenocarcinoma", "lung squamous cell carcinoma"),
# ("breast invasive carcinoma", "lung squamous cell carcinoma"),
# ("colon adenocarcinoma", "rectum adenocarcinoma"),
]
#priv_splits = 'all'
# size of the representation to be learned
#repr_dims = [2, 4, 8, 12, 16]
#repr_dims = [2, 4, 8, 16]
#repr_dims = [16]
repr_dim = 4
# RNG seeds
#seeds = [0, 1, 2, 3, 4]
#seeds = [0, 1, 2]
#seeds = [4]
seeds = [0]
## parameters for representation learning
#n_epochs = 1
n_epochs = 2000
deadline = None
#max_duration = None
#max_duration = datetime.timedelta(minutes=5)
max_duration = datetime.timedelta(hours=1)
batch_size = 64
normalize_data = False
#validation_split = 0
validation_split = 0.2
# fraction of data to use for prediction testing (instead of training)
pred_test_size = 0.3
early_stopping = True
#early_stopping = 'val_secondary_loss'
# logging settings (what to log)
log_weights = False
# droputs (note: must be multiples of 0.1)
#first_layer_dropout = 0.1; other_layer_dropouts = 0.2
#first_layer_dropout = 0.2; other_layer_dropouts = 0.4
# save predicted values (i.e. first encoded then decoded ) in to a file?
save_pred = False
## parameters for predictions
#scale_fun = "none"; scale_const = 1.0
#scale_fun = "norm_max"; scale_const = 1.01
#scale_fun = "dims_max"; scale_const = 1.0
scale_fun = "norm_avg"; scale_const = 1.0
#scale_fun = "dims_std"; scale_const = 1.0
#clip = "none"
clip = "norm"
#clip = "dims"
bounding_slack = 0.01
#private = False
#epsilon = np.inf
epsilon = 1.0
regularizer_strength = 0.1
## the parameters that will be optimized
domain = [
{'name': 'learning_rate_log10', 'type': 'continuous', 'domain': (-5,-1)},
{'name': 'n_hidden_layers', 'type': 'discrete', 'domain': [1, 2, 3]},
{'name': 'hidden_layer_size_mul_log10', 'type': 'continuous', 'domain': (0,4)},
]
# any constraints for the parameters that will be optimized
constraints = None
## parameters for parameter optimization
gpyopt_batch_size = 4
gpyopt_max_iter = 20
## other parameters
id_suffix = ""
##########################################################################################
# END OF SETUP
##########################################################################################
def cross_entropy(x, x_pred):
epsilon = 10e-8
x_pred = np.clip(x_pred, epsilon, 1.0 - epsilon)
return -np.average(np.sum(x * np.log(x_pred), axis=1))
def relative_cross_entropy(x, x_pred):
x_avg = np.average(x, axis=0)
return cross_entropy(x, x_pred) / cross_entropy(x, x_avg)
def mean_squared_error(x, x_pred):
return np.average((x - x_pred) ** 2)
def relative_mean_squared_error(x, x_pred):
mse = mean_squared_error(x, x_pred)
x_avg = np.average(x, axis=0)
return mse / np.average((x - x_avg) ** 2)
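# Worked micro-example: for x = [[0], [2]] and x_pred = [[0], [0]] the MSE is
# 2.0, while always predicting the column mean (1) gives 1.0, so
# relative_mean_squared_error returns 2.0 (i.e. worse than the mean baseline).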
def get_params(params, domain):
p = dict()
for i, var in enumerate(domain):
p[var['name']] = params[i]
return SimpleNamespace(**p)
def run_optimization(args, domain, constraints, batch_size, max_iter):
logging.info('Starting parameter optimization...')
import GPyOpt
ensure_dir_exists("param_opt")
initial_design_type = 'random'
initial_design_numdata = batch_size
logging.info('Selecting initial parameters...')
space = GPyOpt.core.task.space.Design_space(domain, constraints)
params = GPyOpt.experiment_design.initial_design(initial_design_type, space, initial_design_numdata)
logging.info('Running...')
results = run_batch(args, params)
all_params = params
all_results = results
for i in range(max_iter):
print(all_params, flush=True)
print(all_results, flush=True)
logging.info('Selecting a new set of parameters...')
bo = GPyOpt.methods.BayesianOptimization(f=None,
domain = domain,
X = all_params,
Y = all_results,
acquisition_type = 'EI',
normalize_Y = True,
evaluator_type = 'local_penalization',
batch_size = batch_size,
acquisition_jitter = 0)
params = bo.suggest_next_locations()
logging.info('Running...')
results = run_batch(args, params)
all_params = np.vstack((all_params, params))
all_results = np.vstack((all_results, results))
np.save("param_opt/opt_params%s.npy" % id_suffix, all_params)
np.save("param_opt/opt_results%s.npy" % id_suffix, all_results)
def run_batch(args, params):
ensure_dir_exists("run_parameters")
params = [get_params(p, domain) for p in params]
np.save("run_parameters/params.npy", params)
assert len(params) == gpyopt_batch_size
args.wait = True
batch.run_tasks(args)
# get results
#return np.random.randn(gpyopt_batch_size, 1)
res = np.zeros((gpyopt_batch_size, 1))
for param_id in range(gpyopt_batch_size):
tot_res = 0
for priv_cancertypes in priv_splits:
data_name = '-'.join(priv_cancertypes).replace(' ', '_')
for seed in seeds:
full_model_id = "%s-%d-%s-s%d%s" % (data_name, repr_dim, param_id, seed, id_suffix)
filename = "param_opt/opt_result%s-%s.txt" % (id_suffix, full_model_id)
try:
tot_res += np.loadtxt(filename)
import os
os.remove(filename)
except Exception:
logging.info('Warning, could not load "%s"' % filename)
res[param_id] = tot_res / (len(priv_splits) * len(seeds))
return -res
# the task function that is run with each argument combination
def task(args):
import pandas
param_id, priv_cancertypes, seed = args
logging.info("priv classes = %s, params_id = %s, seed = %d",
priv_cancertypes, param_id, seed)
#repr_dim, (alg_id, _, make_alg), seed = args
#logging.info("algorithm = %s, seed = %d", alg_id, seed)
# read the data sets
alg_id = param_id
logging.info("Loading parameters...")
params = np.load("run_parameters/params.npy")
params = params[param_id]
logging.info("Reading data...")
gene_expr = pandas.read_hdf("data/%s.h5" % (data_set), data_type)
logging.info(" * gene expression shape: %d x %d" % gene_expr.shape)
logging.info("Filtering out genes with low expressions...")
low_expr = (np.median(gene_expr, axis=0) < 0.0)
gene_expr = gene_expr.iloc[:,~low_expr]
logging.info(" * %d of %d remaining (%d removed)" %
(sum(~low_expr), low_expr.size, sum(low_expr)))
logging.info("Loading cancer types...")
cancer_type = pandas.read_hdf("data/%s.h5" % (target_set), target_type)
assert np.array_equal(gene_expr.index, cancer_type.index)
# split
logging.info("Splitting...")
priv = cancer_type.isin(priv_cancertypes)
logging.info(" * %d private samples, %d public samples (of %d total)" %
(sum(priv), sum(~priv), priv.size))
from common import categorical_to_binary
x_pub = gene_expr[~priv].as_matrix()
y_pub = cancer_type[~priv].cat.codes.as_matrix()
x_priv = gene_expr[priv].as_matrix()
y_priv = cancer_type[priv].cat.codes.as_matrix()
#y = categorical_to_binary(aux_target.values)
#num_classes = y.shape[1]
data_name = '-'.join(priv_cancertypes).replace(' ', '_')
# A hack to have a different seed if the algorithm is run multiple times
# with the same parameters. Destroys reproducibility...
import time
seed0 = int(time.time()*100) % (2**32)
# init rng
np.random.seed(seed0)
import torch
torch.manual_seed(seed0)
if torch.cuda.is_available() and torch.cuda.device_count() > 0:
torch.cuda.manual_seed(seed0)
##################################
# representation learning
#################################
x = x_pub
y = y_pub
# separate validation set if needed
val_x = None
#val_y = None
if validation_split:
logging.info("Splitting into training and validation sets")
from sklearn.model_selection import train_test_split
train_x, val_x, train_y, val_y = train_test_split(x, y, test_size=validation_split, random_state=0)
x, y = train_x, train_y
#m = x.shape[0]
#perm = np.random.permutation(m)
#x = x[perm,:]
#y = y[perm,:]
#split_point = int(validation_split * m)
#(val_x, x) = (x[:split_point,:], x[split_point:,:])
#(val_y, y) = (y[:split_point,:], y[split_point:,:])
logging.info(" * training set shape: %d x %d" % x.shape)
logging.info(" * validation set shape: %d x %d" % val_x.shape)
data_dim = x.shape[1]
logging.info(" * data shape after preprocessing: %d x %d" % x.shape)
logging.info("Learning the representaiton on public data...")
logging.info(" * learning a representation of size %d", repr_dim)
start_time = time.time()
# init the algorithm
#alg = make_alg(data_dim, repr_dim, num_classes)
#alg = make_alg(data_dim, repr_dim)
from models.vae_pytorch import VAE
alg = VAE().init(
input_dim = data_dim,
latent_dim = repr_dim,
#enc_dims = [],
enc_dims = [int(10 ** params.hidden_layer_size_mul_log10)*repr_dim] * int(params.n_hidden_layers),
dec_dims = 'same',
enc_activations = 'relu',
dec_activations = 'relu',
prediction_mean_activation = 'sigmoid',
prediction_var = 'gs',
prediction_log_var_min = math.log(0.01**2),
normalize_input_type = 'quantiles',
normalize_input_quantile = 0.05,
normalize_input_axis = 'global',
normalize_input_target = (0, 1),
normalize_input_clip = True,
optimizer = 'Adam',
optimizer_params = {'lr': 10.0 ** params.learning_rate_log10},
n_epochs = n_epochs,
early_stopping = True,
reduce_lr_on_plateau = False,
batch_size = batch_size)
# create output dir if does not exist
ensure_dir_exists('res')
full_model_id = "%s-%d-%s-s%d%s" % (data_name, repr_dim, alg_id, seed, id_suffix)
# define the progress saving function
progress_filename = 'res/progress-encdec-mse-%s.txt' % (full_model_id)
progress_file = open(progress_filename, 'w', encoding='utf-8')
#aux_progress_filename = 'res/progress-aux-ce-%s.txt' % (full_model_id)
#aux_progress_file = open(aux_progress_filename, 'w', encoding='utf-8')
if val_x is not None:
val_progress_filename = 'res/progress-encdec-validation-mse-%s.txt' % (full_model_id)
val_progress_file = open(val_progress_filename, 'w', encoding='utf-8')
#aux_val_progress_filename = 'res/progress-aux-validation-ce-%s.txt' % (full_model_id)
#aux_val_progress_file = open(aux_val_progress_filename, 'w', encoding='utf-8')
def save_progress():
x_pred = alg.decode(alg.encode(x))
rel_mse = relative_mean_squared_error(x, x_pred)
progress_file.write("%g\n" % rel_mse)
#aux_pred = alg.predict_secondary(x)
#aux_rel_ce = relative_cross_entropy(y, aux_pred)
#aux_progress_file.write("%g\n" % aux_rel_ce)
if val_x is not None:
val_x_pred = alg.decode(alg.encode(val_x))
rel_mse = relative_mean_squared_error(val_x, val_x_pred)
val_progress_file.write("%g\n" % rel_mse)
#val_aux_pred = alg.predict_secondary(val_x)
#aux_rel_ce = relative_cross_entropy(val_y, val_aux_pred)
#aux_val_progress_file.write("%g\n" % aux_rel_ce)
# fit to the training data
alg.learn(x, validation_data=val_x,
log_file_prefix=("log/%s" % (full_model_id)),
per_epoch_callback_funs=[save_progress],
deadline=deadline, max_duration=max_duration)
# test reconstruction error
x_pred = alg.decode(alg.encode(x))
rel_mse = relative_mean_squared_error(x, x_pred)
val_x_pred = alg.decode(alg.encode(val_x))
val_rel_mse = relative_mean_squared_error(val_x, val_x_pred)
logging.info(" * final error: rel_mse = %g, val_rel_mse = %g",
rel_mse, val_rel_mse)
elapsed = time.time() - start_time
logging.info(" * running time = %s", pretty_duration(elapsed))
# save model
#logging.info("Saving the learned model...")
#ensure_dir_exists('repr_models')
#alg.save("repr_models/%s" % (full_model_id))
##################################
# representation mapping
#################################
x = x_priv
y = y_priv
# get the representation
logging.info("Making the representation of private data...")
x_repr = alg.encode(x)
# test to predict the data itself
x_pred = alg.decode(x_repr)
rel_mse = relative_mean_squared_error(x, x_pred)
logging.info(" * reconstruct the data: rel_mse = %g", rel_mse)
ensure_dir_exists("res")
with open("res/private-encdec-rel_mse-%d-%s-%s-s%d%s.txt" %
(repr_dim, data_name, alg_id, seed, id_suffix),
'w', encoding='utf-8') as f:
f.write("%.6f\n" % rel_mse)
# save the representation
#logging.info("Saving the representation...")
#ensure_dir_exists("data_repr")
#np.savetxt("data_repr/repr-%s-%d-%s-s%d%s.csv" %
# (data_name, repr_dim, alg_id, seed, id_suffix),
# x_repr, delimiter=',')
##################################
# prediction
#################################
x = x_repr
# split train and test sets
logging.info("Splitting to train and test sets...")
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=pred_test_size, random_state=0)
logging.info(" * train samples: %d" % x_train.shape[0])
logging.info(" * test samples: %d" % x_test.shape[0])
# init rng
np.random.seed(seed0)
#print(np.amax(np.linalg.norm(x_train, axis=1)))
#print(np.mean(np.linalg.norm(x_train, axis=1)))
logging.info("Bounding the data to 1-sphere...")
if scale_fun == "norm_max":
logging.info(" * scale by max norm")
scale_factor = np.amax(np.linalg.norm(x_train, axis=1))
elif scale_fun == "dims_max":
logging.info(" * scale each dimension by max absolute value")
scale_factor = np.amax(np.abs(x_train), axis=0)
elif scale_fun == "norm_avg":
logging.info(" * scale by average norm")
scale_factor = np.mean(np.linalg.norm(x_train, axis=1))
elif scale_fun == "dims_std":
logging.info(" * scale each dimension by standard deviation")
scale_factor = np.std(x_train, axis=0)
elif scale_fun == "none":
scale_factor = 1.0
else:
assert False
x_train /= scale_factor * scale_const
x_test /= scale_factor * scale_const
#print(np.amax(np.linalg.norm(x_train, axis=1, keepdims=True)))
if clip == "norm":
logging.info(" * clip norms to max 1")
x_train /= np.maximum(np.linalg.norm(x_train, axis=1, keepdims=True) * (1 + bounding_slack), 1)
x_test /= np.maximum(np.linalg.norm(x_test, axis=1, keepdims=True) * (1 + bounding_slack),1)
elif clip == "dims":
assert False, "not implemented"
elif clip == "none":
logging.info(" * no clipping -> no bounding")
assert private == False #or np.isinf(epsilon)
else:
assert False
#for private in [False, True]:
for private in [True]:
# fit
logging.info("Fitting a model...")
if private:
logging.info(" * DP logistic regression: epsilon=%g, alpha=%g", epsilon, regularizer_strength)
from models.logistic_regression import DPLogisticRegression
model = DPLogisticRegression().init(repr_dim, classes=np.unique(y),
alpha=regularizer_strength, epsilon=epsilon)
else:
logging.info(" * logistic regression: alpha=%g", regularizer_strength)
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(C=1/regularizer_strength)
model.fit(x_train, y_train)
#print(model.predict(x_test))
# compute mean accuracy on test set
logging.info("Testing the model...")
#acc = model.score(x_test, y_test)
from sklearn.metrics import accuracy_score
train_acc = accuracy_score(y_train, model.predict(x_train))
test_acc = accuracy_score(y_test, model.predict(x_test))
logging.info(" * train accuracy = %.6f", train_acc)
logging.info(" * test accuracy = %.6f", test_acc)
logging.info("Writing results to disk...")
ensure_dir_exists("res")
filename = ("res/cancertype-pred-accuracy-%d-%s-%s-s%d-%s-%d-%s%s.txt" %
(repr_dim, data_name, alg_id, seed, scale_fun, scale_const, clip,
("-e%g" % (epsilon) if private else "-nonpriv")))
logging.info(" * filename: %s", filename)
with open(filename, 'w', encoding='utf-8') as f:
f.write("%.6f\n" % test_acc)
filename = "param_opt/opt_result%s-%s.txt" % (id_suffix, full_model_id)
with open(filename, 'w', encoding='utf-8') as f:
f.write("%.6f\n" % test_acc)
########## MAIN ##########
def main():
param_ids = range(gpyopt_batch_size)
batch.init(task=task, args_ranges=(param_ids, priv_splits, seeds))
args = batch.parse_args()
if args.action == "task":
assert args.task is not None
        task_id = int(args.task)
        logging.info('Running task id %d...', task_id)
        batch._run_task(task_id)
else:
run_optimization(args, domain, constraints, gpyopt_batch_size, gpyopt_max_iter)
# init and run
main()
# try to work around a bug where TensorFlow randomly throws an exception at the end
# this seems to be the same: https://github.com/tensorflow/tensorflow/issues/3745
# possibly also this: https://github.com/tensorflow/tensorflow/issues/3388
from sys import modules
if "keras.backend.tensorflow_backend" in modules:
import keras.backend
keras.backend.clear_session()
|
[
"mixheikk@LM8-MAT-01.local"
] |
mixheikk@LM8-MAT-01.local
|
734ccbece3793fccf093f76b9406ba61a8eecd2c
|
85f513c638304a04243ba361ae0c3cf59910bdbb
|
/indglobal_employee/models/hr_employee.py
|
502a39fd27a725b22ccc9a76e0e732327efe83f7
|
[] |
no_license
|
Raghupathy15/Posiflex
|
0c27058559755336a7a79f8c4cea3c3e06842431
|
9551dd52aa93f6b46258bd7cc07f05e2532b6d92
|
refs/heads/main
| 2023-01-22T19:53:17.344864
| 2020-12-09T09:34:36
| 2020-12-09T09:34:36
| 319,907,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,039
|
py
|
from datetime import datetime, timedelta
from odoo import models, fields,api, _
class HrEmployeePrivate(models.Model):
_inherit = 'hr.employee'
emp_id = fields.Char(string='Emp ID')
work_email = fields.Char('Work Email',required=True)
user_level = fields.Selection([('admin', 'Admin'),('sales_head', 'Sales Head'),('rsm','RSM'),('sm','SM'),
('partner','Partner'),('internal_sales_person','Internal-sales person')],string='CRM User Level',required=True)
parent_id = fields.Many2one('hr.employee', 'Sales Manager', domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]")
rsm_id = fields.Many2one('hr.employee', 'RSM', domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]")
sales_head_id = fields.Many2one('hr.employee', 'Sales Head', domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]")
company_ids = fields.Many2many('res.company', 'res_company_emp_rel', 'user_id', 'cid',
string='Allowed Companies', default=lambda self: self.env.company.ids)
@api.model
def create(self, vals):
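        """Create the employee and a linked ``res.users`` login.

        Note: the user is created with the hard-coded default password
        '123' and is granted access groups according to ``user_level``,
        so the password should be changed after the first login.
        """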
res = super(HrEmployeePrivate, self).create(vals)
if res:
# create user when create employee
user_id = self.env['res.users'].create({
'name': res.name,
'login': res.work_email,
'email': res.work_email,
'sales_manager_id': res.parent_id.id,
'rsm_id': res.rsm_id.id,
'sales_head_id': res.sales_head_id.id,
'password': '123',
'groups_id': [(4,self.env.ref('base.group_user').id)]
})
user_id.write({'groups_id': [(6,0,[self.env.ref('hr.group_hr_user').id])]})
if res.user_level == 'admin':
user_id.write({'groups_id': [(4, self.env.ref('hr.group_hr_manager').id)]})
user_id.write({'groups_id': [(4, self.env.ref('sales_team.group_sale_manager').id)]})
user_id.write({'groups_id': [(4, self.env.ref('stock.group_stock_manager').id)]})
user_id.write({'groups_id': [(4, self.env.ref('purchase.group_purchase_manager').id)]})
user_id.write({'groups_id': [(4, self.env.ref('base.group_erp_manager').id)]})
if res.user_level == 'sales_head':
user_id.write({'groups_id': [(4, self.env.ref('hr.group_hr_manager').id)]})
user_id.write({'groups_id': [(4, self.env.ref('indglobal_employee.group_hr_sales_head').id)]})
user_id.write({'groups_id': [(4, self.env.ref('sales_team.group_sale_salesman').id)]})
user_id.write({'groups_id': [(4, self.env.ref('stock.group_stock_user').id)]})
user_id.write({'groups_id': [(4, self.env.ref('purchase.group_purchase_user').id)]})
if res.user_level == 'rsm':
user_id.write({'groups_id': [(4, self.env.ref('hr.group_hr_manager').id)]})
user_id.write({'groups_id': [(4, self.env.ref('indglobal_employee.group_hr_rsm').id)]})
user_id.write({'groups_id': [(4, self.env.ref('sales_team.group_sale_salesman').id)]})
user_id.write({'groups_id': [(4, self.env.ref('stock.group_stock_user').id)]})
user_id.write({'groups_id': [(4, self.env.ref('purchase.group_purchase_user').id)]})
if res.user_level == 'sm':
user_id.write({'groups_id': [(4, self.env.ref('hr.group_hr_manager').id)]})
user_id.write({'groups_id': [(4, self.env.ref('indglobal_employee.group_hr_sm').id)]})
user_id.write({'groups_id': [(4, self.env.ref('sales_team.group_sale_salesman').id)]})
user_id.write({'groups_id': [(4, self.env.ref('stock.group_stock_user').id)]})
user_id.write({'groups_id': [(4, self.env.ref('purchase.group_purchase_user').id)]})
if res.user_level == 'partner':
user_id.write({'groups_id': [(4, self.env.ref('hr.group_hr_manager').id)]})
user_id.write({'groups_id': [(4, self.env.ref('indglobal_employee.group_hr_partner').id)]})
user_id.write({'groups_id': [(4, self.env.ref('sales_team.group_sale_salesman').id)]})
user_id.write({'groups_id': [(4, self.env.ref('stock.group_stock_user').id)]})
user_id.write({'groups_id': [(4, self.env.ref('purchase.group_purchase_user').id)]})
if res.user_level == 'internal_sales_person':
user_id.write({'groups_id': [(4, self.env.ref('hr.group_hr_manager').id)]})
user_id.write({'groups_id': [(4, self.env.ref('indglobal_employee.group_hr_internal_sales_person').id)]})
user_id.write({'groups_id': [(4, self.env.ref('sales_team.group_sale_salesman').id)]})
user_id.write({'groups_id': [(4, self.env.ref('stock.group_stock_user').id)]})
user_id.write({'groups_id': [(4, self.env.ref('purchase.group_purchase_user').id)]})
            res.user_id = user_id.id
return res
def write(self, values):
res = super(HrEmployeePrivate, self).write(values)
user_search = self.env['res.users'].search([('login', '=', self.work_email)])
if user_search:
            # Keep the linked user in sync with the employee record.
            user_search.write({
                'name': self.name,
                'login': self.work_email,
                'email': self.work_email,
                'sales_manager_id': self.parent_id.id,
                'rsm_id': self.rsm_id.id,
                'sales_head_id': self.sales_head_id.id,
            })
            user_search.write({'groups_id': [(6, 0, [self.env.ref('hr.group_hr_user').id])]})
if self.user_level == 'admin':
user_search.write({'groups_id': [(4, self.env.ref('hr.group_hr_manager').id)]})
user_search.write({'groups_id': [(4, self.env.ref('sales_team.group_sale_manager').id)]})
user_search.write({'groups_id': [(4, self.env.ref('stock.group_stock_manager').id)]})
user_search.write({'groups_id': [(4, self.env.ref('purchase.group_purchase_manager').id)]})
user_search.write({'groups_id': [(4, self.env.ref('base.group_erp_manager').id)]})
if self.user_level == 'sales_head':
user_search.write({'groups_id': [(4, self.env.ref('hr.group_hr_manager').id)]})
user_search.write({'groups_id': [(4, self.env.ref('indglobal_employee.group_hr_sales_head').id)]})
user_search.write({'groups_id': [(4, self.env.ref('sales_team.group_sale_salesman').id)]})
user_search.write({'groups_id': [(4, self.env.ref('stock.group_stock_user').id)]})
user_search.write({'groups_id': [(4, self.env.ref('purchase.group_purchase_user').id)]})
if self.user_level == 'rsm':
user_search.write({'groups_id': [(4, self.env.ref('hr.group_hr_manager').id)]})
user_search.write({'groups_id': [(4, self.env.ref('indglobal_employee.group_hr_rsm').id)]})
user_search.write({'groups_id': [(4, self.env.ref('sales_team.group_sale_salesman').id)]})
user_search.write({'groups_id': [(4, self.env.ref('stock.group_stock_user').id)]})
user_search.write({'groups_id': [(4, self.env.ref('purchase.group_purchase_user').id)]})
if self.user_level == 'sm':
user_search.write({'groups_id': [(4, self.env.ref('hr.group_hr_manager').id)]})
user_search.write({'groups_id': [(4, self.env.ref('indglobal_employee.group_hr_sm').id)]})
user_search.write({'groups_id': [(4, self.env.ref('sales_team.group_sale_salesman').id)]})
user_search.write({'groups_id': [(4, self.env.ref('stock.group_stock_user').id)]})
user_search.write({'groups_id': [(4, self.env.ref('purchase.group_purchase_user').id)]})
if self.user_level == 'partner':
user_search.write({'groups_id': [(4, self.env.ref('hr.group_hr_manager').id)]})
user_search.write({'groups_id': [(4, self.env.ref('indglobal_employee.group_hr_partner').id)]})
user_search.write({'groups_id': [(4, self.env.ref('sales_team.group_sale_salesman').id)]})
user_search.write({'groups_id': [(4, self.env.ref('stock.group_stock_user').id)]})
user_search.write({'groups_id': [(4, self.env.ref('purchase.group_purchase_user').id)]})
if self.user_level == 'internal_sales_person':
user_search.write({'groups_id': [(4, self.env.ref('hr.group_hr_manager').id)]})
user_search.write({'groups_id': [(4, self.env.ref('indglobal_employee.group_hr_internal_sales_person').id)]})
user_search.write({'groups_id': [(4, self.env.ref('sales_team.group_sale_salesman').id)]})
user_search.write({'groups_id': [(4, self.env.ref('stock.group_stock_user').id)]})
user_search.write({'groups_id': [(4, self.env.ref('purchase.group_purchase_user').id)]})
return res
|
[
"raghupathy@emxcelsolutions.com"
] |
raghupathy@emxcelsolutions.com
|
a7ba89bcca1697463abe9959caa34d1f86a6b2cb
|
e1d9897c9daa5067b2572b6cbe871bfc3193ecfe
|
/groups/admin.py
|
df270398b93798dec953106ed6f5aeb9634db460
|
[] |
no_license
|
andreas-andersen/wishlist
|
2816db7cf47ec07190c0423e5774b9fdcfc8bdaf
|
5fac4503485817f8a2b73f07aa6e56f73cba08ca
|
refs/heads/main
| 2023-07-01T05:55:31.298843
| 2021-08-10T18:06:01
| 2021-08-10T18:06:01
| 384,810,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 910
|
py
|
from django.contrib import admin
from django.contrib.auth.models import Group
from .forms import (
CustomGroupAdminForm,
AssignmentAdminForm,
AssignmentsAdminForm,
)
from .models import (
CustomGroup,
Assignment,
Assignments,
)
class CustomGroupAdmin(admin.ModelAdmin):
form = CustomGroupAdminForm
model = CustomGroup
filter_horizontal = ['permissions',]
list_display = ['name', 'leader', 'created', 'deadline',]
class AssignmentAdmin(admin.ModelAdmin):
form = AssignmentAdminForm
model = Assignment
list_display = ['member', 'assignment']
class AssignmentsAdmin(admin.ModelAdmin):
form = AssignmentsAdminForm
model = Assignments
list_display = ['group', 'time']
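
# Hide the built-in Group admin and register the custom group models instead.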
admin.site.unregister(Group)
admin.site.register(CustomGroup, CustomGroupAdmin)
admin.site.register(Assignment, AssignmentAdmin)
admin.site.register(Assignments, AssignmentsAdmin)
|
[
"andreas.andersen@gmail.com"
] |
andreas.andersen@gmail.com
|
5054475bccaf0879e8a054dc1c76c35c7c5437a1
|
fdf66fa44ebfb24c138ad9cd9c14ec4850f696cb
|
/ladim_plugins/sedimentation/ibm.py
|
4d887aa9e9f6b720c9d999301bc19f2dc1c77529
|
[
"MIT"
] |
permissive
|
mcarvajalino/ladim_plugins
|
71ffb62abe4a1829fbf9670da7dad9c6d9e498ba
|
a83ca24f41400e42d2beaddad455fdfdb7a3e1cd
|
refs/heads/master
| 2022-11-24T13:34:49.970967
| 2020-07-20T14:25:27
| 2020-07-20T14:25:27
| 278,305,441
| 0
| 0
|
MIT
| 2020-07-09T08:18:36
| 2020-07-09T08:18:35
| null |
UTF-8
|
Python
| false
| false
| 4,848
|
py
|
import numpy as np
class IBM:
def __init__(self, config):
# Time before a particle is taken out of the simulation [seconds]
self.lifespan = config['ibm']['lifespan']
        # Vertical mixing [m**2/s]
self.D = config['ibm']['vertical_mixing'] # 0.001 m2/s -- 0.01 m2/s (?)
self.taucrit = config['ibm'].get('taucrit', None)
self.vertical_diffusion = self.D > 0
# Store time step value to calculate age
self.dt = config['dt']
# Reference to other modules
self.grid = None
self.forcing = None
self.state = None
def update_ibm(self, grid, state, forcing):
self.grid = grid
self.forcing = forcing
self.state = state
self.resuspend()
self.diffuse()
self.sink()
self.bury()
self.kill_old()
def resuspend(self):
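        """Re-activate settled particles where the bottom shear stress exceeds taucrit."""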
if self.taucrit is None:
return
ustar = self.shear_velocity_btm()
tau = shear_stress_btm(ustar)
resusp = tau > self.taucrit
self.state.active[resusp] = True
def bury(self):
grid = self.grid
a = self.state.active != 0
X, Y, Z = self.state.X[a], self.state.Y[a], self.state.Z[a]
# Define which particles have settled to the bottom and which have not
H = grid.sample_depth(X, Y) # Water depth
at_seabed = Z > H
Z[at_seabed] = H[at_seabed]
# Store new vertical position
self.state.Z[a] = Z
self.state.active[a] = ~at_seabed
def diffuse(self):
# Get parameters
a = self.state.active != 0
x, y, z = self.state.X[a], self.state.Y[a], self.state.Z[a]
h = self.grid.sample_depth(x, y)
# Diffusion
b0 = np.sqrt(2 * self.D)
dw = np.random.randn(z.size).reshape(z.shape) * np.sqrt(self.dt)
z1 = z + b0 * dw
# Reflexive boundary conditions
z1[z1 < 0] *= -1 # Surface
below_seabed = z1 > h
z1[below_seabed] = 2*h[below_seabed] - z1[below_seabed]
# Store new vertical position
self.state.Z[a] = z1
def sink(self):
# Get parameters
a = self.state.active != 0
z = self.state.Z[a]
w = self.state.sink_vel[a] # Sink velocity
# Euler scheme, no boundary conditions
self.state.Z[a] = z + self.dt * w
def kill_old(self):
state = self.state
state.age += state.dt
state.alive = state.alive & (state.age <= self.lifespan)
def shear_velocity_btm(self):
# Calculates bottom shear velocity from last computational layer
# velocity
# returns: Ustar at bottom cell
x = self.state.X
y = self.state.Y
h = self.grid.sample_depth(x, y)
u_btm, v_btm = self.forcing.velocity(x, y, h, tstep=0)
U2 = u_btm*u_btm + v_btm*v_btm
c = 0.003
return np.sqrt(c * U2)
def shear_stress_btm(ustar):
rho = 1000
return ustar * ustar * rho
def ladis(x0, t0, t1, v, K):
"""
Lagrangian Advection and DIffusion Solver.
Solve the diffusion equation in a Lagrangian framework. The equation is
dc/dt = - grad (vc) + div (K grad c),
where c is concentration, v is 3-dimensional velocity, K is a diagonal
tensor (i.e. main axes oriented along coordinate axes) of diffusion.
This is translated to a stochastic differential equation of the form
dx = (v_x + d/dx K_xx) * dt + sqrt(2*K_xx) * dw_x,
dy = (v_y + d/dy K_yy) * dt + sqrt(2*K_yy) * dw_y,
dz = (v_z + d/dz K_zz) * dt + sqrt(2*K_zz) * dw_z,
where x, y, z is the spatial position, K_xx, K_yy, K_zz are the diagonal
elements of K, and dw_x, dw_y, dw_z are Wiener process differential elements
with zero mean and stdev = sqrt(dt).
Algorithm:
Operator splitting: Diffusion first, then advection. Diffusion is solved
using the gradient-free backwards Itô scheme, according to LaBolle
(2000, 10.1029/1999WR900224).
:param x0: An N x M vector of initial values, where N is the number of
particles and M is the number of coordinates.
:param t0: The initial time.
:param t1: The end time.
:param v: The velocity. A function (x, t) --> x-like.
:param K: The diagonal elements of the diffusion tensor.
A function (x, t) --> x-like.
:return: An x0-like array of the new particle positions.
"""
dt = t1 - t0
# --- Diffusion, LaBolle scheme ---
# First diffusion step (predictor)
b0 = np.sqrt(2 * K(x0, t0))
dw = np.random.randn(x0.size).reshape(x0.shape) * np.sqrt(dt)
x1 = x0 + b0 * dw
# Second diffusion step (corrector)
b1 = np.sqrt(2 * K(x1, t0))
x2 = x0 + b1 * dw
# --- Advection, forward Euler ---
a3 = v(x2, t0)
x3 = x2 + a3 * dt
return x3
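

# A minimal, illustrative sketch of how `ladis` could be called (the velocity,
# diffusivity and time interval below are made-up demonstration values):
#
#     x0 = np.zeros((100, 3))                      # 100 particles at the origin
#     v = lambda x, t: np.array([0.0, 0.0, 0.01])  # constant sinking velocity
#     K = lambda x, t: np.full_like(x, 1e-3)       # uniform diffusivity
#     x1 = ladis(x0, 0.0, 60.0, v, K)              # advance 60 seconds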
|
[
"paal.naeverlid.saevik@hi.no"
] |
paal.naeverlid.saevik@hi.no
|
4166a69a49c224c29b3c6e48f4a46bf539ccb102
|
3daaa4fca3c829d4654c3cd1fe0fc9215c8d31ee
|
/build/env/bin/pycolor
|
3fa70750c308cfd16d35d81a0267b67a049b31d6
|
[
"Apache-2.0"
] |
permissive
|
yongshengwang/hue
|
0f8e41793ae8c1165860846fd2edb599e70a35a1
|
7d2a85f4269e8a3b5fd817bf52db80c8c98c76c7
|
refs/heads/master
| 2021-01-17T08:59:49.760232
| 2015-09-17T00:56:59
| 2015-09-17T00:56:59
| 42,576,621
| 0
| 0
| null | 2015-09-16T09:05:36
| 2015-09-16T09:05:36
| null |
UTF-8
|
Python
| false
| false
| 321
|
#!/home/tiny/hue/build/env/bin/python2.7
# EASY-INSTALL-ENTRY-SCRIPT: 'ipython==0.10','console_scripts','pycolor'
__requires__ = 'ipython==0.10'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('ipython==0.10', 'console_scripts', 'pycolor')()
)
|
[
"wysnight@gmail.com"
] |
wysnight@gmail.com
|
|
bb495c4beaa3e9dc52378f1884975061abdacc69
|
bc0a3e754509416b33501deb272801c119842c2d
|
/pur_beurre/functional_tests/tests.py
|
6029c076e1dcc51ec286cd5b8cb6a74ed66effef
|
[] |
no_license
|
oliviernes/oc_P8
|
88374759eafb1f3f35e35cfc205c8ec38ca90a74
|
4671b8ff76ba2a7d99fd043772249ca2778ae11f
|
refs/heads/master
| 2023-08-11T23:11:53.304025
| 2020-10-25T19:19:39
| 2020-10-25T19:19:39
| 248,483,352
| 0
| 0
| null | 2021-09-22T19:35:51
| 2020-03-19T11:16:04
|
CSS
|
UTF-8
|
Python
| false
| false
| 8,389
|
py
|
"""Functional tests"""
import time
from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class NewVisitorTest(LiveServerTestCase):
"""Class testing a new user visiting the Pur Beurre website"""
fixtures = ["dumpy_content_fixtures"]
def setUp(self):
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
def test_user_story(self):
"""Test the story of an user inside Pur Beurre app"""
# Mell has heard about a website to get healthier products. She goes
# to check out its homepage
self.browser.get(self.live_server_url)
time.sleep(1)
# She notices the form bar in the header and in the center of the page
self.assertIn("Pur", self.browser.title)
bar_text = self.browser.find_element_by_tag_name("a").text
self.assertIn("Pur", bar_text)
# She is invited to enter a product item straight away
inputbox = self.browser.find_element_by_name("query")
self.assertEqual(inputbox.get_attribute("placeholder"), "Chercher")
# She types "Nutella" into the text box in the middle of the screen
inputboxs = self.browser.find_elements_by_name("query")
inputbox_center = inputboxs[1]
self.assertEqual(
inputbox_center.get_attribute("placeholder"), "Produit"
)
inputbox_center.send_keys("Nutella")
# When she hits enter, the page updates, and now the page lists
# healthier products of the same category.
inputbox_center.send_keys(Keys.ENTER)
time.sleep(1)
prod_text = self.browser.find_element_by_tag_name("h4").text
self.assertIn("Nocciolata", prod_text)
        # She selects a product and gets a new page with the details
        # of the product
link = self.browser.find_element_by_xpath(
'//a[@href="/product/8001505005592"]'
)
link.click()
time.sleep(1)
# She enters a new product in the textbox in the top of the screen.
inputbox = self.browser.find_element_by_name("query")
self.assertEqual(inputbox.get_attribute("placeholder"), "Chercher")
inputbox.send_keys("Véritable petit beurre")
inputbox.send_keys(Keys.ENTER)
time.sleep(1)
        # The page updates and shows a new list of healthier products.
        # Mell tries to save the first product. She clicks on the
        # save link but, as she is not connected, the link sends
        # her to the login page:
save_texts = self.browser.find_elements_by_tag_name("h4")
self.assertIn("Sauvegarder", save_texts[1].text)
save_texts[1].click()
time.sleep(1)
# She tries to connect. She enters her email
# and a wrong password. Then she clicks on the login button.
signup_link = self.browser.find_element_by_id("signup")
self.assertIn("un compte!", signup_link.text)
username = self.browser.find_element_by_id("id_username")
password = self.browser.find_element_by_id("id_password")
username.send_keys("mell2010@gmail.com")
password.send_keys("XXXXX")
password.send_keys(Keys.ENTER)
time.sleep(1)
        # The system informs her to try again:
        # Then, she clicks on the signup button because she doesn't
        # have an account:
signup = self.browser.find_element_by_id("signup")
signup.click()
time.sleep(1)
        # The system displays the signup form. She enters her
        # account information:
username = self.browser.find_element_by_id("id_username")
first_name = self.browser.find_element_by_id("id_first_name")
email = self.browser.find_element_by_id("id_email")
password1 = self.browser.find_element_by_id("id_password1")
password2 = self.browser.find_element_by_id("id_password2")
username.send_keys("Mell2010")
first_name.send_keys("Mell")
email.send_keys("mell2010@gmail.com")
password1.send_keys("monsupermdp1234")
password2.send_keys("monsupermdp1234")
signup_title = self.browser.find_element_by_id("signup_title")
self.assertEqual("Création de compte:", signup_title.text)
button = self.browser.find_elements_by_tag_name("button")
button[1].click()
time.sleep(1)
        # The app displays Mell's account page. She is now connected.
        # She searches again for the product "Véritable petit beurre"
        # in the search bar.
inputbox = self.browser.find_element_by_name("query")
self.assertEqual(inputbox.get_attribute("placeholder"), "Chercher")
inputbox.send_keys("Véritable petit beurre")
inputbox.send_keys(Keys.ENTER)
time.sleep(1)
# She tries now to save the second product ('Biscuit raisin')
save_texts = self.browser.find_elements_by_tag_name("h4")
self.assertEqual("Biscuit raisin", save_texts[2].text)
save_texts[3].click()
        # A page informs her that the substitute has been recorded and
        # displays a table with her recorded substitutes. The table is
        # empty as it's her first recorded product.
caption = self.browser.find_element_by_tag_name("h4")
self.assertIn("Vos substituts enregistrés", caption.text)
time.sleep(2)
        # She looks for another substitute of 'Véritable petit beurre':
inputbox = self.browser.find_element_by_name("query")
self.assertEqual(inputbox.get_attribute("placeholder"), "Chercher")
inputbox.send_keys("Véritable petit beurre")
inputbox.send_keys(Keys.ENTER)
        # She tries to save the same substitute as the last recording:
time.sleep(2)
save_texts = self.browser.find_elements_by_tag_name("h4")
self.assertEqual("Biscuit raisin", save_texts[2].text)
save_texts[3].click()
        # Now the app informs her that the product is already recorded
        # in the database:
time.sleep(2)
first_p = self.browser.find_elements_by_tag_name("p")
self.assertIn("est déjà enregistré pour le produit", first_p[0].text)
        # Now the table displays her first recording:
first_rec = self.browser.find_elements_by_tag_name("td")
self.assertEqual("Véritable petit beurre", first_rec[0].text)
        # She enters another product in the search bar:
inputbox = self.browser.find_element_by_name("query")
inputbox.send_keys("La paille d’or aux framboises")
inputbox.send_keys(Keys.ENTER)
time.sleep(2)
        # She tries to save another substitute:
save_texts = self.browser.find_elements_by_tag_name("h4")
self.assertEqual("Belvita petit dejeuner moelleux", save_texts[2].text)
save_texts[3].click()
        # Now the app informs her that the product has been recorded
# in the database:
first_p = self.browser.find_elements_by_tag_name("p")
self.assertIn("a été enregistré pour le produit", first_p[0].text)
time.sleep(2)
# Now she logs out:
logout = self.browser.find_element_by_xpath('//a[@href="/logout"]')
logout.click()
time.sleep(2)
# Then she logs in:
login = self.browser.find_element_by_xpath('//a[@href="/login/"]')
login.click()
time.sleep(2)
username = self.browser.find_element_by_id("id_username")
password = self.browser.find_element_by_id("id_password")
username.send_keys("mell2010@gmail.com")
password.send_keys("monsupermdp1234")
password.send_keys(Keys.ENTER)
# She checks that her recorded substitutes are still in the database:
time.sleep(3)
carrot = self.browser.find_element_by_xpath('//a[@href="/favorites/"]')
carrot.click()
first_rec = self.browser.find_elements_by_tag_name("td")
self.assertEqual("Véritable petit beurre", first_rec[0].text)
sec_rec = self.browser.find_elements_by_tag_name("td")
self.assertEqual("Belvita petit dejeuner moelleux", sec_rec[3].text)
time.sleep(2)
# self.fail('Finish the test!')
# Satisfied, she goes back to sleep after logging out:
|
[
"olivier.nesme@gmail.com"
] |
olivier.nesme@gmail.com
|
d6cd2c0b41c0f13605d7b9fff2cf085d44260e30
|
9cf434b6ee59ab22496ee031fb4ab145bbaff1a2
|
/tranque_v1.8.4_source/backend/src/alerts/modules/ef/m2/desplazamiento_deformacion_muro/C4.py
|
f4cd5ac1032141486d6824cc375ae30512c1ed27
|
[] |
no_license
|
oliverhernandezmoreno/SourcesOH
|
f2ff1a5e3377f0ac1fb8b3153d99d0ee703700b7
|
5d9ca5ab1caceafd4d11207139c9e56210156ef8
|
refs/heads/master
| 2023-01-05T02:51:25.172103
| 2020-08-27T14:39:34
| 2020-08-27T14:39:34
| 64,422,812
| 0
| 1
| null | 2022-12-30T17:25:10
| 2016-07-28T19:33:44
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 656
|
py
|
from django.db.models import Q
from alerts.modules import base
from alerts.modules.base import spread
from targets.models import DataSource
from .base import DesplazamientoDeformacionController
from alerts.modules.base_states import EVENT_STATES
from alerts.modules.utils import single_state_create
@spread(DataSource, Q(groups__canonical_name='inclinometros'))
class Controller(DesplazamientoDeformacionController):
states = base.StringEnum(*EVENT_STATES, "C4")
TEMPLATE = "ef-mvp.m2.parameters.deformacion.inclinometro.muro.eje-x"
relevant_events = [base.event_query(Q(template_name=TEMPLATE), 1)]
create = single_state_create("C4")
|
[
"oliverhernandezmoreno@gmail.com"
] |
oliverhernandezmoreno@gmail.com
|
5fcd07dbaf13100556f96409f7c098f0889be097
|
39e678fb9200560f9135ef3cb18c93f2a118fe9c
|
/svm/svm.py
|
c5001383c4e4f68913b80b5aacb4f07db83489c9
|
[] |
no_license
|
devikrishnan/Prediction-Model---Diabetes
|
7061cbaa9a3fae4c0b02f9cec1752fb38dddadf8
|
533d0bfd90ef9b24cf46933311f3f6c6272f5c46
|
refs/heads/master
| 2021-09-07T07:08:12.572746
| 2018-02-19T10:33:18
| 2018-02-19T10:33:18
| 105,686,511
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,294
|
py
|
from __future__ import print_function
import pandas as pd
import numpy as np
from sklearn import datasets
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix
parameter_candidates = [
{'C': [1, 10, 100, 1000], 'kernel': ['linear']},
{'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']},
]
class svm():
def fit(self):
df = pd.read_csv('Datasetnew.csv',header=None)
h=np.asarray(df)
dataset = np.nan_to_num(h)
XX = dataset[:,1:65]
y = dataset[:,0]
X = preprocessing.normalize(XX)
k_fold = KFold(10)
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(), parameter_candidates, cv=5, scoring='%s_macro' % score)
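            # The grid search is re-fit on every CV fold; only the last
            # fold's fit and its held-out indices feed the report below.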
for k, (train, test) in enumerate(k_fold.split(X, y)):
clf.fit(X[train], y[train])
#print()
#print(y[train])
#print()
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
'''print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
print()'''
print("Detailed classification report:")
print()
y_true, y_pred = y[test], clf.predict(X[test])
cnf_matrix_mnb = confusion_matrix(y[test], y_pred)
print(classification_report(y_true, y_pred))
print()
print(clf.score(X[test], y[test]))
print(cnf_matrix_mnb)
print()
print()
svm().fit()
|
[
"devikrishnan67@gmail.com"
] |
devikrishnan67@gmail.com
|
acfbf5087c276f5a595830928c51cc2d5546c02a
|
2fbba2e0dd9afe5fbde70bd0d907b34cda5265ae
|
/lesson5/excersise3.py
|
299f887d87c8d7c353adb26e3def81196585f892
|
[] |
no_license
|
htr2/pynetme
|
c3655fe6cf51c9a50dc69425410ef89088f4d8c4
|
59de01967cbab4f746e28bf4c929e02975bcf0d6
|
refs/heads/master
| 2020-03-27T00:25:52.258387
| 2019-07-29T10:08:56
| 2019-07-29T10:08:56
| 145,622,330
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,986
|
py
|
#!/usr/bin/env python
"""
Similar to lesson3, exercise4 write a function that normalizes a MAC address to the following
format:
01:23:45:67:89:AB
This function should handle the lower-case to upper-case conversion.
It should also handle converting from '0000.aaaa.bbbb' and from '00-00-aa-aa-bb-bb' formats.
The function should have one parameter, the mac_address. It should return the normalized MAC address
Single digit bytes should be zero-padded to two digits. In other words, this:
a:b:c:d:e:f
should be converted to:
0A:0B:0C:0D:0E:0F
Write several test cases for your function and verify it is working properly.
"""
from __future__ import print_function, unicode_literals
import re
def normalise_mac(mac_address):
mac_address = mac_address.upper()
normalised_mac = []
work_mac = re.split(r"[-:.]", mac_address)
if len(work_mac) == 3:
for mac_parts in work_mac:
mac_part1 = mac_parts
if len(mac_part1) < 4:
mac_part1 = mac_part1.zfill(4)
normalised_mac.append(mac_part1[:2])
normalised_mac.append(mac_part1[2:])
elif len(work_mac) == 6:
for mac_part1 in work_mac:
if len(mac_part1) < 2:
mac_part1 = mac_part1.zfill(2)
normalised_mac.append(mac_part1)
    else:
        raise ValueError("Unrecognised MAC address format: %s" % mac_address)
return ":".join(normalised_mac)
"""
print("0000.aaaa.bbbb:",normalise_mac("0000.aaaa.bbb"))
print("00-00-aa-aa-bb-bb:",normalise_mac("00-00-aa-aa-bb-bb"))
print("a:b:c:d:e:f:",normalise_mac("a:b:c:d:e:f"))
print("1:1:1:1",normalise_mac("1:1:1:1"))
"""
# Some tests
assert "01:23:02:34:04:56" == normalise_mac('123.234.456')
assert "AA:BB:CC:DD:EE:FF" == normalise_mac('aabb.ccdd.eeff')
assert "0A:0B:0C:0D:0E:0F" == normalise_mac('a:b:c:d:e:f')
assert "01:02:0A:0B:03:44" == normalise_mac('1:2:a:b:3:44')
assert "0A:0B:0C:0D:0E:0F" == normalise_mac('a-b-c-d-e-f')
assert "01:02:0A:0B:03:44" == normalise_mac('1-2-a-b-3-44')
print("Tests passed")
|
[
"hvoelksen@gmail.com"
] |
hvoelksen@gmail.com
|
b7d59052bc35ff4ff1b5a0faa8b6f4383e22edad
|
bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
|
/platform/bq/third_party/google/auth/credentials.py
|
f3014a08d291f6cf1e635a17f5d8b3ed1381593a
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
05fbb473d629195f25887fc5bfaa712f2cbc0a24
|
392abf004b16203030e6efd2f0af24db7c8d669e
|
refs/heads/master
| 2023-08-31T05:40:41.317697
| 2023-08-23T18:23:16
| 2023-08-23T18:23:16
| 335,182,594
| 9
| 2
|
NOASSERTION
| 2022-10-29T20:49:13
| 2021-02-02T05:47:30
|
Python
|
UTF-8
|
Python
| false
| false
| 13,988
|
py
|
#!/usr/bin/env python
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interfaces for credentials."""
import abc
import os
import six
from google.auth import _helpers, environment_vars
from google.auth import exceptions
@six.add_metaclass(abc.ABCMeta)
class Credentials(object):
"""Base class for all credentials.
All credentials have a :attr:`token` that is used for authentication and
may also optionally set an :attr:`expiry` to indicate when the token will
no longer be valid.
Most credentials will be :attr:`invalid` until :meth:`refresh` is called.
Credentials can do this automatically before the first HTTP request in
:meth:`before_request`.
Although the token and expiration will change as the credentials are
:meth:`refreshed <refresh>` and used, credentials should be considered
immutable. Various credentials will accept configuration such as private
keys, scopes, and other options. These options are not changeable after
construction. Some classes will provide mechanisms to copy the credentials
with modifications such as :meth:`ScopedCredentials.with_scopes`.
"""
def __init__(self):
self.token = None
"""str: The bearer token that can be used in HTTP headers to make
authenticated requests."""
self.expiry = None
"""Optional[datetime]: When the token expires and is no longer valid.
If this is None, the token is assumed to never expire."""
self._quota_project_id = None
"""Optional[str]: Project to use for quota and billing purposes."""
@property
def expired(self):
"""Checks if the credentials are expired.
Note that credentials can be invalid but not expired because
Credentials with :attr:`expiry` set to None is considered to never
expire.
"""
if not self.expiry:
return False
# Remove some threshold from expiry to err on the side of reporting
# expiration early so that we avoid the 401-refresh-retry loop.
skewed_expiry = self.expiry - _helpers.REFRESH_THRESHOLD
return _helpers.utcnow() >= skewed_expiry
@property
def valid(self):
"""Checks the validity of the credentials.
This is True if the credentials have a :attr:`token` and the token
is not :attr:`expired`.
"""
return self.token is not None and not self.expired
@property
def quota_project_id(self):
"""Project to use for quota and billing purposes."""
return self._quota_project_id
@abc.abstractmethod
def refresh(self, request):
"""Refreshes the access token.
Args:
request (google.auth.transport.Request): The object used to make
HTTP requests.
Raises:
google.auth.exceptions.RefreshError: If the credentials could
not be refreshed.
"""
# pylint: disable=missing-raises-doc
# (pylint doesn't recognize that this is abstract)
raise NotImplementedError("Refresh must be implemented")
def apply(self, headers, token=None):
"""Apply the token to the authentication header.
Args:
headers (Mapping): The HTTP request headers.
token (Optional[str]): If specified, overrides the current access
token.
"""
headers["authorization"] = "Bearer {}".format(
_helpers.from_bytes(token or self.token)
)
if self.quota_project_id:
headers["x-goog-user-project"] = self.quota_project_id
def before_request(self, request, method, url, headers):
"""Performs credential-specific before request logic.
Refreshes the credentials if necessary, then calls :meth:`apply` to
apply the token to the authentication header.
Args:
request (google.auth.transport.Request): The object used to make
HTTP requests.
method (str): The request's HTTP method or the RPC method being
invoked.
url (str): The request's URI or the RPC service's URI.
headers (Mapping): The request's headers.
"""
# pylint: disable=unused-argument
# (Subclasses may use these arguments to ascertain information about
# the http request.)
if not self.valid:
self.refresh(request)
self.apply(headers)
class CredentialsWithQuotaProject(Credentials):
"""Abstract base for credentials supporting ``with_quota_project`` factory"""
def with_quota_project(self, quota_project_id):
"""Returns a copy of these credentials with a modified quota project.
Args:
quota_project_id (str): The project to use for quota and
billing purposes
Returns:
google.oauth2.credentials.Credentials: A new credentials instance.
"""
raise NotImplementedError("This credential does not support quota project.")
def with_quota_project_from_environment(self):
quota_from_env = os.environ.get(environment_vars.GOOGLE_CLOUD_QUOTA_PROJECT)
if quota_from_env:
return self.with_quota_project(quota_from_env)
return self
class CredentialsWithTokenUri(Credentials):
"""Abstract base for credentials supporting ``with_token_uri`` factory"""
def with_token_uri(self, token_uri):
"""Returns a copy of these credentials with a modified token uri.
Args:
token_uri (str): The uri to use for fetching/exchanging tokens
Returns:
google.oauth2.credentials.Credentials: A new credentials instance.
"""
raise NotImplementedError("This credential does not use token uri.")
class AnonymousCredentials(Credentials):
"""Credentials that do not provide any authentication information.
These are useful in the case of services that support anonymous access or
local service emulators that do not use credentials.
"""
@property
def expired(self):
"""Returns `False`, anonymous credentials never expire."""
return False
@property
def valid(self):
"""Returns `True`, anonymous credentials are always valid."""
return True
def refresh(self, request):
"""Raises :class:``InvalidOperation``, anonymous credentials cannot be
refreshed."""
raise exceptions.InvalidOperation("Anonymous credentials cannot be refreshed.")
def apply(self, headers, token=None):
"""Anonymous credentials do nothing to the request.
The optional ``token`` argument is not supported.
Raises:
google.auth.exceptions.InvalidValue: If a token was specified.
"""
if token is not None:
raise exceptions.InvalidValue("Anonymous credentials don't support tokens.")
def before_request(self, request, method, url, headers):
"""Anonymous credentials do nothing to the request."""
@six.add_metaclass(abc.ABCMeta)
class ReadOnlyScoped(object):
"""Interface for credentials whose scopes can be queried.
OAuth 2.0-based credentials allow limiting access using scopes as described
in `RFC6749 Section 3.3`_.
    If a credential class implements this interface, then the credentials
    use scopes in their implementation.
Some credentials require scopes in order to obtain a token. You can check
if scoping is necessary with :attr:`requires_scopes`::
if credentials.requires_scopes:
# Scoping is required.
credentials = credentials.with_scopes(scopes=['one', 'two'])
Credentials that require scopes must either be constructed with scopes::
credentials = SomeScopedCredentials(scopes=['one', 'two'])
Or must copy an existing instance using :meth:`with_scopes`::
scoped_credentials = credentials.with_scopes(scopes=['one', 'two'])
Some credentials have scopes but do not allow or require scopes to be set,
these credentials can be used as-is.
.. _RFC6749 Section 3.3: https://tools.ietf.org/html/rfc6749#section-3.3
"""
def __init__(self):
super(ReadOnlyScoped, self).__init__()
self._scopes = None
self._default_scopes = None
@property
def scopes(self):
"""Sequence[str]: the credentials' current set of scopes."""
return self._scopes
@property
def default_scopes(self):
"""Sequence[str]: the credentials' current set of default scopes."""
return self._default_scopes
@abc.abstractproperty
def requires_scopes(self):
"""True if these credentials require scopes to obtain an access token.
"""
return False
def has_scopes(self, scopes):
"""Checks if the credentials have the given scopes.
.. warning: This method is not guaranteed to be accurate if the
credentials are :attr:`~Credentials.invalid`.
Args:
scopes (Sequence[str]): The list of scopes to check.
Returns:
bool: True if the credentials have the given scopes.
"""
credential_scopes = (
self._scopes if self._scopes is not None else self._default_scopes
)
return set(scopes).issubset(set(credential_scopes or []))
class Scoped(ReadOnlyScoped):
"""Interface for credentials whose scopes can be replaced while copying.
OAuth 2.0-based credentials allow limiting access using scopes as described
in `RFC6749 Section 3.3`_.
    If a credential class implements this interface, then the credentials
    use scopes in their implementation.
Some credentials require scopes in order to obtain a token. You can check
if scoping is necessary with :attr:`requires_scopes`::
if credentials.requires_scopes:
# Scoping is required.
credentials = credentials.create_scoped(['one', 'two'])
Credentials that require scopes must either be constructed with scopes::
credentials = SomeScopedCredentials(scopes=['one', 'two'])
Or must copy an existing instance using :meth:`with_scopes`::
scoped_credentials = credentials.with_scopes(scopes=['one', 'two'])
Some credentials have scopes but do not allow or require scopes to be set,
these credentials can be used as-is.
.. _RFC6749 Section 3.3: https://tools.ietf.org/html/rfc6749#section-3.3
"""
@abc.abstractmethod
def with_scopes(self, scopes, default_scopes=None):
"""Create a copy of these credentials with the specified scopes.
Args:
scopes (Sequence[str]): The list of scopes to attach to the
current credentials.
Raises:
NotImplementedError: If the credentials' scopes can not be changed.
This can be avoided by checking :attr:`requires_scopes` before
calling this method.
"""
raise NotImplementedError("This class does not require scoping.")
def with_scopes_if_required(credentials, scopes, default_scopes=None):
"""Creates a copy of the credentials with scopes if scoping is required.
This helper function is useful when you do not know (or care to know) the
specific type of credentials you are using (such as when you use
:func:`google.auth.default`). This function will call
:meth:`Scoped.with_scopes` if the credentials are scoped credentials and if
the credentials require scoping. Otherwise, it will return the credentials
as-is.
Args:
credentials (google.auth.credentials.Credentials): The credentials to
scope if necessary.
scopes (Sequence[str]): The list of scopes to use.
default_scopes (Sequence[str]): Default scopes passed by a
Google client library. Use 'scopes' for user-defined scopes.
Returns:
google.auth.credentials.Credentials: Either a new set of scoped
credentials, or the passed in credentials instance if no scoping
was required.
"""
if isinstance(credentials, Scoped) and credentials.requires_scopes:
return credentials.with_scopes(scopes, default_scopes=default_scopes)
else:
return credentials
@six.add_metaclass(abc.ABCMeta)
class Signing(object):
"""Interface for credentials that can cryptographically sign messages."""
@abc.abstractmethod
def sign_bytes(self, message):
"""Signs the given message.
Args:
message (bytes): The message to sign.
Returns:
bytes: The message's cryptographic signature.
"""
# pylint: disable=missing-raises-doc,redundant-returns-doc
# (pylint doesn't recognize that this is abstract)
raise NotImplementedError("Sign bytes must be implemented.")
@abc.abstractproperty
def signer_email(self):
"""Optional[str]: An email address that identifies the signer."""
# pylint: disable=missing-raises-doc
# (pylint doesn't recognize that this is abstract)
raise NotImplementedError("Signer email must be implemented.")
@abc.abstractproperty
def signer(self):
"""google.auth.crypt.Signer: The signer used to sign bytes."""
# pylint: disable=missing-raises-doc
# (pylint doesn't recognize that this is abstract)
raise NotImplementedError("Signer must be implemented.")
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
ade1e463d92aaf8de7ffcf2b58ea3cefa5b27722
|
9f2f386a692a6ddeb7670812d1395a0b0009dad9
|
/python/paddle/metric/metrics.py
|
d399cb2052498529f41a3617a60b9a221b5d837c
|
[
"Apache-2.0"
] |
permissive
|
sandyhouse/Paddle
|
2f866bf1993a036564986e5140e69e77674b8ff5
|
86e0b07fe7ee6442ccda0aa234bd690a3be2cffa
|
refs/heads/develop
| 2023-08-16T22:59:28.165742
| 2022-06-03T05:23:39
| 2022-06-03T05:23:39
| 181,423,712
| 0
| 7
|
Apache-2.0
| 2022-08-15T08:46:04
| 2019-04-15T06:15:22
|
C++
|
UTF-8
|
Python
| false
| false
| 28,621
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import abc
import numpy as np
from ..fluid.data_feeder import check_variable_and_dtype
from ..fluid.layer_helper import LayerHelper
from ..fluid.framework import core, _varbase_creator, _non_static_mode, _in_legacy_dygraph
import paddle
from paddle import _C_ops
__all__ = []
def _is_numpy_(var):
return isinstance(var, (np.ndarray, np.generic))
@six.add_metaclass(abc.ABCMeta)
class Metric(object):
r"""
Base class for metric, encapsulates metric logic and APIs
Usage:
.. code-block:: text
m = SomeMetric()
for prediction, label in ...:
m.update(prediction, label)
m.accumulate()
Advanced usage for :code:`compute`:
    Metric calculation can be accelerated by calculating metric states
    from model outputs and labels with built-in operators rather than
    Python/NumPy in :code:`compute`; metric states are then fetched as
    NumPy arrays and :code:`update` is called with the states in NumPy
    format.
    Metric is calculated as follows (operations in Model and Metric are
    indicated with curly brackets, while data nodes are not):
.. code-block:: text
inputs & labels || ------------------
| ||
{model} ||
| ||
outputs & labels ||
| || tensor data
{Metric.compute} ||
| ||
metric states(tensor) ||
| ||
{fetch as numpy} || ------------------
| ||
metric states(numpy) || numpy data
| ||
{Metric.update} \/ ------------------
Examples:
For :code:`Accuracy` metric, which takes :code:`pred` and :code:`label`
as inputs, we can calculate the correct prediction matrix between
:code:`pred` and :code:`label` in :code:`compute`.
        For example, if the prediction results contain 10 classes, :code:`pred`
        shape is [N, 10], :code:`label` shape is [N, 1], N is the mini-batch size,
        and we only need to calculate the accuracy of top-1 and top-5, we could
calculate the correct prediction matrix of the top-5 scores of the
prediction of each sample like follows, while the correct prediction
matrix shape is [N, 5].
.. code-block:: text
def compute(pred, label):
# sort prediction and slice the top-5 scores
pred = paddle.argsort(pred, descending=True)[:, :5]
# calculate whether the predictions are correct
correct = pred == label
return paddle.cast(correct, dtype='float32')
        With the :code:`compute`, we split some calculations into OPs (which
        may run on GPU devices and will be faster), and only fetch one tensor
        with shape [N, 5] instead of two tensors with shapes [N, 10] and [N, 1].
        :code:`update` can be defined as follows:
.. code-block:: text
def update(self, correct):
accs = []
for i, k in enumerate(self.topk):
num_corrects = correct[:, :k].sum()
num_samples = len(correct)
accs.append(float(num_corrects) / num_samples)
self.total[i] += num_corrects
self.count[i] += num_samples
return accs
"""
def __init__(self):
pass
@abc.abstractmethod
def reset(self):
"""
Reset states and result
"""
raise NotImplementedError("function 'reset' not implemented in {}.".
format(self.__class__.__name__))
@abc.abstractmethod
def update(self, *args):
"""
Update states for metric
        Inputs of :code:`update` are the outputs of :code:`Metric.compute`;
        if :code:`compute` is not defined, the inputs of :code:`update`
        will be the flattened arguments of the **output** of the model and
        the **label** from data:
:code:`update(output1, output2, ..., label1, label2,...)`
see :code:`Metric.compute`
"""
raise NotImplementedError("function 'update' not implemented in {}.".
format(self.__class__.__name__))
@abc.abstractmethod
def accumulate(self):
"""
Accumulates statistics, computes and returns the metric value
"""
raise NotImplementedError(
"function 'accumulate' not implemented in {}.".format(
self.__class__.__name__))
@abc.abstractmethod
def name(self):
"""
Returns metric name
"""
raise NotImplementedError("function 'name' not implemented in {}.".
format(self.__class__.__name__))
def compute(self, *args):
"""
        This API is an advanced usage to accelerate metric calculation. Calculations
        from the outputs of the model to the states which should be updated by Metric
        can be defined here, where Paddle OPs are also supported. Outputs of this API
will be the inputs of "Metric.update".
If :code:`compute` is defined, it will be called with **outputs**
        of the model and **labels** from data as arguments; all outputs and labels
        will be concatenated and flattened, and each field passed as a separate argument
as follows:
:code:`compute(output1, output2, ..., label1, label2,...)`
If :code:`compute` is not defined, default behaviour is to pass
input to output, so output format will be:
:code:`return output1, output2, ..., label1, label2,...`
see :code:`Metric.update`
"""
return args
class Accuracy(Metric):
"""
Encapsulates accuracy metric logic.
Args:
topk (list[int]|tuple[int]): Number of top elements to look at
for computing accuracy. Default is (1,).
name (str, optional): String name of the metric instance. Default
is `acc`.
Example by standalone:
.. code-block:: python
import numpy as np
import paddle
x = paddle.to_tensor(np.array([
[0.1, 0.2, 0.3, 0.4],
[0.1, 0.4, 0.3, 0.2],
[0.1, 0.2, 0.4, 0.3],
[0.1, 0.2, 0.3, 0.4]]))
y = paddle.to_tensor(np.array([[0], [1], [2], [3]]))
m = paddle.metric.Accuracy()
correct = m.compute(x, y)
m.update(correct)
res = m.accumulate()
print(res) # 0.75
Example with Model API:
.. code-block:: python
import paddle
from paddle.static import InputSpec
import paddle.vision.transforms as T
from paddle.vision.datasets import MNIST
input = InputSpec([None, 1, 28, 28], 'float32', 'image')
label = InputSpec([None, 1], 'int64', 'label')
transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
train_dataset = MNIST(mode='train', transform=transform)
model = paddle.Model(paddle.vision.models.LeNet(), input, label)
optim = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
model.prepare(
optim,
loss=paddle.nn.CrossEntropyLoss(),
metrics=paddle.metric.Accuracy())
model.fit(train_dataset, batch_size=64)
"""
def __init__(self, topk=(1, ), name=None, *args, **kwargs):
super(Accuracy, self).__init__(*args, **kwargs)
self.topk = topk
self.maxk = max(topk)
self._init_name(name)
self.reset()
def compute(self, pred, label, *args):
"""
Compute the top-k (maximum value in `topk`) indices.
Args:
pred (Tensor): The predicted value is a Tensor with dtype
float32 or float64. Shape is [batch_size, d0, ..., dN].
label (Tensor): The ground truth value is Tensor with dtype
int64. Shape is [batch_size, d0, ..., 1], or
[batch_size, d0, ..., num_classes] in one hot representation.
Return:
Tensor: Correct mask, a tensor with shape [batch_size, d0, ..., topk].
"""
pred = paddle.argsort(pred, descending=True)
pred = paddle.slice(
pred, axes=[len(pred.shape) - 1], starts=[0], ends=[self.maxk])
if (len(label.shape) == 1) or \
(len(label.shape) == 2 and label.shape[-1] == 1):
# In static mode, the real label data shape may be different
# from shape defined by paddle.static.InputSpec in model
# building, reshape to the right shape.
label = paddle.reshape(label, (-1, 1))
elif label.shape[-1] != 1:
# one-hot label
label = paddle.argmax(label, axis=-1, keepdim=True)
correct = pred == label
return paddle.cast(correct, dtype='float32')
def update(self, correct, *args):
"""
Update the metrics states (correct count and total count), in order to
calculate cumulative accuracy of all instances. This function also
returns the accuracy of current step.
Args:
correct: Correct mask, a tensor with shape [batch_size, d0, ..., topk].
Return:
Tensor: the accuracy of current step.
"""
if isinstance(correct, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
correct = correct.numpy()
num_samples = np.prod(np.array(correct.shape[:-1]))
accs = []
for i, k in enumerate(self.topk):
num_corrects = correct[..., :k].sum()
accs.append(float(num_corrects) / num_samples)
self.total[i] += num_corrects
self.count[i] += num_samples
accs = accs[0] if len(self.topk) == 1 else accs
return accs
def reset(self):
"""
Resets all of the metric state.
"""
self.total = [0.] * len(self.topk)
self.count = [0] * len(self.topk)
def accumulate(self):
"""
Computes and returns the accumulated metric.
"""
res = []
for t, c in zip(self.total, self.count):
r = float(t) / c if c > 0 else 0.
res.append(r)
res = res[0] if len(self.topk) == 1 else res
return res
def _init_name(self, name):
name = name or 'acc'
if self.maxk != 1:
self._name = ['{}_top{}'.format(name, k) for k in self.topk]
else:
self._name = [name]
def name(self):
"""
Return name of metric instance.
"""
return self._name
class Precision(Metric):
"""
Precision (also called positive predictive value) is the fraction of
relevant instances among the retrieved instances. Refer to
https://en.wikipedia.org/wiki/Evaluation_of_binary_classifiers
Noted that this class manages the precision score only for binary
classification task.
Args:
name (str, optional): String name of the metric instance.
Default is `precision`.
Example by standalone:
.. code-block:: python
import numpy as np
import paddle
x = np.array([0.1, 0.5, 0.6, 0.7])
y = np.array([0, 1, 1, 1])
m = paddle.metric.Precision()
m.update(x, y)
res = m.accumulate()
print(res) # 1.0
Example with Model API:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
class Data(paddle.io.Dataset):
def __init__(self):
super(Data, self).__init__()
self.n = 1024
self.x = np.random.randn(self.n, 10).astype('float32')
self.y = np.random.randint(2, size=(self.n, 1)).astype('float32')
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
def __len__(self):
return self.n
model = paddle.Model(nn.Sequential(
nn.Linear(10, 1),
nn.Sigmoid()
))
optim = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
model.prepare(
optim,
loss=nn.BCELoss(),
metrics=paddle.metric.Precision())
data = Data()
model.fit(data, batch_size=16)
"""
def __init__(self, name='precision', *args, **kwargs):
super(Precision, self).__init__(*args, **kwargs)
self.tp = 0 # true positive
self.fp = 0 # false positive
self._name = name
def update(self, preds, labels):
"""
Update the states based on the current mini-batch prediction results.
Args:
preds (numpy.ndarray): The prediction result, usually the output
of two-class sigmoid function. It should be a vector (column
vector or row vector) with data type: 'float64' or 'float32'.
labels (numpy.ndarray): The ground truth (labels),
the shape should keep the same as preds.
The data type is 'int32' or 'int64'.
"""
if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
preds = preds.numpy()
elif not _is_numpy_(preds):
raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")
if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
labels = labels.numpy()
elif not _is_numpy_(labels):
raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
sample_num = labels.shape[0]
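        # Threshold the probabilities at 0.5 to obtain hard 0/1 predictions.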
preds = np.floor(preds + 0.5).astype("int32")
for i in range(sample_num):
pred = preds[i]
label = labels[i]
if pred == 1:
if pred == label:
self.tp += 1
else:
self.fp += 1
def reset(self):
"""
Resets all of the metric state.
"""
self.tp = 0
self.fp = 0
def accumulate(self):
"""
Calculate the final precision.
Returns:
            A scalar float: the calculated precision.
"""
ap = self.tp + self.fp
return float(self.tp) / ap if ap != 0 else .0
def name(self):
"""
Returns metric name
"""
return self._name
class Recall(Metric):
"""
Recall (also known as sensitivity) is the fraction of
relevant instances that have been retrieved over the
total amount of relevant instances
Refer to:
https://en.wikipedia.org/wiki/Precision_and_recall
Noted that this class manages the recall score only for
binary classification task.
Args:
name (str, optional): String name of the metric instance.
Default is `recall`.
Example by standalone:
.. code-block:: python
import numpy as np
import paddle
x = np.array([0.1, 0.5, 0.6, 0.7])
y = np.array([1, 0, 1, 1])
m = paddle.metric.Recall()
m.update(x, y)
res = m.accumulate()
print(res) # 2.0 / 3.0
Example with Model API:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
class Data(paddle.io.Dataset):
def __init__(self):
super(Data, self).__init__()
self.n = 1024
self.x = np.random.randn(self.n, 10).astype('float32')
self.y = np.random.randint(2, size=(self.n, 1)).astype('float32')
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
def __len__(self):
return self.n
model = paddle.Model(nn.Sequential(
nn.Linear(10, 1),
nn.Sigmoid()
))
optim = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
model.prepare(
optim,
loss=nn.BCELoss(),
metrics=[paddle.metric.Precision(), paddle.metric.Recall()])
data = Data()
model.fit(data, batch_size=16)
"""
def __init__(self, name='recall', *args, **kwargs):
super(Recall, self).__init__(*args, **kwargs)
self.tp = 0 # true positive
self.fn = 0 # false negative
self._name = name
def update(self, preds, labels):
"""
Update the states based on the current mini-batch prediction results.
Args:
preds(numpy.array): prediction results of current mini-batch,
the output of two-class sigmoid function.
Shape: [batch_size, 1]. Dtype: 'float64' or 'float32'.
labels(numpy.array): ground truth (labels) of current mini-batch,
the shape should keep the same as preds.
Shape: [batch_size, 1], Dtype: 'int32' or 'int64'.
"""
if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
preds = preds.numpy()
elif not _is_numpy_(preds):
raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")
if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
labels = labels.numpy()
elif not _is_numpy_(labels):
raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
sample_num = labels.shape[0]
preds = np.rint(preds).astype("int32")
for i in range(sample_num):
pred = preds[i]
label = labels[i]
if label == 1:
if pred == label:
self.tp += 1
else:
self.fn += 1
def accumulate(self):
"""
Calculate the final recall.
        Returns:
            A scalar float: the calculated recall.
"""
        actual_positives = self.tp + self.fn  # all actual positives
        return float(self.tp) / actual_positives if actual_positives != 0 else .0
def reset(self):
"""
Resets all of the metric state.
"""
self.tp = 0
self.fn = 0
def name(self):
"""
Returns metric name
"""
return self._name
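if __name__ == '__main__':
    # Hedged joint demo of Precision and Recall (illustrative values).
    # Note a subtlety in the classes above: Precision thresholds with
    # np.floor(p + 0.5) while Recall uses np.rint, which round a score of
    # exactly 0.5 in opposite directions; the values below avoid that edge.
    _preds = np.array([0.1, 0.6, 0.7, 0.8], dtype='float32')
    _labels = np.array([1, 0, 1, 1], dtype='int32')
    _p, _r = Precision(), Recall()
    _p.update(_preds, _labels)
    _r.update(_preds, _labels)
    # rounded preds -> [0, 1, 1, 1]: tp=2, fp=1, fn=1
    print(_p.accumulate(), _r.accumulate())  # 0.666..., 0.666...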
class Auc(Metric):
"""
    The auc metric is for binary classification.

    Refer to https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve.
    Please note that the auc metric is implemented in Python, which may be
    a little slow.

    The `auc` function creates four local variables, `true_positives`,
    `true_negatives`, `false_positives` and `false_negatives`, that are used
    to compute the AUC. To discretize the AUC curve, a linearly spaced set of
    thresholds is used to compute pairs of recall and precision values. The
    area under the ROC curve is then computed from the height of the recall
    values against the false positive rate, while the area under the PR curve
    is computed from the height of the precision values against the recall.

    Args:
        curve (str): Specifies the mode of the curve to be computed,
            'ROC' for the receiver operating characteristic curve or 'PR'
            for the precision-recall curve. Default is 'ROC'.
        num_thresholds (int): The number of thresholds to use when
            discretizing the curve. Default is 4095.
        name (str, optional): String name of the metric instance. Default
            is `auc`.

    NOTE: only the ROC curve type is implemented in Python for now.
Example by standalone:
.. code-block:: python
import numpy as np
import paddle
m = paddle.metric.Auc()
n = 8
class0_preds = np.random.random(size = (n, 1))
class1_preds = 1 - class0_preds
preds = np.concatenate((class0_preds, class1_preds), axis=1)
labels = np.random.randint(2, size = (n, 1))
m.update(preds=preds, labels=labels)
res = m.accumulate()
Example with Model API:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
class Data(paddle.io.Dataset):
def __init__(self):
super(Data, self).__init__()
self.n = 1024
self.x = np.random.randn(self.n, 10).astype('float32')
self.y = np.random.randint(2, size=(self.n, 1)).astype('int64')
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
def __len__(self):
return self.n
model = paddle.Model(nn.Sequential(
nn.Linear(10, 2), nn.Softmax())
)
optim = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
def loss(x, y):
return nn.functional.nll_loss(paddle.log(x), y)
model.prepare(
optim,
loss=loss,
metrics=paddle.metric.Auc())
data = Data()
model.fit(data, batch_size=16)
"""
def __init__(self,
curve='ROC',
num_thresholds=4095,
name='auc',
*args,
**kwargs):
super(Auc, self).__init__(*args, **kwargs)
self._curve = curve
self._num_thresholds = num_thresholds
_num_pred_buckets = num_thresholds + 1
self._stat_pos = np.zeros(_num_pred_buckets)
self._stat_neg = np.zeros(_num_pred_buckets)
self._name = name
def update(self, preds, labels):
"""
Update the auc curve with the given predictions and labels.
Args:
            preds (numpy.array): A numpy array in the shape of
                (batch_size, 2); preds[i][j] denotes the probability of
                classifying instance i into class j.
            labels (numpy.array): A numpy array in the shape of
                (batch_size, 1); labels[i] is either 0 or 1,
                representing the label of instance i.
"""
if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
labels = labels.numpy()
elif not _is_numpy_(labels):
raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
preds = preds.numpy()
elif not _is_numpy_(preds):
raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")
for i, lbl in enumerate(labels):
value = preds[i, 1]
bin_idx = int(value * self._num_thresholds)
assert bin_idx <= self._num_thresholds
if lbl:
self._stat_pos[bin_idx] += 1.0
else:
self._stat_neg[bin_idx] += 1.0
@staticmethod
def trapezoid_area(x1, x2, y1, y2):
return abs(x1 - x2) * (y1 + y2) / 2.0
def accumulate(self):
"""
        Return the area (a float score) under the auc curve.

        Return:
            float: the area under the auc curve.
"""
tot_pos = 0.0
tot_neg = 0.0
auc = 0.0
idx = self._num_thresholds
while idx >= 0:
tot_pos_prev = tot_pos
tot_neg_prev = tot_neg
tot_pos += self._stat_pos[idx]
tot_neg += self._stat_neg[idx]
auc += self.trapezoid_area(tot_neg, tot_neg_prev, tot_pos,
tot_pos_prev)
idx -= 1
return auc / tot_pos / tot_neg if tot_pos > 0.0 and tot_neg > 0.0 else 0.0
def reset(self):
"""
Reset states and result
"""
_num_pred_buckets = self._num_thresholds + 1
self._stat_pos = np.zeros(_num_pred_buckets)
self._stat_neg = np.zeros(_num_pred_buckets)
def name(self):
"""
Returns metric name
"""
return self._name
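if __name__ == '__main__':
    # Hedged check (illustrative): compare the bucketed AUC above with an
    # exact pairwise ROC AUC (O(n^2), fine for a toy input). With 4095
    # thresholds the histogram error is roughly one bucket width, ~1/4096.
    _rng = np.random.default_rng(0)
    _pos = _rng.random((200, 1))
    _preds = np.concatenate([1.0 - _pos, _pos], axis=1)
    _labels = _rng.integers(0, 2, size=(200, 1))
    _m = Auc()
    _m.update(preds=_preds, labels=_labels)
    _p, _q = _pos[_labels == 1], _pos[_labels == 0]
    # Exact AUC: fraction of (positive, negative) pairs ranked correctly.
    _exact = np.mean(_p[:, None] > _q[None, :])
    print(abs(_m.accumulate() - _exact))  # expected to be small (< ~1e-2)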
def accuracy(input, label, k=1, correct=None, total=None, name=None):
"""
    accuracy layer. Refer to https://en.wikipedia.org/wiki/Precision_and_recall

    This function computes the accuracy using the input and label.
    If the correct label occurs in the top k predictions, then correct is
    incremented by one.

    Note: the dtype of accuracy is determined by input; the input and label
    dtypes can be different.

    Args:
        input(Tensor): The input of accuracy layer, which is the predictions of network. A Tensor with type float32 or float64.
            The shape is ``[sample_number, class_dim]`` .
        label(Tensor): The label of dataset. Tensor with type int64. The shape is ``[sample_number, 1]`` .
        k(int, optional): The top k predictions for each class will be checked. Data type is int64 or int32.
        correct(Tensor, optional): The correct predictions count. A Tensor with type int64 or int32.
        total(Tensor, optional): The total entries count. A Tensor with type int64 or int32.
        name(str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the correct rate. A Tensor with type float32.
Examples:
.. code-block:: python
import paddle
predictions = paddle.to_tensor([[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]], dtype='float32')
label = paddle.to_tensor([[2], [0]], dtype="int64")
result = paddle.metric.accuracy(input=predictions, label=label, k=1)
# [0.5]
"""
if _non_static_mode():
if correct is None:
correct = _varbase_creator(dtype="int32")
if total is None:
total = _varbase_creator(dtype="int32")
topk_out, topk_indices = paddle.topk(input, k=k)
_acc, _, _ = _C_ops.accuracy(topk_out, topk_indices, label, correct,
total)
return _acc
helper = LayerHelper("accuracy", **locals())
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
'accuracy')
topk_out, topk_indices = paddle.topk(input, k=k)
acc_out = helper.create_variable_for_type_inference(dtype="float32")
if correct is None:
correct = helper.create_variable_for_type_inference(dtype="int32")
if total is None:
total = helper.create_variable_for_type_inference(dtype="int32")
helper.append_op(
type="accuracy",
inputs={
"Out": [topk_out],
"Indices": [topk_indices],
"Label": [label]
},
outputs={
"Accuracy": [acc_out],
"Correct": [correct],
"Total": [total],
})
return acc_out
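if __name__ == '__main__':
    # Hedged numpy reference for the top-k semantics of `accuracy` above:
    # a sample counts as correct when its label appears among the k highest
    # scoring classes. This is a sketch for intuition, not the Paddle kernel
    # (tie-breaking between equal scores may differ).
    def _topk_accuracy_ref(scores, labels, k=1):
        topk = np.argsort(scores, axis=1)[:, -k:]           # k best class ids
        hits = (topk == labels.reshape(-1, 1)).any(axis=1)  # label in top k?
        return hits.mean()

    _scores = np.array([[0.2, 0.1, 0.4, 0.1, 0.1],
                        [0.2, 0.3, 0.1, 0.15, 0.25]])
    print(_topk_accuracy_ref(_scores, np.array([2, 0])))  # 0.5, as in docstring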
author: sandyhouse.noreply@github.com

path: Stacks and Queues/Stack Min.py | repo: Kabileshj/college-works | branch: refs/heads/master | license: none | language: Python | encoding: UTF-8 | size: 1,830 bytes | revision date: 2020-07-09
class Node:
def __init__(self, value):
self.value = value
self.next = None
def __str__(self):
return "Node({})".format(self.value)
__repr__ = __str__
class Stack:
def __init__(self):
self.top = None
self.count = 0
self.minimum = None
def __str__(self):
temp = self.top
out = []
while temp:
out.append(str(temp.value))
temp = temp.next
out = '\n'.join(out)
return ('Top {} \n\nStack :\n{}'.format(self.top,out))
__repr__=__str__
    def getMin(self):
        if self.top is None:
            return "Stack is empty"
        print("Minimum Element in the stack is: {}".format(self.minimum))
    def isEmpty(self):
        return self.top is None
    def __len__(self):
        self.count = 0
        tempNode = self.top
        while tempNode:
            tempNode = tempNode.next
            self.count += 1
        return self.count
    def push(self, value):
        if self.top is None:
            self.top = Node(value)
            self.minimum = value
        elif value < self.minimum:
            # New minimum: store the encoded value 2*value - old_min instead
            # of value. Since value < old_min, the encoded value is < value,
            # which lets pop() detect this node and recover the old minimum.
            temp = (2 * value) - self.minimum
            new_node = Node(temp)
            new_node.next = self.top
            self.top = new_node
            self.minimum = value
        else:
            new_node = Node(value)
            new_node.next = self.top
            self.top = new_node
    def pop(self):
        if self.top is None:
            print("Stack is empty")
        else:
            removedNode = self.top.value
            self.top = self.top.next
            if removedNode < self.minimum:
                # Encoded node: the real value is the current minimum, and
                # the previous minimum is recovered as 2*min - encoded.
                print("Top Most Element Removed: {}".format(self.minimum))
                self.minimum = (2 * self.minimum) - removedNode
            else:
                print("Top Most Element Removed: {}".format(removedNode))
# Driver code
stack = Stack()
for i in [int(i) for i in input().split()]:
stack.push(i)
stack.getMin()
stack.pop()
stack.getMin()
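# Hedged walkthrough of the encoding invariant (fixed input, illustrative):
# pushing [5, 3, 2] stores 3 as 2*3 - 5 = 1 and 2 as 2*2 - 3 = 1; popping an
# encoded node rolls the minimum back one step.
demo = Stack()
for v in [5, 3, 2]:
    demo.push(v)
demo.getMin()  # Minimum Element in the stack is: 2
demo.pop()     # Top Most Element Removed: 2
demo.getMin()  # Minimum Element in the stack is: 3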
author: Kabileshj.noreply@github.com

path: /buffpy/__init__.py | repo: holdenk/buffer-python | branch: refs/heads/master | license: MIT | language: Python | encoding: UTF-8 | size: 43 bytes | revision date: 2018-09-26
# Package entry point: re-export the public names from the api and
# response submodules so they are importable directly from buffpy.
from .api import *
from .response import *
author: vladtemian@gmail.com

path: /edts/wizard/signature_wizard.py | repo: hassanfadl/vendor_portal_app | branch: refs/heads/main | license: none | language: Python | encoding: UTF-8 | size: 2,273 bytes | revision date: 2021-10-30
from odoo import fields, models, api
from datetime import datetime
class EDTSSignatureWizard(models.TransientModel):
_name = 'edts.signature.wizard'
_description = 'EDTS Signature Wizard'
account_move_id = fields.Many2one('account.move', string='Account Move')
liquidation_reference_id = fields.Many2one('edts.liquidation.reference', string='EDTS Liquidation Reference')
action = fields.Selection([
('approve', 'Approve'),
('validate', 'Validate'),
], string='Action', default=False)
dept_head_signature = fields.Binary(string="Department Head's Signature")
accounting_signature = fields.Binary(string="Accounting Department's Signature")
    def signature_action_proceed(self):
        """Apply the captured signature to the linked record and advance
        its workflow status according to the selected action."""
        if self.account_move_id:
            if self.action == 'approve':
                self.account_move_id.dept_head_signature = self.dept_head_signature
                self.account_move_id.set_waiting_for_accounting_status()
                self.account_move_id.send_edts_for_validation_acctg_email()
            elif self.action == 'validate':
                self.account_move_id.accounting_signature = self.accounting_signature
                # Accrual subtypes skip the accounting-processing step.
                if self.account_move_id.edts_subtype in ['agency_contracts_accruals', 'recurring_transactions_accruals']:
                    self.account_move_id.set_ongoing_status()
                else:
                    self.account_move_id.set_processing_accounting_status()
        if self.liquidation_reference_id:
            if self.action == 'approve':
                self.liquidation_reference_id.dept_head_signature = self.dept_head_signature
                self.liquidation_reference_id.status = 'waiting_for_accounting'
                self.liquidation_reference_id.approved_by = self._uid
                self.liquidation_reference_id.approved_date = datetime.now()
            elif self.action == 'validate':
                self.liquidation_reference_id.accounting_signature = self.accounting_signature
                self.liquidation_reference_id.status = 'validated'
                self.liquidation_reference_id.validated_by = self._uid
                self.liquidation_reference_id.validated_date = datetime.now()
        return {'type': 'ir.actions.act_window_close'}
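def _demo_sign_move(env, move, sig_png):
    """Hedged usage sketch (hypothetical helper, not part of the original
    module): drive the wizard from server-side code using only standard
    Odoo ORM calls. `move` is assumed to be an account.move recordset and
    `sig_png` a base64-encoded signature image supplied by the caller."""
    wizard = env['edts.signature.wizard'].create({
        'account_move_id': move.id,
        'action': 'approve',
        'dept_head_signature': sig_png,
    })
    return wizard.signature_action_proceed()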
author: dennisboysilva@gmail.com