blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e3c07ab1c0f8f850ba14972383d4621221272771 | f36be49208238248fa155311a8fa115e3e672efb | /faq/migrations/0001_initial.py | 6eb45e075d0be3bdde3f88467367a6da286b3f51 | [
"MIT"
] | permissive | Sejong-Creative-Semester2021/OJ-BE | 71eb3bd88ce6767952227811d922000a9f407867 | cecc511b771f1979ba7a556abdae1cbefa8e17bd | refs/heads/main | 2023-08-14T03:23:54.029155 | 2021-09-17T13:00:45 | 2021-09-17T13:00:45 | 397,147,494 | 0 | 0 | MIT | 2021-08-30T04:03:01 | 2021-08-17T07:12:44 | Python | UTF-8 | Python | false | false | 1,141 | py | # Generated by Django 3.2.6 on 2021-09-17 12:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import utils.models
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``faq`` table.

    Generated by Django 3.2.6; the columns mirror the FAQ model fields.
    """

    # First migration of this app.
    initial = True

    dependencies = [
        # created_by below references the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='FAQ',
            fields=[
                # Auto-increment surrogate primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question', models.TextField()),
                # Project-specific rich-text field for the answer body.
                ('answer', utils.models.RichTextField()),
                # Set once when the row is inserted.
                ('create_time', models.DateTimeField(auto_now_add=True)),
                # Refreshed on every save.
                ('last_update_time', models.DateTimeField(auto_now=True)),
                # Allows hiding an entry without deleting it.
                ('visible', models.BooleanField(default=True)),
                # Deleting the author cascades to their FAQ entries.
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'faq',
                'ordering': ('-create_time',),
            },
        ),
    ]
| [
"mksin00@naver.com"
] | mksin00@naver.com |
d8229a35567ff7594f50dbb89b7cea36bec123ac | 148125096da896fd93292d2cd408265d159fec28 | /qa/rpc-tests/p2p-acceptblock.py | 2267768dbfeb2685302144171cfdd388f4355b4c | [
"MIT"
] | permissive | lycion/lkcoinse | 7cfbcbdfc1e98f20d9dfc497ea65fd75ca6de25d | 9cf9ed5730217566b44466c22dc255f0134ad1bb | refs/heads/master | 2020-03-30T03:24:44.245148 | 2018-09-28T04:55:30 | 2018-09-28T04:55:30 | 150,548,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,678 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Lkcoinse Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import test_framework.loginit
from test_framework.mininode import *
from test_framework.test_framework import LkcoinseTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b.Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
    """Bare-bones p2p peer.

    Used mostly as a conduit for the test to send p2p messages to a node;
    the messages themselves are generated in the main testing logic.
    """

    def __init__(self):
        NodeConnCB.__init__(self)
        self.connection = None
        self.ping_counter = 1
        self.last_pong = msg_pong()

    def add_connection(self, conn):
        """Remember the NodeConn this peer communicates through."""
        self.connection = conn

    def on_getdata(self, conn, message):
        # Track the last getdata message we receive (inspected by the test).
        self.last_getdata = message

    def wait_for_verack(self):
        """Spin until the node's verack arrives.

        Signals that the test can begin.  This runs on the testing thread,
        so every peek at the shared flag must hold the global mininode lock.
        """
        while True:
            with mininode_lock:
                done = self.verack_received
            if done:
                return
            time.sleep(0.05)

    def send_message(self, message):
        """Thin wrapper around the NodeConn's send_message."""
        self.connection.send_message(message)

    def on_pong(self, conn, message):
        self.last_pong = message

    def sync_with_ping(self, timeout=30):
        """Round-trip a ping/pong to sync up after delivering a block.

        Returns True when the matching pong arrived within *timeout*
        seconds, False otherwise.
        """
        self.connection.send_message(msg_ping(nonce=self.ping_counter))
        poll_interval = 0.05
        remaining = timeout
        matched = False
        while remaining > 0:
            time.sleep(poll_interval)
            remaining -= poll_interval
            with mininode_lock:
                if self.last_pong.nonce == self.ping_counter:
                    matched = True
            if matched:
                break
        self.ping_counter += 1
        return matched
class AcceptBlockTest(LkcoinseTestFramework):
    """Test processing of unrequested blocks (see module docstring).

    node0 exercises the non-whitelisted path, node1 the whitelisted path;
    the two nodes are intentionally never connected to each other.
    """

    def add_options(self, parser):
        # Allow the binary under test to be overridden from the command line
        # or the LKCOINSED environment variable.
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("LKCOINSED", "lkcoinsed"),
                          help="lkcoinsed binary to test")

    def setup_chain(self):
        # Two fresh, empty data directories — one per node.
        initialize_chain_clean(self.options.tmpdir, 2)

    def setup_network(self):
        # Node0 will be used to test behavior of processing unrequested blocks
        # from peers which are not whitelisted, while Node1 will be used for
        # the whitelisted case.
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
                                     binary=self.options.testbinary))
        self.nodes.append(start_node(1, self.options.tmpdir,
                                     ["-debug", "-whitelist=127.0.0.1"],
                                     binary=self.options.testbinary))

    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        test_node = TestNode()   # connects to node0 (not whitelisted)
        white_node = TestNode()  # connects to node1 (whitelisted)

        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
        test_node.add_connection(connections[0])
        white_node.add_connection(connections[1])

        NetworkThread().start()  # Start up network handling in another thread

        # Test logic begins here
        test_node.wait_for_verack()
        white_node.wait_for_verack()

        # 1. Have both nodes mine a block (leave IBD)
        for n in self.nodes:
            n.generate(1)
        tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]

        # 2. Send one block that builds on each tip.
        # This should be accepted.
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in range(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        white_node.send_message(msg_block(blocks_h2[1]))

        for peer in (test_node, white_node):
            peer.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 2)
        print("First height 2 block accepted by both nodes")

        # 3. Send another block that builds on the original tip.
        blocks_h2f = []  # Blocks at height 2 that fork off the main chain
        for i in range(2):
            blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime + 1))
            blocks_h2f[i].solve()
        test_node.send_message(msg_block(blocks_h2f[0]))
        white_node.send_message(msg_block(blocks_h2f[1]))

        for peer in (test_node, white_node):
            peer.sync_with_ping()
        # node0 should only have accepted the header (unrequested, no more work)...
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h2f[0].hash:
                assert_equal(x['status'], "headers-only")
        # ...while node1 processed it because it came from a whitelisted peer.
        for x in self.nodes[1].getchaintips():
            if x['hash'] == blocks_h2f[1].hash:
                assert_equal(x['status'], "valid-headers")
        print("Second height 2 block accepted only from whitelisted peer")

        # 4. Now send another block that builds on the forking chain.
        blocks_h3 = []
        for i in range(2):
            blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime + 1))
            blocks_h3[i].solve()
        test_node.send_message(msg_block(blocks_h3[0]))
        white_node.send_message(msg_block(blocks_h3[1]))

        for peer in (test_node, white_node):
            peer.sync_with_ping()
        # Since the earlier block was not processed by node0, the new block
        # can't be fully validated.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h3[0].hash:
                assert_equal(x['status'], "headers-only")

        # But this block should be accepted by node0 since it has more work.
        # (Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # are not swallowed.)
        try:
            self.nodes[0].getblock(blocks_h3[0].hash)
        except Exception:
            raise AssertionError("Unrequested more work block was not processed")
        print("Unrequested more-work block accepted from non-whitelisted peer")

        # Node1 should have accepted and reorged.
        assert_equal(self.nodes[1].getblockcount(), 3)
        print("Successfully reorged to length 3 chain from whitelisted peer")

        # 4b. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node0. Node1 should process the tip if
        # we give it the headers chain leading to the tip.
        tips = blocks_h3
        headers_message = msg_headers()
        all_blocks = []  # node0's blocks
        for j in range(2):
            for i in range(288):
                next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime + 1)
                next_block.solve()
                if j == 0:
                    test_node.send_message(msg_block(next_block))
                    all_blocks.append(next_block)
                else:
                    headers_message.headers.append(CBlockHeader(next_block))
                tips[j] = next_block

        test_node.sync_with_ping()
        time.sleep(2)
        for x in all_blocks:
            # BUGFIX: the original raised the "should have been ignored"
            # AssertionError *inside* the try block, where its own bare
            # `except:` immediately swallowed it — the far-ahead check could
            # never fail. Probe first, then assert outside the try.
            try:
                self.nodes[0].getblock(x.hash)
                processed = True
            except Exception:
                processed = False
            if x == all_blocks[287]:
                if processed:
                    raise AssertionError("Unrequested block too far-ahead should have been ignored")
                print("Unrequested block too far-ahead not processed")
            elif not processed:
                raise AssertionError("Unrequested block with more work should have been accepted")

        headers_message.headers.pop()  # Ensure the last block is unrequested
        white_node.send_message(headers_message)  # Send headers leading to tip
        white_node.send_message(msg_block(tips[1]))  # Now deliver the tip
        try:
            white_node.sync_with_ping()
            self.nodes[1].getblock(tips[1].hash)
        except Exception:
            raise AssertionError("Unrequested block from whitelisted peer not accepted")
        print("Unrequested block far ahead of tip accepted from whitelisted peer")

        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        test_node.send_message(msg_block(blocks_h2f[0]))

        # Here, if the sleep is too short, the test could falsely succeed (if the
        # node hasn't processed the block by the time the sleep returns, and then
        # the node processes it and incorrectly advances the tip).
        # But this would be caught later on, when we verify that an inv triggers
        # a getdata request for this block.
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        print("Unrequested block that would complete more-work chain was ignored")

        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_getdata = None
            test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))

        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_getdata

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
        print("Inv at tip triggered getdata for unprocessed block")

        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(blocks_h2f[0]))
        test_node.sync_with_ping()

        # Wait for the reorg to complete; it can be slower on some systems.
        # BUGFIX: the original reused the leftover loop variable `j` (left at 1
        # by the `for j in range(2)` loop above) as its timeout counter; use a
        # dedicated counter with an explicit 60-second cap instead.
        waited = 0
        while self.nodes[0].getblockcount() != 290 and waited < 60:
            time.sleep(1)
            waited += 1
        assert_equal(self.nodes[0].getblockcount(), 290)
        print("Successfully reorged to longer chain from non-whitelisted peer")

        for c in connections:
            c.disconnect_node()
# Script entry point: run the acceptance test when executed directly.
if __name__ == '__main__':
    AcceptBlockTest().main()
| [
"lycion@gmail.com"
] | lycion@gmail.com |
948f743d6e0bc327c25c496c6faeecf388f602ac | 569970f62eca0ff81e1f3aaca7f14c08021fa1a3 | /example1.py | 459893bf3f7a622ea4c88414a80137553c732951 | [
"MIT"
] | permissive | u9n/enron-modbus | c958c0af7408dcbc6efe3b526b55eba694e63c98 | 68418a07dbbd8b6e4763e4d1cb8eb2915ff040d7 | refs/heads/main | 2023-08-17T01:07:55.522410 | 2023-08-08T12:33:17 | 2023-08-08T12:33:17 | 377,763,911 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | from enron_modbus.client import EnronModbusClient
from enron_modbus.transports import SerialTransport
# Example: exercise an Enron Modbus device over a serial line.
serial_link = SerialTransport(port="/dev/tty.usbserial", baudrate=9600)
modbus = EnronModbusClient(transport=serial_link)

modbus.connect()

# Read a run of numeric registers, then a single one.
print(modbus.read_numerics(1, 5160, 6))
print(modbus.read_numeric(1, 5160))

# Boolean (coil) reads of several sizes.
print(modbus.read_booleans(1, 1010, 2))
print(modbus.read_booleans(1, 1010, 33))
print(modbus.read_boolean(1, 1010))

# Write a boolean and read it back.
modbus.write_boolean(1, 1010, True)
print(modbus.read_boolean(1, 1010))

# Write a numeric register and read it back.
modbus.write_numeric(1, 7001, 46)
print(modbus.read_numeric(1, 7001))

modbus.disconnect()
| [
"henrik@pwit.se"
] | henrik@pwit.se |
ce5dade7d36a431e3ec81dade64648f6c22eca35 | 7832e7dc8f1583471af9c08806ce7f1117cd228a | /aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/RunClusterServiceActionRequest.py | eb1c959505c70fd4e06aa43388665c4d9f9b06a3 | [
"Apache-2.0"
] | permissive | dianplus/aliyun-openapi-python-sdk | d6494850ddf0e66aaf04607322f353df32959725 | 6edf1ed02994245dae1d1b89edc6cce7caa51622 | refs/heads/master | 2023-04-08T11:35:36.216404 | 2017-11-02T12:01:15 | 2017-11-02T12:01:15 | 109,257,597 | 0 | 0 | NOASSERTION | 2023-03-23T17:59:30 | 2017-11-02T11:44:27 | Python | UTF-8 | Python | false | false | 3,508 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class RunClusterServiceActionRequest(RpcRequest):
    """Request object for the Aliyun EMR ``RunClusterServiceAction`` RPC API.

    Auto-generated accessor boilerplate: each API query parameter is exposed
    through a get_/set_ pair that reads from / writes to the query-parameter
    dict maintained by the RpcRequest base class.
    """

    def __init__(self):
        # Product 'Emr', API version '2016-04-08', action 'RunClusterServiceAction'.
        RpcRequest.__init__(self, 'Emr', '2016-04-08', 'RunClusterServiceAction')

    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self,ResourceOwnerId):
        self.add_query_param('ResourceOwnerId',ResourceOwnerId)

    def get_ClusterId(self):
        return self.get_query_params().get('ClusterId')

    def set_ClusterId(self,ClusterId):
        self.add_query_param('ClusterId',ClusterId)

    def get_HostIdList(self):
        return self.get_query_params().get('HostIdList')

    def set_HostIdList(self,HostIdList):
        self.add_query_param('HostIdList',HostIdList)

    def get_ServiceName(self):
        return self.get_query_params().get('ServiceName')

    def set_ServiceName(self,ServiceName):
        self.add_query_param('ServiceName',ServiceName)

    def get_ServiceActionName(self):
        return self.get_query_params().get('ServiceActionName')

    def set_ServiceActionName(self,ServiceActionName):
        self.add_query_param('ServiceActionName',ServiceActionName)

    def get_CustomCommand(self):
        return self.get_query_params().get('CustomCommand')

    def set_CustomCommand(self,CustomCommand):
        self.add_query_param('CustomCommand',CustomCommand)

    def get_ComponentNameList(self):
        return self.get_query_params().get('ComponentNameList')

    def set_ComponentNameList(self,ComponentNameList):
        self.add_query_param('ComponentNameList',ComponentNameList)

    def get_Comment(self):
        return self.get_query_params().get('Comment')

    def set_Comment(self,Comment):
        self.add_query_param('Comment',Comment)

    def get_IsRolling(self):
        return self.get_query_params().get('IsRolling')

    def set_IsRolling(self,IsRolling):
        self.add_query_param('IsRolling',IsRolling)

    def get_NodeCountPerBatch(self):
        return self.get_query_params().get('NodeCountPerBatch')

    def set_NodeCountPerBatch(self,NodeCountPerBatch):
        self.add_query_param('NodeCountPerBatch',NodeCountPerBatch)

    def get_TotlerateFailCount(self):
        # NOTE(review): parameter name is misspelled ("Totlerate") but must
        # match the server-side API parameter — do not "fix" it here.
        return self.get_query_params().get('TotlerateFailCount')

    def set_TotlerateFailCount(self,TotlerateFailCount):
        self.add_query_param('TotlerateFailCount',TotlerateFailCount)

    def get_OnlyRestartStaleConfigNodes(self):
        return self.get_query_params().get('OnlyRestartStaleConfigNodes')

    def set_OnlyRestartStaleConfigNodes(self,OnlyRestartStaleConfigNodes):
        self.add_query_param('OnlyRestartStaleConfigNodes',OnlyRestartStaleConfigNodes)

    def get_TurnOnMaintenanceMode(self):
        return self.get_query_params().get('TurnOnMaintenanceMode')
def set_TurnOnMaintenanceMode(self,TurnOnMaintenanceMode):
self.add_query_param('TurnOnMaintenanceMode',TurnOnMaintenanceMode) | [
"haowei.yao@alibaba-inc.com"
] | haowei.yao@alibaba-inc.com |
1d0914e57305a093924e35492cdf55af4109608e | 5805bf03876af7f32e75fbc467b257768676ce42 | /compadre-appengine/compadre-server/run.py | 5b81242d833dbdb0fafb4183df8d100cd0b475bd | [] | no_license | mtaziz/Compadre | 53ce8c717674886cadb16c81f2d87bdc7a11fe78 | 83473b404597dcf7140e8a435ac4c1e4d894020f | refs/heads/master | 2021-01-17T15:54:39.716474 | 2016-06-20T16:57:01 | 2016-06-20T16:57:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | import os
import sys
sys.path.insert(1, os.path.join(os.path.abspath('.'), 'lib'))
import application
| [
"yonatano@gmail.com"
] | yonatano@gmail.com |
91f7b4d2efaf48ed26bfcc96e2670ac062a664fe | 6515c886cc420539bed05b2250c76e1c6974e5da | /models/mxnet_resnet_50.py | 708dbb07c13c01468c1d3fe4962f17ca8206bfd6 | [] | no_license | yuanmengzhixing/pytorch_deep_metric_learning | a320fd4e8863b9b8c3768b61e46027ccfc2077ee | b57621355a49af89573447c72685694043548434 | refs/heads/master | 2020-03-22T23:10:11.622231 | 2018-03-11T08:02:56 | 2018-03-11T08:02:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,697 | py | import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
__weights_dict = dict()
pre_trained_path = '/home/zhengxiawu/project/pytorch_deep_metric_learning/pretrained_models/kit_pytorch.npy'
#pre_trained_path = '/home/zhengxiawu/deep_learning/model/mxnet_2_resnet/mx2pt_resnet_50.npy'
#pre_trained_path = '/home/zhengxiawu/project/pytorch_deep_metric_learning/pretrained_models/resnet_50.npy'
pre_trained_path = '/home/zhengxiawu/deep_learning/model/mxnet_2_resnet/resnet_50_pytorch.npy'
def load_weights():
    """Load the pretrained weight dictionary from ``pre_trained_path``.

    The file is a numpy-saved pickled dict (layer name -> parameters).
    Returns that dict.
    """
    try:
        weights_dict = np.load(pre_trained_path).item()
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  The fallback handles weight files pickled under
        # Python 2, which need byte decoding to load under Python 3.
        weights_dict = np.load(pre_trained_path, encoding='bytes').item()
    return weights_dict
class mxnet_resnet_50(nn.Module):
def __init__(self, **kwargs):
super(mxnet_resnet_50, self).__init__()
num_class = kwargs['num_class']
if kwargs['pretrain']:
global __weights_dict
__weights_dict = load_weights()
self.conv1 = self.__conv(2, name='conv1', in_channels=3, out_channels=64, kernel_size=(7L, 7L), stride=(2L, 2L),
groups=1, bias=True)
self.bn_conv1 = self.__batch_normalization(2, 'bn_conv1', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2a_branch1 = self.__conv(2, name='res2a_branch1', in_channels=64, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.res2a_branch2a = self.__conv(2, name='res2a_branch2a', in_channels=64, out_channels=64,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn2a_branch1 = self.__batch_normalization(2, 'bn2a_branch1', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.bn2a_branch2a = self.__batch_normalization(2, 'bn2a_branch2a', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2a_branch2b = self.__conv(2, name='res2a_branch2b', in_channels=64, out_channels=64,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn2a_branch2b = self.__batch_normalization(2, 'bn2a_branch2b', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2a_branch2c = self.__conv(2, name='res2a_branch2c', in_channels=64, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn2a_branch2c = self.__batch_normalization(2, 'bn2a_branch2c', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2b_branch2a = self.__conv(2, name='res2b_branch2a', in_channels=256, out_channels=64,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn2b_branch2a = self.__batch_normalization(2, 'bn2b_branch2a', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2b_branch2b = self.__conv(2, name='res2b_branch2b', in_channels=64, out_channels=64,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn2b_branch2b = self.__batch_normalization(2, 'bn2b_branch2b', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2b_branch2c = self.__conv(2, name='res2b_branch2c', in_channels=64, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn2b_branch2c = self.__batch_normalization(2, 'bn2b_branch2c', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2c_branch2a = self.__conv(2, name='res2c_branch2a', in_channels=256, out_channels=64,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn2c_branch2a = self.__batch_normalization(2, 'bn2c_branch2a', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2c_branch2b = self.__conv(2, name='res2c_branch2b', in_channels=64, out_channels=64,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn2c_branch2b = self.__batch_normalization(2, 'bn2c_branch2b', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2c_branch2c = self.__conv(2, name='res2c_branch2c', in_channels=64, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn2c_branch2c = self.__batch_normalization(2, 'bn2c_branch2c', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3a_branch1 = self.__conv(2, name='res3a_branch1', in_channels=256, out_channels=512,
kernel_size=(1L, 1L), stride=(2L, 2L), groups=1, bias=False)
self.res3a_branch2a = self.__conv(2, name='res3a_branch2a', in_channels=256, out_channels=128,
kernel_size=(1L, 1L), stride=(2L, 2L), groups=1, bias=False)
self.bn3a_branch1 = self.__batch_normalization(2, 'bn3a_branch1', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.bn3a_branch2a = self.__batch_normalization(2, 'bn3a_branch2a', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3a_branch2b = self.__conv(2, name='res3a_branch2b', in_channels=128, out_channels=128,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn3a_branch2b = self.__batch_normalization(2, 'bn3a_branch2b', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3a_branch2c = self.__conv(2, name='res3a_branch2c', in_channels=128, out_channels=512,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3a_branch2c = self.__batch_normalization(2, 'bn3a_branch2c', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3b_branch2a = self.__conv(2, name='res3b_branch2a', in_channels=512, out_channels=128,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3b_branch2a = self.__batch_normalization(2, 'bn3b_branch2a', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3b_branch2b = self.__conv(2, name='res3b_branch2b', in_channels=128, out_channels=128,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn3b_branch2b = self.__batch_normalization(2, 'bn3b_branch2b', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3b_branch2c = self.__conv(2, name='res3b_branch2c', in_channels=128, out_channels=512,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3b_branch2c = self.__batch_normalization(2, 'bn3b_branch2c', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3c_branch2a = self.__conv(2, name='res3c_branch2a', in_channels=512, out_channels=128,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3c_branch2a = self.__batch_normalization(2, 'bn3c_branch2a', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3c_branch2b = self.__conv(2, name='res3c_branch2b', in_channels=128, out_channels=128,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn3c_branch2b = self.__batch_normalization(2, 'bn3c_branch2b', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3c_branch2c = self.__conv(2, name='res3c_branch2c', in_channels=128, out_channels=512,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3c_branch2c = self.__batch_normalization(2, 'bn3c_branch2c', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3d_branch2a = self.__conv(2, name='res3d_branch2a', in_channels=512, out_channels=128,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3d_branch2a = self.__batch_normalization(2, 'bn3d_branch2a', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3d_branch2b = self.__conv(2, name='res3d_branch2b', in_channels=128, out_channels=128,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn3d_branch2b = self.__batch_normalization(2, 'bn3d_branch2b', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3d_branch2c = self.__conv(2, name='res3d_branch2c', in_channels=128, out_channels=512,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3d_branch2c = self.__batch_normalization(2, 'bn3d_branch2c', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4a_branch1 = self.__conv(2, name='res4a_branch1', in_channels=512, out_channels=1024,
kernel_size=(1L, 1L), stride=(2L, 2L), groups=1, bias=False)
self.res4a_branch2a = self.__conv(2, name='res4a_branch2a', in_channels=512, out_channels=256,
kernel_size=(1L, 1L), stride=(2L, 2L), groups=1, bias=False)
self.bn4a_branch1 = self.__batch_normalization(2, 'bn4a_branch1', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.bn4a_branch2a = self.__batch_normalization(2, 'bn4a_branch2a', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4a_branch2b = self.__conv(2, name='res4a_branch2b', in_channels=256, out_channels=256,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn4a_branch2b = self.__batch_normalization(2, 'bn4a_branch2b', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4a_branch2c = self.__conv(2, name='res4a_branch2c', in_channels=256, out_channels=1024,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4a_branch2c = self.__batch_normalization(2, 'bn4a_branch2c', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4b_branch2a = self.__conv(2, name='res4b_branch2a', in_channels=1024, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4b_branch2a = self.__batch_normalization(2, 'bn4b_branch2a', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4b_branch2b = self.__conv(2, name='res4b_branch2b', in_channels=256, out_channels=256,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn4b_branch2b = self.__batch_normalization(2, 'bn4b_branch2b', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4b_branch2c = self.__conv(2, name='res4b_branch2c', in_channels=256, out_channels=1024,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4b_branch2c = self.__batch_normalization(2, 'bn4b_branch2c', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4c_branch2a = self.__conv(2, name='res4c_branch2a', in_channels=1024, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4c_branch2a = self.__batch_normalization(2, 'bn4c_branch2a', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4c_branch2b = self.__conv(2, name='res4c_branch2b', in_channels=256, out_channels=256,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn4c_branch2b = self.__batch_normalization(2, 'bn4c_branch2b', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4c_branch2c = self.__conv(2, name='res4c_branch2c', in_channels=256, out_channels=1024,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4c_branch2c = self.__batch_normalization(2, 'bn4c_branch2c', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4d_branch2a = self.__conv(2, name='res4d_branch2a', in_channels=1024, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4d_branch2a = self.__batch_normalization(2, 'bn4d_branch2a', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4d_branch2b = self.__conv(2, name='res4d_branch2b', in_channels=256, out_channels=256,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn4d_branch2b = self.__batch_normalization(2, 'bn4d_branch2b', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4d_branch2c = self.__conv(2, name='res4d_branch2c', in_channels=256, out_channels=1024,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4d_branch2c = self.__batch_normalization(2, 'bn4d_branch2c', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4e_branch2a = self.__conv(2, name='res4e_branch2a', in_channels=1024, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4e_branch2a = self.__batch_normalization(2, 'bn4e_branch2a', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4e_branch2b = self.__conv(2, name='res4e_branch2b', in_channels=256, out_channels=256,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn4e_branch2b = self.__batch_normalization(2, 'bn4e_branch2b', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4e_branch2c = self.__conv(2, name='res4e_branch2c', in_channels=256, out_channels=1024,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4e_branch2c = self.__batch_normalization(2, 'bn4e_branch2c', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4f_branch2a = self.__conv(2, name='res4f_branch2a', in_channels=1024, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4f_branch2a = self.__batch_normalization(2, 'bn4f_branch2a', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4f_branch2b = self.__conv(2, name='res4f_branch2b', in_channels=256, out_channels=256,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn4f_branch2b = self.__batch_normalization(2, 'bn4f_branch2b', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4f_branch2c = self.__conv(2, name='res4f_branch2c', in_channels=256, out_channels=1024,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4f_branch2c = self.__batch_normalization(2, 'bn4f_branch2c', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5a_branch1 = self.__conv(2, name='res5a_branch1', in_channels=1024, out_channels=2048,
kernel_size=(1L, 1L), stride=(2L, 2L), groups=1, bias=False)
self.res5a_branch2a = self.__conv(2, name='res5a_branch2a', in_channels=1024, out_channels=512,
kernel_size=(1L, 1L), stride=(2L, 2L), groups=1, bias=False)
self.bn5a_branch1 = self.__batch_normalization(2, 'bn5a_branch1', num_features=2048, eps=9.99999974738e-05,
momentum=0.899999976158)
self.bn5a_branch2a = self.__batch_normalization(2, 'bn5a_branch2a', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5a_branch2b = self.__conv(2, name='res5a_branch2b', in_channels=512, out_channels=512,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn5a_branch2b = self.__batch_normalization(2, 'bn5a_branch2b', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5a_branch2c = self.__conv(2, name='res5a_branch2c', in_channels=512, out_channels=2048,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn5a_branch2c = self.__batch_normalization(2, 'bn5a_branch2c', num_features=2048, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5b_branch2a = self.__conv(2, name='res5b_branch2a', in_channels=2048, out_channels=512,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn5b_branch2a = self.__batch_normalization(2, 'bn5b_branch2a', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5b_branch2b = self.__conv(2, name='res5b_branch2b', in_channels=512, out_channels=512,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn5b_branch2b = self.__batch_normalization(2, 'bn5b_branch2b', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5b_branch2c = self.__conv(2, name='res5b_branch2c', in_channels=512, out_channels=2048,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn5b_branch2c = self.__batch_normalization(2, 'bn5b_branch2c', num_features=2048, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5c_branch2a = self.__conv(2, name='res5c_branch2a', in_channels=2048, out_channels=512,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn5c_branch2a = self.__batch_normalization(2, 'bn5c_branch2a', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5c_branch2b = self.__conv(2, name='res5c_branch2b', in_channels=512, out_channels=512,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn5c_branch2b = self.__batch_normalization(2, 'bn5c_branch2b', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5c_branch2c = self.__conv(2, name='res5c_branch2c', in_channels=512, out_channels=2048,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn5c_branch2c = self.__batch_normalization(2, 'bn5c_branch2c', num_features=2048, eps=9.99999974738e-05,
momentum=0.899999976158)
self.class_fc = nn.Linear(4096, num_class)
nn.init.xavier_uniform(self.class_fc._parameters['weight'],gain=0.624)
nn.init.constant(self.class_fc._parameters['weight'],0)
    def forward(self, x, **kwargs):
        """Forward pass of a ResNet-50-style bottleneck network (stages res2-res5).

        Python 2 code: the ``3L``-style long literals below are Py2-only.

        Required kwargs:
            scda (bool): if True, mask the final feature map by the SCDA
                indicator (channel-sum above its spatial mean) before pooling.
            scale: multiplier applied to the L2-normalized pooled descriptor.
            is_train (bool): if True, additionally apply ``self.class_fc``.
        """
        # Stem: pad -> 7x7 conv -> BN -> ReLU -> 3x3/2 max-pool.
        conv1_pad = F.pad(x, (3L, 3L, 3L, 3L))
        conv1 = self.conv1(conv1_pad)
        # conv1_numpy = conv1.data.cpu().numpy()
        # param_numpy = self.conv1._parameters['weight'].data.cpu().numpy()
        bn_conv1 = self.bn_conv1(conv1)
        conv1_relu = F.relu(bn_conv1)
        pool1 = F.max_pool2d(conv1_relu, kernel_size=(3L, 3L), stride=(2L, 2L))
        # ---- stage res2: 1 projection block (branch1 shortcut) + 2 identity blocks ----
        res2a_branch1 = self.res2a_branch1(pool1)
        res2a_branch2a = self.res2a_branch2a(pool1)
        bn2a_branch1 = self.bn2a_branch1(res2a_branch1)
        bn2a_branch2a = self.bn2a_branch2a(res2a_branch2a)
        res2a_branch2a_relu = F.relu(bn2a_branch2a)
        res2a_branch2b_pad = F.pad(res2a_branch2a_relu, (1L, 1L, 1L, 1L))
        res2a_branch2b = self.res2a_branch2b(res2a_branch2b_pad)
        bn2a_branch2b = self.bn2a_branch2b(res2a_branch2b)
        res2a_branch2b_relu = F.relu(bn2a_branch2b)
        res2a_branch2c = self.res2a_branch2c(res2a_branch2b_relu)
        bn2a_branch2c = self.bn2a_branch2c(res2a_branch2c)
        res2a = bn2a_branch1 + bn2a_branch2c
        res2a_relu = F.relu(res2a)
        res2b_branch2a = self.res2b_branch2a(res2a_relu)
        bn2b_branch2a = self.bn2b_branch2a(res2b_branch2a)
        res2b_branch2a_relu = F.relu(bn2b_branch2a)
        res2b_branch2b_pad = F.pad(res2b_branch2a_relu, (1L, 1L, 1L, 1L))
        res2b_branch2b = self.res2b_branch2b(res2b_branch2b_pad)
        bn2b_branch2b = self.bn2b_branch2b(res2b_branch2b)
        res2b_branch2b_relu = F.relu(bn2b_branch2b)
        res2b_branch2c = self.res2b_branch2c(res2b_branch2b_relu)
        bn2b_branch2c = self.bn2b_branch2c(res2b_branch2c)
        res2b = res2a_relu + bn2b_branch2c
        res2b_relu = F.relu(res2b)
        res2c_branch2a = self.res2c_branch2a(res2b_relu)
        bn2c_branch2a = self.bn2c_branch2a(res2c_branch2a)
        res2c_branch2a_relu = F.relu(bn2c_branch2a)
        res2c_branch2b_pad = F.pad(res2c_branch2a_relu, (1L, 1L, 1L, 1L))
        res2c_branch2b = self.res2c_branch2b(res2c_branch2b_pad)
        bn2c_branch2b = self.bn2c_branch2b(res2c_branch2b)
        res2c_branch2b_relu = F.relu(bn2c_branch2b)
        res2c_branch2c = self.res2c_branch2c(res2c_branch2b_relu)
        bn2c_branch2c = self.bn2c_branch2c(res2c_branch2c)
        res2c = res2b_relu + bn2c_branch2c
        res2c_relu = F.relu(res2c)
        # ---- stage res3: 1 projection block + 3 identity blocks ----
        res3a_branch1 = self.res3a_branch1(res2c_relu)
        res3a_branch2a = self.res3a_branch2a(res2c_relu)
        bn3a_branch1 = self.bn3a_branch1(res3a_branch1)
        bn3a_branch2a = self.bn3a_branch2a(res3a_branch2a)
        res3a_branch2a_relu = F.relu(bn3a_branch2a)
        res3a_branch2b_pad = F.pad(res3a_branch2a_relu, (1L, 1L, 1L, 1L))
        res3a_branch2b = self.res3a_branch2b(res3a_branch2b_pad)
        bn3a_branch2b = self.bn3a_branch2b(res3a_branch2b)
        res3a_branch2b_relu = F.relu(bn3a_branch2b)
        res3a_branch2c = self.res3a_branch2c(res3a_branch2b_relu)
        bn3a_branch2c = self.bn3a_branch2c(res3a_branch2c)
        res3a = bn3a_branch1 + bn3a_branch2c
        res3a_relu = F.relu(res3a)
        res3b_branch2a = self.res3b_branch2a(res3a_relu)
        bn3b_branch2a = self.bn3b_branch2a(res3b_branch2a)
        res3b_branch2a_relu = F.relu(bn3b_branch2a)
        res3b_branch2b_pad = F.pad(res3b_branch2a_relu, (1L, 1L, 1L, 1L))
        res3b_branch2b = self.res3b_branch2b(res3b_branch2b_pad)
        bn3b_branch2b = self.bn3b_branch2b(res3b_branch2b)
        res3b_branch2b_relu = F.relu(bn3b_branch2b)
        res3b_branch2c = self.res3b_branch2c(res3b_branch2b_relu)
        bn3b_branch2c = self.bn3b_branch2c(res3b_branch2c)
        res3b = res3a_relu + bn3b_branch2c
        res3b_relu = F.relu(res3b)
        res3c_branch2a = self.res3c_branch2a(res3b_relu)
        bn3c_branch2a = self.bn3c_branch2a(res3c_branch2a)
        res3c_branch2a_relu = F.relu(bn3c_branch2a)
        res3c_branch2b_pad = F.pad(res3c_branch2a_relu, (1L, 1L, 1L, 1L))
        res3c_branch2b = self.res3c_branch2b(res3c_branch2b_pad)
        bn3c_branch2b = self.bn3c_branch2b(res3c_branch2b)
        res3c_branch2b_relu = F.relu(bn3c_branch2b)
        res3c_branch2c = self.res3c_branch2c(res3c_branch2b_relu)
        bn3c_branch2c = self.bn3c_branch2c(res3c_branch2c)
        res3c = res3b_relu + bn3c_branch2c
        res3c_relu = F.relu(res3c)
        res3d_branch2a = self.res3d_branch2a(res3c_relu)
        bn3d_branch2a = self.bn3d_branch2a(res3d_branch2a)
        res3d_branch2a_relu = F.relu(bn3d_branch2a)
        res3d_branch2b_pad = F.pad(res3d_branch2a_relu, (1L, 1L, 1L, 1L))
        res3d_branch2b = self.res3d_branch2b(res3d_branch2b_pad)
        bn3d_branch2b = self.bn3d_branch2b(res3d_branch2b)
        res3d_branch2b_relu = F.relu(bn3d_branch2b)
        res3d_branch2c = self.res3d_branch2c(res3d_branch2b_relu)
        bn3d_branch2c = self.bn3d_branch2c(res3d_branch2c)
        res3d = res3c_relu + bn3d_branch2c
        res3d_relu = F.relu(res3d)
        # ---- stage res4: 1 projection block + 5 identity blocks ----
        res4a_branch1 = self.res4a_branch1(res3d_relu)
        res4a_branch2a = self.res4a_branch2a(res3d_relu)
        bn4a_branch1 = self.bn4a_branch1(res4a_branch1)
        bn4a_branch2a = self.bn4a_branch2a(res4a_branch2a)
        res4a_branch2a_relu = F.relu(bn4a_branch2a)
        res4a_branch2b_pad = F.pad(res4a_branch2a_relu, (1L, 1L, 1L, 1L))
        res4a_branch2b = self.res4a_branch2b(res4a_branch2b_pad)
        bn4a_branch2b = self.bn4a_branch2b(res4a_branch2b)
        res4a_branch2b_relu = F.relu(bn4a_branch2b)
        res4a_branch2c = self.res4a_branch2c(res4a_branch2b_relu)
        bn4a_branch2c = self.bn4a_branch2c(res4a_branch2c)
        res4a = bn4a_branch1 + bn4a_branch2c
        res4a_relu = F.relu(res4a)
        res4b_branch2a = self.res4b_branch2a(res4a_relu)
        bn4b_branch2a = self.bn4b_branch2a(res4b_branch2a)
        res4b_branch2a_relu = F.relu(bn4b_branch2a)
        res4b_branch2b_pad = F.pad(res4b_branch2a_relu, (1L, 1L, 1L, 1L))
        res4b_branch2b = self.res4b_branch2b(res4b_branch2b_pad)
        bn4b_branch2b = self.bn4b_branch2b(res4b_branch2b)
        res4b_branch2b_relu = F.relu(bn4b_branch2b)
        res4b_branch2c = self.res4b_branch2c(res4b_branch2b_relu)
        bn4b_branch2c = self.bn4b_branch2c(res4b_branch2c)
        res4b = res4a_relu + bn4b_branch2c
        res4b_relu = F.relu(res4b)
        res4c_branch2a = self.res4c_branch2a(res4b_relu)
        bn4c_branch2a = self.bn4c_branch2a(res4c_branch2a)
        res4c_branch2a_relu = F.relu(bn4c_branch2a)
        res4c_branch2b_pad = F.pad(res4c_branch2a_relu, (1L, 1L, 1L, 1L))
        res4c_branch2b = self.res4c_branch2b(res4c_branch2b_pad)
        bn4c_branch2b = self.bn4c_branch2b(res4c_branch2b)
        res4c_branch2b_relu = F.relu(bn4c_branch2b)
        res4c_branch2c = self.res4c_branch2c(res4c_branch2b_relu)
        bn4c_branch2c = self.bn4c_branch2c(res4c_branch2c)
        res4c = res4b_relu + bn4c_branch2c
        res4c_relu = F.relu(res4c)
        res4d_branch2a = self.res4d_branch2a(res4c_relu)
        bn4d_branch2a = self.bn4d_branch2a(res4d_branch2a)
        res4d_branch2a_relu = F.relu(bn4d_branch2a)
        res4d_branch2b_pad = F.pad(res4d_branch2a_relu, (1L, 1L, 1L, 1L))
        res4d_branch2b = self.res4d_branch2b(res4d_branch2b_pad)
        bn4d_branch2b = self.bn4d_branch2b(res4d_branch2b)
        res4d_branch2b_relu = F.relu(bn4d_branch2b)
        res4d_branch2c = self.res4d_branch2c(res4d_branch2b_relu)
        bn4d_branch2c = self.bn4d_branch2c(res4d_branch2c)
        res4d = res4c_relu + bn4d_branch2c
        res4d_relu = F.relu(res4d)
        res4e_branch2a = self.res4e_branch2a(res4d_relu)
        bn4e_branch2a = self.bn4e_branch2a(res4e_branch2a)
        res4e_branch2a_relu = F.relu(bn4e_branch2a)
        res4e_branch2b_pad = F.pad(res4e_branch2a_relu, (1L, 1L, 1L, 1L))
        res4e_branch2b = self.res4e_branch2b(res4e_branch2b_pad)
        bn4e_branch2b = self.bn4e_branch2b(res4e_branch2b)
        res4e_branch2b_relu = F.relu(bn4e_branch2b)
        res4e_branch2c = self.res4e_branch2c(res4e_branch2b_relu)
        bn4e_branch2c = self.bn4e_branch2c(res4e_branch2c)
        res4e = res4d_relu + bn4e_branch2c
        res4e_relu = F.relu(res4e)
        res4f_branch2a = self.res4f_branch2a(res4e_relu)
        bn4f_branch2a = self.bn4f_branch2a(res4f_branch2a)
        res4f_branch2a_relu = F.relu(bn4f_branch2a)
        res4f_branch2b_pad = F.pad(res4f_branch2a_relu, (1L, 1L, 1L, 1L))
        res4f_branch2b = self.res4f_branch2b(res4f_branch2b_pad)
        bn4f_branch2b = self.bn4f_branch2b(res4f_branch2b)
        res4f_branch2b_relu = F.relu(bn4f_branch2b)
        res4f_branch2c = self.res4f_branch2c(res4f_branch2b_relu)
        bn4f_branch2c = self.bn4f_branch2c(res4f_branch2c)
        res4f = res4e_relu + bn4f_branch2c
        res4f_relu = F.relu(res4f)
        # ---- stage res5: 1 projection block + 2 identity blocks ----
        res5a_branch1 = self.res5a_branch1(res4f_relu)
        res5a_branch2a = self.res5a_branch2a(res4f_relu)
        bn5a_branch1 = self.bn5a_branch1(res5a_branch1)
        bn5a_branch2a = self.bn5a_branch2a(res5a_branch2a)
        res5a_branch2a_relu = F.relu(bn5a_branch2a)
        res5a_branch2b_pad = F.pad(res5a_branch2a_relu, (1L, 1L, 1L, 1L))
        res5a_branch2b = self.res5a_branch2b(res5a_branch2b_pad)
        bn5a_branch2b = self.bn5a_branch2b(res5a_branch2b)
        res5a_branch2b_relu = F.relu(bn5a_branch2b)
        res5a_branch2c = self.res5a_branch2c(res5a_branch2b_relu)
        bn5a_branch2c = self.bn5a_branch2c(res5a_branch2c)
        res5a = bn5a_branch1 + bn5a_branch2c
        res5a_relu = F.relu(res5a)
        res5b_branch2a = self.res5b_branch2a(res5a_relu)
        bn5b_branch2a = self.bn5b_branch2a(res5b_branch2a)
        res5b_branch2a_relu = F.relu(bn5b_branch2a)
        res5b_branch2b_pad = F.pad(res5b_branch2a_relu, (1L, 1L, 1L, 1L))
        res5b_branch2b = self.res5b_branch2b(res5b_branch2b_pad)
        bn5b_branch2b = self.bn5b_branch2b(res5b_branch2b)
        res5b_branch2b_relu = F.relu(bn5b_branch2b)
        res5b_branch2c = self.res5b_branch2c(res5b_branch2b_relu)
        bn5b_branch2c = self.bn5b_branch2c(res5b_branch2c)
        res5b = res5a_relu + bn5b_branch2c
        res5b_relu = F.relu(res5b)
        res5c_branch2a = self.res5c_branch2a(res5b_relu)
        bn5c_branch2a = self.bn5c_branch2a(res5c_branch2a)
        res5c_branch2a_relu = F.relu(bn5c_branch2a)
        res5c_branch2b_pad = F.pad(res5c_branch2a_relu, (1L, 1L, 1L, 1L))
        res5c_branch2b = self.res5c_branch2b(res5c_branch2b_pad)
        bn5c_branch2b = self.bn5c_branch2b(res5c_branch2b)
        res5c_branch2b_relu = F.relu(bn5c_branch2b)
        res5c_branch2c = self.res5c_branch2c(res5c_branch2b_relu)
        bn5c_branch2c = self.bn5c_branch2c(res5c_branch2c)
        res5c = res5b_relu + bn5c_branch2c
        res5c_relu = F.relu(res5c)
        # SCDA masking: keep only spatial positions whose channel-sum exceeds
        # the per-sample mean of that channel-sum map.
        if kwargs['scda']:
            scda_x = torch.sum(res5c_relu,1,keepdim=True)
            # NOTE(review): mean_x is (B, 1) while scda_x is 4-D; verify this
            # broadcast — a view/reshape to (B, 1, 1, 1) may be intended.
            mean_x = torch.mean(scda_x.view(scda_x.size(0),-1),1,True)
            scda_x = scda_x - mean_x
            scda_x = scda_x>0
            scda_x = scda_x.float()
            res5c_relu = res5c_relu * scda_x
        # Global max- and avg-pool, L2-normalize each, concatenate to one
        # descriptor per sample.
        pooling0 = F.max_pool2d(input=res5c_relu, kernel_size=res5c_relu.size()[2:])
        pooling1 = F.avg_pool2d(input=res5c_relu, kernel_size=res5c_relu.size()[2:])
        flatten0 = pooling0.view(pooling0.size(0), -1)
        flatten1 = pooling1.view(pooling1.size(0), -1)
        avg_x = F.normalize(flatten1, p=2, dim=1)
        max_x = F.normalize(flatten0, p=2, dim=1)
        x = torch.cat((avg_x, max_x), dim=1)
        # The last fc layer can be treated as a distance computation.
        x = x * kwargs['scale']
        if kwargs['is_train']:
            x = self.class_fc(x)
        return x
@staticmethod
def __conv(dim, name, **kwargs):
if dim == 1:
layer = nn.Conv1d(**kwargs)
elif dim == 2:
layer = nn.Conv2d(**kwargs)
elif dim == 3:
layer = nn.Conv3d(**kwargs)
else:
raise NotImplementedError()
layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
if 'bias' in __weights_dict[name]:
layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
return layer
@staticmethod
def __batch_normalization(dim, name, **kwargs):
if dim == 1:
layer = nn.BatchNorm1d(**kwargs)
elif dim == 2:
layer = nn.BatchNorm2d(**kwargs)
elif dim == 3:
layer = nn.BatchNorm3d(**kwargs)
else:
raise NotImplementedError()
if 'scale' in __weights_dict[name]:
layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['scale']))
else:
layer.weight.data.fill_(1)
if 'bias' in __weights_dict[name]:
layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
else:
layer.bias.data.fill_(0)
layer.state_dict()['running_mean'].copy_(torch.from_numpy(__weights_dict[name]['mean']))
layer.state_dict()['running_var'].copy_(torch.from_numpy(__weights_dict[name]['var']))
return layer
| [
"zhengxiawu@126.com"
] | zhengxiawu@126.com |
ad833abfa7ebc2a0c6d29bf5cc8502e93504ecf0 | 115dfb558763fe51bc6d065435e4181057907dee | /lintcode/空格替换.py | 4bc2be3615ae4a137bf54514b44b36f8585a3a6f | [] | no_license | StSphinx/leetcode | a8f328f9f409ca57d56f2054598e2024c578c0a6 | 699e8ead451b570aed22d3a705a5ecbb7178cea1 | refs/heads/master | 2020-09-22T02:49:27.327193 | 2019-05-22T03:46:12 | 2019-05-22T03:46:12 | 37,531,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | # -*- coding:utf-8 -*-
__author__ = 'Muming'
class Solution:
    # @param {char[]} string: An array of Char
    # @param {int} length: The true length of the string
    # @return {(int, char[])} The true length of the new string and the
    #     (in-place mutated) array with each ' ' replaced by '%20'
    def replaceBlank(self, string, length):
        """Replace every space in `string` (a list of chars) with '%20'.

        Bug fix: '%20' is 3 characters replacing 1, so the true length
        grows by 2 per space — the original code added 3.
        """
        for k, v in enumerate(string):
            if v == ' ':
                string[k] = '%20'
                length += 2  # net gain per space: 3 chars written - 1 removed
        return length, string
# Quick manual check (Python 2 print statement).
so = Solution()
print so.replaceBlank(list("abcd efg hij"), 12)
"zhhljdb6014@gmail.com"
] | zhhljdb6014@gmail.com |
f44cb8ff8292065f395502b902384d138c5b9281 | f09ef05dbc335a095c3652089bd98c9ba2f0e1de | /project_model/blog/migrations/0004_blogpost_posted_by.py | 442e65869d57a4228993da0add9fc0dbedf1a905 | [] | no_license | Dzinsyah/DJANGO_MVC | be45e2900203717dba51c1221f899d1c54324c36 | 5dea4d592ffecd1918e78c2aae185076a4e21fa4 | refs/heads/master | 2020-04-22T02:50:53.661662 | 2019-02-13T11:58:45 | 2019-02-13T11:58:45 | 170,065,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | # Generated by Django 2.1.5 on 2019-02-11 07:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a non-null ``posted_by`` FK (to blog.Mentee) on BlogPost.

    ``default=1`` with ``preserve_default=False`` means pk=1 is used only as a
    one-off value to populate existing rows during this migration.
    """

    dependencies = [
        ('blog', '0003_blogpost'),
    ]

    operations = [
        migrations.AddField(
            model_name='blogpost',
            name='posted_by',
            # CASCADE: deleting a Mentee deletes their blog posts.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='blog.Mentee'),
            preserve_default=False,
        ),
    ]
| [
"dzinsyah@alphatech.id"
] | dzinsyah@alphatech.id |
59394e931356467d27d5d2fb4d1f788d461283b7 | 026c31969fe43e3d14029a8dbf7b4e3e435a06d2 | /users/tests/test_admin.py | 7c8be906cd6030b7fe983ed5bab088036da8a00c | [
"MIT"
] | permissive | victorfsf/github-monitor | db83c9f2ad0e725f6adbfa2c56c299aeadd5bc3a | 7192827d44ca616c0914864770f8f7910cbe55b3 | refs/heads/master | 2020-03-08T00:44:30.341022 | 2018-04-09T23:43:12 | 2018-04-09T23:43:12 | 127,812,813 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | from django.test import TestCase
from django.test.client import RequestFactory
from model_mommy import mommy
from common.site import GithubMonitorAdminSite
from users.admin import GithubUserAdmin
class TestGithubUserAdmin(TestCase):
    """Tests for GithubUserAdmin's ``get_username`` display helper."""

    def setUp(self):
        self.request = RequestFactory().get('/')
        self.user = mommy.make('users.User', username='test_username')
        self.github_user = mommy.make('users.GithubUser', user=self.user)
        self.admin = GithubUserAdmin(self.github_user, GithubMonitorAdminSite())

    def test_get_username(self):
        # The admin column should surface the related auth user's username.
        self.assertEqual(
            self.admin.get_username(self.github_user),
            self.user.username,
        )

    def tearDown(self):
        # Delete the dependent GithubUser before its User.
        for fixture in (self.github_user, self.user):
            fixture.delete()
| [
"victorfsf.dev@gmail.com"
] | victorfsf.dev@gmail.com |
84eb65886a58255c2d3c09f27ece27c9be43b5bf | d55deb7b26277a647aff5887cdbe65f002035ac7 | /jobs/models.py | b2cee414ec2ee17dd8d1ae2de7a8ea7258a12eae | [
"MIT"
] | permissive | cyndi088/recruitment | 0af95ef6028c678e49952752e65dd7e58fd9799f | e25e29b8b6724f1dce7b9ed5d9efb409744907c0 | refs/heads/main | 2023-01-22T18:38:55.403025 | 2020-12-03T06:19:16 | 2020-12-03T06:19:16 | 318,094,536 | 0 | 0 | MIT | 2020-12-03T06:19:17 | 2020-12-03T06:13:45 | Python | UTF-8 | Python | false | false | 1,226 | py | from django.db import models
from datetime import datetime
from django.contrib.auth.models import User
# Create your models here.
# Choice lists: (stored smallint value, human-readable label).
JobTypes = [
    (0, "技术类"),
    (1, "产品类"),
    (2, "运营类"),
    (3, "设计类")
]

Cities = [
    (0, "北京"),
    (1, "上海"),
    (2, "深圳"),
    (3, "杭州")
]
class Job(models.Model):
    """A job opening: category, title, location, description and audit fields."""

    job_type = models.SmallIntegerField(blank=False, choices=JobTypes, verbose_name="职位类别")
    job_name = models.CharField(blank=False, max_length=250, verbose_name="职位名称")
    job_city = models.SmallIntegerField(blank=False, choices=Cities, verbose_name="工作地点")
    job_responsibility = models.TextField(blank=False, max_length=1024, verbose_name="职位职责")
    job_requirement = models.TextField(blank=False, max_length=1024, verbose_name="职位要求")
    # SET_NULL: keep the job if its creating user is deleted.
    creator = models.ForeignKey(User, verbose_name="创建人", null=True, on_delete=models.SET_NULL)
    created_date = models.DateTimeField(verbose_name="创建日期", auto_now_add=True)
    modified_date = models.DateTimeField(verbose_name="修改日期", auto_now=True, null=True, blank=True)

    class Meta:
        verbose_name = '职位'
        verbose_name_plural = verbose_name
| [
"cyndi088@163.com"
] | cyndi088@163.com |
4d251c34bdbf56b6e283315909d38807e090ff38 | 66746ed38e13b2d069829b1e7d963e2a66808f4e | /37 - Estrutura de repetição WHILE - Cria menu.py | 92ebdc45c4e59ce4ccd23fb7fb34b6fe0a53358f | [] | no_license | leanndropx/px-python-logica-de-programacao | 23ea8064381646623556130c29c7ecec4a845eb1 | 72d852abc8b4f85c0963909aab8e6aa0e9278ec1 | refs/heads/master | 2023-06-17T09:22:47.516634 | 2021-07-15T11:42:19 | 2021-07-15T11:42:19 | 385,781,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | py |
# - DESCRIBING THE CHALLENGE (exercise 37: menu-driven calculator)
print('37 - Crie um programa que leia DOIS valores e mostre um menu na tela:')
print('1 - somar')
print('2 - multiplicar')
print('3 - maior')
print('4 - novos números')
print('5 - sair do programa')
print()
print()

# - STARTING THE PROGRAM
# IMPORT LIBRARIES
from time import sleep

# 1 - READ INPUT
n1=int(input('digite o primeiro número: '))
n2=int(input('digite o segundo número: '))
print('O que você gostaria de fazer: ')
opcao=0
# Loop until the user picks option 5 (quit).
while opcao!=5:
    print('\033[7m',' ','\033[m')
    print(''' [ 1 ] Somar
    [ 2 ] Multiplicar
    [ 3 ] Maior
    [ 4 ] Novos números
    [ 5 ] Sair do programa''')
    print('\033[7m', ' ', '\033[m')
    opcao=int(input('escolha a opção: '))
    # 2 - PROCESS THE CHOSEN OPERATION
    if opcao==1:
        soma=n1+n2
        print('A soma é {}'.format(soma))
    elif opcao==2:
        multiplicar=n1*n2
        print('O produto é {}'.format(multiplicar))
    elif opcao==3:
        if n1>n2:
            maior=n1
        else:
            maior=n2
        print('O maior número é {}'.format(maior))
    elif opcao==4:
        n1=int(input('digite o primeiro número: '))
        n2=int(input('digite o segundo número: '))
    elif opcao==5:
        print('Finalizando...')
    else:
        print('opção inválida')
    print()
    sleep(1)
print('O programa foi encerrado!')
# 3 - OUTPUT / END
| [
"leanndrompeixoto1@gmail.com"
] | leanndrompeixoto1@gmail.com |
552afe3365ed66d2b8652c89879abaa32a139ce6 | 548a195b2bd6e5857f008ef4b5a305983bada183 | /popular-movie-nicer.py | c207732295d01f823104ab24c3592707649b22c2 | [] | no_license | aashray18521/Udemy-Spark_Python | b404801aba3bcb9c896a34fcd3679172ef092caf | 13174529199d581598902dadcf07dcb171cadf33 | refs/heads/master | 2020-03-28T10:55:32.160605 | 2018-09-17T06:44:21 | 2018-09-17T06:44:21 | 148,159,774 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | from pyspark import SparkConf, SparkContext
def loadMovieNames():
    """Return {movie_id: title} parsed from the MovieLens ml-100k u.item file.

    Each line is pipe-delimited; field 0 is the id, field 1 the title.
    """
    with open("ml-100k/u.item") as f:
        return {int(fields[0]): fields[1]
                for fields in (line.split('|') for line in f)}
# Spark driver. NOTE: the tuple-unpacking lambdas below are Python 2-only.
conf = SparkConf().setMaster("local").setAppName("nicePopularMovie")
sc = SparkContext(conf = conf)

# Broadcast the id -> title lookup so every executor can resolve names.
nameDict = sc.broadcast(loadMovieNames())

rdd = sc.textFile("ml-100k/u.data")
# Count ratings per movie id, then sort by count ascending.
onlyMovieIds = rdd.map(lambda x: (int(x.split()[1]), 1))
countPerMovie = onlyMovieIds.reduceByKey(lambda x,y : x+y)
reverseRdd = countPerMovie.map(lambda (x,y) : (y,x))
sortedMovies = reverseRdd.sortByKey()
sortedMoviesWithNames = sortedMovies.map(lambda (count, movie) : (nameDict.value[movie], count))

results = sortedMoviesWithNames.collect()

for result in results:
    print(result)
| [
"noreply@github.com"
] | noreply@github.com |
72ec7cf470d37dd39544d24e30dce4db9ee66c02 | 69d4577c856c8352f4b41a83431ca304bae3a8a2 | /model/charcnn.py | 286006f682bb6386485eaa862be0faef17d080aa | [
"Apache-2.0"
] | permissive | tagucci/NCRFpp | ecbdd6d9c6e87505bc200047eb4f2a21c651d2c9 | 3fd65685c26ed0686efde933d262b85daeb02697 | refs/heads/master | 2020-03-11T22:51:23.497036 | 2018-04-19T12:38:44 | 2018-04-19T12:38:44 | 130,304,515 | 0 | 0 | Apache-2.0 | 2018-04-20T03:35:53 | 2018-04-20T03:35:53 | null | UTF-8 | Python | false | false | 2,910 | py | # -*- coding: utf-8 -*-
# @Author: Jie Yang
# @Date: 2017-10-17 16:47:32
# @Last Modified by: Jie Yang, Contact: jieynlp@gmail.com
# @Last Modified time: 2018-03-30 16:18:23
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class CharCNN(nn.Module):
    """Character-level CNN feature extractor (embedding -> dropout -> 1-D conv)."""

    def __init__(self, alphabet_size, embedding_dim, hidden_dim, dropout, gpu):
        """Build embedding, dropout and conv layers; move to GPU if requested."""
        super(CharCNN, self).__init__()
        print "build char sequence feature extractor: CNN ..."
        self.gpu = gpu
        self.hidden_dim = hidden_dim
        self.char_drop = nn.Dropout(dropout)
        self.char_embeddings = nn.Embedding(alphabet_size, embedding_dim)
        # Initialize embeddings uniformly in [-sqrt(3/d), +sqrt(3/d)].
        self.char_embeddings.weight.data.copy_(torch.from_numpy(self.random_embedding(alphabet_size, embedding_dim)))
        # kernel_size=3 with padding=1 keeps the sequence length unchanged.
        self.char_cnn = nn.Conv1d(embedding_dim, self.hidden_dim, kernel_size=3, padding=1)
        if self.gpu:
            self.char_drop = self.char_drop.cuda()
            self.char_embeddings = self.char_embeddings.cuda()
            self.char_cnn = self.char_cnn.cuda()


    def random_embedding(self, vocab_size, embedding_dim):
        """Return a (vocab_size, embedding_dim) uniform random init matrix."""
        pretrain_emb = np.empty([vocab_size, embedding_dim])
        scale = np.sqrt(3.0 / embedding_dim)
        for index in range(vocab_size):
            pretrain_emb[index,:] = np.random.uniform(-scale, scale, [1, embedding_dim])
        return pretrain_emb


    def get_last_hiddens(self, input, seq_lengths):
        """
            input:
                input: Variable(batch_size, word_length)
                seq_lengths: numpy array (batch_size,  1)
            output:
                Variable(batch_size, char_hidden_dim)
            Note it only accepts ordered (length) variable, length size is recorded in seq_lengths
        """
        batch_size = input.size(0)
        char_embeds = self.char_drop(self.char_embeddings(input))
        # (batch, length, emb) -> (batch, emb, length) for Conv1d.
        char_embeds = char_embeds.transpose(2,1).contiguous()
        char_cnn_out = self.char_cnn(char_embeds)
        # Max-pool over the full length -> one vector per word.
        char_cnn_out = F.max_pool1d(char_cnn_out, char_cnn_out.size(2)).view(batch_size, -1)
        return char_cnn_out

    def get_all_hiddens(self, input, seq_lengths):
        """
            input:
                input: Variable(batch_size,  word_length)
                seq_lengths: numpy array (batch_size,  1)
            output:
                Variable(batch_size, word_length, char_hidden_dim)
            Note it only accepts ordered (length) variable, length size is recorded in seq_lengths
        """
        batch_size = input.size(0)
        char_embeds = self.char_drop(self.char_embeddings(input))
        char_embeds = char_embeds.transpose(2,1).contiguous()
        char_cnn_out = self.char_cnn(char_embeds).transpose(2,1).contiguous()
        return char_cnn_out

    def forward(self, input, seq_lengths):
        """Default forward: per-position hidden states (see get_all_hiddens)."""
        return self.get_all_hiddens(input, seq_lengths)
| [
"jie_yang@mymail.sutd.edu.sg"
] | jie_yang@mymail.sutd.edu.sg |
9ef5be1266f315f4969221617ac232fd1647c121 | 1ed17b57788423eb62570020286daf3016749706 | /CountryGroup.py | e88e6d468fe13afca192163b40994c5c0da6a8bc | [] | no_license | yuvapriya/TopCoder | b37749e4afae7fedf5a10881ba2dd2249dc08bc5 | 6dea4cc9bb70ab26ac70a846a2cd46a82061c033 | refs/heads/master | 2021-01-19T06:37:17.735811 | 2015-05-17T21:47:30 | 2015-05-17T21:47:30 | 35,783,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | #Problem Statement: http://community.topcoder.com/stat?c=problem_statement&pm=13687
def countryGroup(arr):
countryGrp = {}
prev = None
for i in range(len(arr)):
val = arr[i]
if( val in countryGrp):
countryGrp[val] +=1
if(val != 1):
if prev != None and prev !=val:
return -1
if countryGrp[val] > val:
return -1
prev = val
else:
countryGrp[val] =1
prev = val
total = 0
for key in countryGrp.keys():
if key == 1:
total+= countryGrp[key]
else:
if (countryGrp[key] != key):
return -1
else:
total +=1
return total
# Manual checks (Python 2 print statements).
print countryGroup([2,2,3,3,3])
print countryGroup([1,1,1,1,1])
print countryGroup([3,3])
print countryGroup([4,4,4,4,1,1,2,2,3,3,3])
print countryGroup([2,1,2,2,1,2])
| [
"m.yuvapriya@gmail.com"
] | m.yuvapriya@gmail.com |
05c06ff5850ee1f5cbab0d42f5704ce5b0f4acb3 | 57d1580fd540b4819abb67f9db43fdfbba63725f | /hydrogen_notebooks/option_pricing/binomial_european_call_delta_hedging.py | 29f3ca209e1b50cb4571fff0cac52d807c607296 | [] | no_license | glyfish/alpaca | 49edfcb9d80551825dfa4cf071f21aeb95a3502f | 2b5b69bcf50ed081a526742658be503706af94b4 | refs/heads/master | 2023-02-22T00:24:19.293502 | 2022-09-05T17:20:23 | 2022-09-05T17:20:23 | 186,169,438 | 1 | 3 | null | 2023-02-11T00:52:12 | 2019-05-11T18:38:58 | Python | UTF-8 | Python | false | false | 2,302 | py | # %%
%load_ext autoreload
%autoreload 2
import os
import sys
import numpy
from matplotlib import pyplot
from lib import config
from scipy.stats import binom
wd = os.getcwd()
yahoo_root = os.path.join(wd, 'data', 'yahoo')
pyplot.style.use(config.glyfish_style)
# %%
def qrn(U, D, R):
    """Risk-neutral probability of an up move: (R - D) / (U - D)."""
    excess_return = R - D
    move_spread = U - D
    return excess_return / move_spread
def qrn1(q, U, R):
    """Stock-measure ("tilted") probability: q * (1 + U) / (1 + R)."""
    up_weighted = q * (1.0 + U)
    return up_weighted / (1.0 + R)
def binomial_tail_cdf(l, n, p):
    """Upper tail P(X > l) for X ~ Binomial(n, p)."""
    lower_tail = binom.cdf(l, n, p)
    return 1.0 - lower_tail
def cutoff(S0, U, D, K, n):
    """Smallest number of up moves for which the call ends in the money.

    Returns n + 1 when no terminal node has a positive payoff.
    """
    for up_moves in range(0, n + 1):
        up_factor = (1.0 + U)**up_moves
        down_factor = (1.0 + D)**(n - up_moves)
        if S0*up_factor*down_factor - K > 0:
            return up_moves
    return n + 1
def european_call_payoff(U, D, R, S0, K, n):
    """CRR binomial price of a European call (strike K, n periods).

    Tail-probability form: S0 * Psi(l; n, q') - K (1+R)^-n * Psi(l; n, q),
    where l is the in-the-money cutoff, q the risk-neutral probability and
    q' its stock-measure tilt.
    """
    l = cutoff(S0, U, D, K, n)
    q = qrn(U, D, R)
    tail_q = binomial_tail_cdf(l - 1, n, q)
    tail_q1 = binomial_tail_cdf(l - 1, n, qrn1(q, U, R))
    return S0*tail_q1 - K*(1 + R)**(-n)*tail_q
def delta(CU, CD, SU, SD):
    """Replicating stock position: option value spread over stock price spread."""
    option_spread = CU - CD
    stock_spread = SU - SD
    return option_spread / stock_spread
def init_borrow(S0, C0, x):
    """Initial cash position: option premium minus the cost of x shares."""
    stock_cost = S0 * x
    return C0 - stock_cost
def borrow(y, R, x1, x2, S):
    """Roll cash forward one period and settle the stock-position change."""
    grown_cash = y * (1 + R)
    rebalance_proceeds = (x1 - x2) * S
    return grown_cash + rebalance_proceeds
def portfolio_value(x, S, y):
    """Mark-to-market value of x shares at price S plus cash y."""
    stock_value = x * S
    return stock_value + y
# %%
# Model parameters: n periods, up/down returns U/D, risk-free rate R,
# spot S0, strike K.
n = 3
U = 0.2
D = -0.1
R = 0.1
S0 = 100.0
K = 105.0

# %%
# Risk-neutral quantities and the in-the-money cutoff.
q = qrn(U, D, R)
q1 = qrn1(q, U, R)
l = cutoff(S0, U, D, K, n)
Ψq = binomial_tail_cdf(l - 1, n, q)
Ψq1 = binomial_tail_cdf(l - 1, n, q1)
q, q1, l, Ψq, Ψq1
binom.cdf(l, n, q)

# %%
# t = 0: initial call price.
C0 = european_call_payoff(U, D, R, S0, K, n)

# %%
# Delta hedge: at each step hold x shares (delta) and y in cash.
# t = 0
S1U = S0*(1.0 + U)
S1D = S0*(1.0 + D)
C1U = european_call_payoff(U, D, R, S1U, K, n - 1)
C1D = european_call_payoff(U, D, R, S1D, K, n - 1)
x1 = delta(C1U, C1D, S1U, S1D)
y1 = init_borrow(S0, C0, x1)
portfolio_value(x1, S0, y1)

# t = 1
# The price goes up S1 = S0*(1+U)
S1 = S0 * (1 + U)
S2U = S1*(1.0 + U)
S2D = S1*(1.0 + D)
C2U = european_call_payoff(U, D, R, S2U, K, n - 2)
C2D = european_call_payoff(U, D, R, S2D, K, n - 2)
x2 = delta(C2U, C2D, S2U, S2D)
y2 = borrow(y1, R, x1, x2, S1)
portfolio_value(x2, S1, y2)

# t = 2
# The price goes down S1 = S0*(1+U)*(1+D)
S2 = S0 * (1 + U) * (1 + D)
S3U = S2*(1.0 + U)
S3D = S2*(1.0 + D)
C3U = european_call_payoff(U, D, R, S3U, K, n - 3)
C3D = european_call_payoff(U, D, R, S3D, K, n - 3)
x3 = delta(C3U, C3D, S3U, S3D)
y3 = borrow(y2, R, x2, x3, S2)
portfolio_value(x3, S2, y3)
| [
"troy.stribling@gmail.com"
] | troy.stribling@gmail.com |
f1a9d5f8ac93d9af895ae5ffd7c6036d617c5d19 | 6b83e1eb08926bd2437c3a42bf53e262fda81cd3 | /algorithms/envs/flow/envs/ring/lane_change_accel.py | ea40b24414a20e24a7db6c8d2e50716d09bf8c08 | [] | no_license | TerryLiu2k/DMPO | a92d2b96458066cd441293f618aca10bd21fce22 | 060c4135973a1b9bdea0cd26ea8f78a3a7ad5d98 | refs/heads/master | 2023-08-15T16:09:08.202735 | 2021-10-16T04:07:38 | 2021-10-16T04:07:38 | 416,567,453 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,406 | py | """Environments that can train both lane change and acceleration behaviors."""
from algorithms.envs.flow.envs.ring.accel import AccelEnv
from algorithms.envs.flow.core import rewards
from gym.spaces.box import Box
import numpy as np
# Required keys of env_params.additional_params; presence is validated in
# LaneChangeAccelEnv.__init__.
ADDITIONAL_ENV_PARAMS = {
    # maximum acceleration for autonomous vehicles, in m/s^2
    "max_accel": 3,
    # maximum deceleration for autonomous vehicles, in m/s^2
    "max_decel": 3,
    # lane change duration for autonomous vehicles, in s. Autonomous vehicles
    # reject new lane changing commands for this duration after successfully
    # changing lanes.
    "lane_change_duration": 5,
    # desired velocity for all vehicles in the network, in m/s
    "target_velocity": 10,
    # specifies whether vehicles are to be sorted by position during a
    # simulation step. If set to True, the environment parameter
    # self.sorted_ids will return a list of all vehicles sorted in accordance
    # with the environment
    'sort_vehicles': False
}
class LaneChangeAccelEnv(AccelEnv):
    """Fully observable lane change and acceleration environment.
    This environment is used to train autonomous vehicles to improve traffic
    flows when lane-change and acceleration actions are permitted by the rl
    agent.
    Required from env_params:
    * max_accel: maximum acceleration for autonomous vehicles, in m/s^2
    * max_decel: maximum deceleration for autonomous vehicles, in m/s^2
    * lane_change_duration: lane change duration for autonomous vehicles, in s
    * target_velocity: desired velocity for all vehicles in the network, in m/s
    * sort_vehicles: specifies whether vehicles are to be sorted by position
      during a simulation step. If set to True, the environment parameter
      self.sorted_ids will return a list of all vehicles sorted in accordance
      with the environment
    States
        The state consists of the velocities, absolute position, and lane index
        of all vehicles in the network. This assumes a constant number of
        vehicles.
    Actions
        Actions consist of:
        * a (continuous) acceleration from -abs(max_decel) to max_accel,
          specified in env_params
        * a (continuous) lane-change action from -1 to 1, used to determine the
          lateral direction the vehicle will take.
        Lane change actions are performed only if the vehicle has not changed
        lanes for the lane change duration specified in env_params.
    Rewards
        The reward function is the two-norm of the distance of the speed of the
        vehicles in the network from a desired speed, combined with a penalty
        to discourage excess lane changes by the rl vehicle.
    Termination
        A rollout is terminated if the time horizon is reached or if two
        vehicles collide into one another.
    """
    def __init__(self, env_params, sim_params, network, simulator='traci'):
        # Fail fast if any required key from ADDITIONAL_ENV_PARAMS is missing.
        for p in ADDITIONAL_ENV_PARAMS.keys():
            if p not in env_params.additional_params:
                raise KeyError(
                    'Environment parameter "{}" not supplied'.format(p))
        super().__init__(env_params, sim_params, network, simulator)
    @property
    def action_space(self):
        """See class definition."""
        max_decel = self.env_params.additional_params["max_decel"]
        max_accel = self.env_params.additional_params["max_accel"]
        # Per RL vehicle: (acceleration, lane-change direction) pairs.
        lb = [-abs(max_decel), -1] * self.initial_vehicles.num_rl_vehicles
        ub = [max_accel, 1] * self.initial_vehicles.num_rl_vehicles
        return Box(np.array(lb), np.array(ub), dtype=np.float32)
    @property
    def observation_space(self):
        """See class definition."""
        # (speed, position, lane) per vehicle, each normalized to [0, 1].
        return Box(
            low=0,
            high=1,
            shape=(3 * self.initial_vehicles.num_vehicles, ),
            dtype=np.float32)
    def compute_reward(self, rl_actions, **kwargs):
        """See class definition."""
        # compute the system-level performance of vehicles from a velocity
        # perspective
        reward = rewards.desired_velocity(self, fail=kwargs["fail"])
        # punish excessive lane changes by reducing the reward by a set value
        # every time an rl car changes lanes (10% of max reward)
        for veh_id in self.k.vehicle.get_rl_ids():
            if self.k.vehicle.get_last_lc(veh_id) == self.time_counter:
                reward -= 0.1
        return reward
    def get_state(self):
        """See class definition."""
        # normalizers
        max_speed = self.k.network.max_speed()
        length = self.k.network.length()
        max_lanes = max(
            self.k.network.num_lanes(edge)
            for edge in self.k.network.get_edge_list())
        speed = [self.k.vehicle.get_speed(veh_id) / max_speed
                 for veh_id in self.sorted_ids]
        pos = [self.k.vehicle.get_x_by_id(veh_id) / length
               for veh_id in self.sorted_ids]
        lane = [self.k.vehicle.get_lane(veh_id) / max_lanes
                for veh_id in self.sorted_ids]
        return np.array(speed + pos + lane)
    def _apply_rl_actions(self, actions):
        """See class definition."""
        # Even indices are accelerations, odd indices lane-change directions.
        # NOTE(review): the boolean-mask assignment below assumes `actions`
        # is a numpy array (so the slices are views) -- confirm with caller.
        acceleration = actions[::2]
        direction = actions[1::2]
        # re-arrange actions according to mapping in observation space
        sorted_rl_ids = [
            veh_id for veh_id in self.sorted_ids
            if veh_id in self.k.vehicle.get_rl_ids()
        ]
        # represents vehicles that are allowed to change lanes
        non_lane_changing_veh = \
            [self.time_counter <=
             self.env_params.additional_params["lane_change_duration"]
             + self.k.vehicle.get_last_lc(veh_id)
             for veh_id in sorted_rl_ids]
        # vehicle that are not allowed to change have their directions set to 0
        direction[non_lane_changing_veh] = \
            np.array([0] * sum(non_lane_changing_veh))
        self.k.vehicle.apply_acceleration(sorted_rl_ids, acc=acceleration)
        self.k.vehicle.apply_lane_change(sorted_rl_ids, direction=direction)
    def additional_command(self):
        """Define which vehicles are observed for visualization purposes."""
        # specify observed vehicles
        if self.k.vehicle.num_rl_vehicles > 0:
            for veh_id in self.k.vehicle.get_human_ids():
                self.k.vehicle.set_observed(veh_id)
class LaneChangeAccelPOEnv(LaneChangeAccelEnv):
    """POMDP version of LaneChangeAccelEnv.
    Required from env_params:
    * max_accel: maximum acceleration for autonomous vehicles, in m/s^2
    * max_decel: maximum deceleration for autonomous vehicles, in m/s^2
    * lane_change_duration: lane change duration for autonomous vehicles, in s
    * target_velocity: desired velocity for all vehicles in the network, in m/s
    States
        States are a list of rl vehicles speeds, as well as the speeds and
        bumper-to-bumper headways between the rl vehicles and their
        leaders/followers in all lanes. There is no assumption on the number of
        vehicles in the network, so long as the number of rl vehicles is
        static.
    Actions
        See parent class.
    Rewards
        See parent class.
    Termination
        See parent class.
    Attributes
    ----------
    num_lanes : int
        maximum number of lanes on any edge in the network
    visible : list of str
        lists of visible vehicles, used for visualization purposes
    """
    def __init__(self, env_params, sim_params, network, simulator='traci'):
        super().__init__(env_params, sim_params, network, simulator)
        # Widest edge in the network; fixes the per-vehicle observation width.
        self.num_lanes = max(self.k.network.num_lanes(edge)
                             for edge in self.k.network.get_edge_list())
        self.visible = []
    @property
    def observation_space(self):
        """See class definition."""
        # Per RL vehicle: (headway, tailway, leader speed, follower speed)
        # for every lane, plus the ego speed appended at the end.
        return Box(
            low=0,
            high=1,
            shape=(4 * self.initial_vehicles.num_rl_vehicles *
                   self.num_lanes + self.initial_vehicles.num_rl_vehicles, ),
            dtype=np.float32)
    def get_state(self):
        """See class definition.

        Bug fix: lane headways/tailways are now normalized by the network
        length in the observation itself. Previously the raw values were
        copied into the observation and the division was applied to the
        temporary ``lane_headways`` list afterwards (tailways were never
        divided at all), so the observation violated the declared [0, 1]
        observation space.
        """
        obs = [
            0
            for _ in range(4 * self.k.vehicle.num_rl_vehicles * self.num_lanes)
        ]
        self.visible = []
        for i, rl_id in enumerate(self.k.vehicle.get_rl_ids()):
            # normalizers
            max_length = self.k.network.length()
            max_speed = self.k.network.max_speed()
            # default of 1 (a full network length once normalized), since the
            # absence of a vehicle implies a large headway
            headway = [1] * self.num_lanes
            tailway = [1] * self.num_lanes
            vel_in_front = [0] * self.num_lanes
            vel_behind = [0] * self.num_lanes
            lane_leaders = self.k.vehicle.get_lane_leaders(rl_id)
            lane_followers = self.k.vehicle.get_lane_followers(rl_id)
            lane_headways = self.k.vehicle.get_lane_headways(rl_id)
            lane_tailways = self.k.vehicle.get_lane_tailways(rl_id)
            # normalized copies (see docstring)
            headway[0:len(lane_headways)] = [
                gap / max_length for gap in lane_headways]
            tailway[0:len(lane_tailways)] = [
                gap / max_length for gap in lane_tailways]
            for j, lane_leader in enumerate(lane_leaders):
                if lane_leader != '':
                    vel_in_front[j] = self.k.vehicle.get_speed(lane_leader) \
                        / max_speed
                    self.visible.extend([lane_leader])
            for j, lane_follower in enumerate(lane_followers):
                if lane_follower != '':
                    vel_behind[j] = self.k.vehicle.get_speed(lane_follower) \
                        / max_speed
                    self.visible.extend([lane_follower])
            # add the headways, tailways, and speed for all lane leaders
            # and followers
            obs[4*self.num_lanes*i:4*self.num_lanes*(i+1)] = \
                np.concatenate((headway, tailway, vel_in_front, vel_behind))
            # add the speed for the ego rl vehicle
            # NOTE(review): this speed is appended un-normalized, unlike every
            # other entry; left unchanged to avoid shifting trained policies.
            obs.append(self.k.vehicle.get_speed(rl_id))
        return np.array(obs)
    def additional_command(self):
        """Define which vehicles are observed for visualization purposes."""
        # specify observed vehicles
        for veh_id in self.visible:
            self.k.vehicle.set_observed(veh_id)
| [
"terrylyclow@yahoo.com"
] | terrylyclow@yahoo.com |
3412b6740ab16c481792133ad7b3581fee11316c | 604756ba3da355fffb1a1cf4b882441de2d75184 | /app/util/py2mongo.py | d6d181192ddcc862daecdedcade7e3838cf2a87d | [] | no_license | gowthamlabs/python-rest-ml | 4e93f64019e28f4436b4c634d275e98b70c98939 | 3aa0a1b6fddd52037bfcdb065a9ae63105fd9f6c | refs/heads/master | 2020-07-29T01:59:28.579650 | 2019-09-30T04:46:39 | 2019-09-30T04:46:39 | 209,625,473 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | from pymongo import MongoClient
# pprint library is used to make the output look more pretty
from pprint import pprint
# connect to MongoDB, change the << MONGODB URL >> to reflect your own connection string
# client = MongoClient("localhost:27017") --> this also works
# Connect to the local MongoDB instance.
try:
    client = MongoClient(port=27017)
except Exception as inst:
    # Fix: the original concatenated the exception object to a str
    # ("8: " + inst), which raises TypeError inside the handler itself.
    print("Unexpected error in 8 :", str(inst))
# Set the db object to point to the myapp database.
# NOTE(review): if the connection above failed, `client` was never bound and
# this line raises NameError -- the script cannot continue without Mongo.
db = client.myapp
print('The number of products available:')
def productCount():
    """Return the number of documents in the ``products`` collection, as a str.

    On failure the error message is returned (as a str) instead, preserving
    the original best-effort contract.
    """
    try:
        # count_documents({}) replaces the deprecated Cursor.count().
        products_count = db.products.count_documents({})
        return str(products_count)
    except Exception as inst:
        # Fix: "23: " + inst raised TypeError inside the handler.
        print("Unexpected error in 23:", str(inst))
        return str(inst)
| [
"gowtham.venugopalan@cognizant.com"
] | gowtham.venugopalan@cognizant.com |
74dd88c522c1f43180958ef5d5f77d70bc1a149a | 529b575a77c6c39714704c60e9705eaf52bd48d3 | /tictactoe.py | dea97741580f86f1c1e9f720054c2dca5e7284b3 | [] | no_license | alevi0106/AI | 82151cd5c415f0ab7d852cc8d76f88ae56490835 | c9ab47e63d8f8a0a0f0d5b1fd824a5096f3115a8 | refs/heads/master | 2020-03-22T21:58:33.549768 | 2018-09-27T10:01:02 | 2018-09-27T10:01:02 | 140,725,573 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,120 | py | board=[2]*10
# Printable mirror of `board` holding "X"/"O" marks; index 0 is unused.
board_copy = [" "] * 10

# Render the current board as a 3x3 grid on stdout.
def draw():
    """Refresh board_copy from the numeric board, then print the grid."""
    marks = {3: "X", 5: "O"}
    for cell in range(1, 10):
        if board[cell] in marks:
            board_copy[cell] = marks[board[cell]]
    for row_start in (1, 4, 7):
        print(' {} | {} | {}'.format(*board_copy[row_start:row_start + 3]))
    print('')
def Go(n, turn):
    """Place a mark in cell n: O (5) on even turns, X (3) on odd turns."""
    board[n] = 5 if turn % 2 == 0 else 3
# Find a blank cell among three candidates.
def fb(a, b, c):
    """Return the first of cells a, b, c that is empty (value 2), else a."""
    for cell in (a, b, c):
        if board[cell] == 2:
            return cell
    return a
def Make():
    """Pick a strategic free cell: centre first, then edge cells 2, 4, 6, 8.

    Returns None when none of those five cells is free.
    """
    for cell in (5, 2, 4, 6, 8):
        if board[cell] == 2:
            return cell
def Posswin(p):
    """Return a cell completing (or blocking) a line for player p, else 0.

    A line whose three cell values multiply to 2 * mark**2 (18 for X, 50 for
    O) holds two of p's marks plus one blank; fb() picks out the blank cell.
    """
    if p == "X":
        target = 18
    elif p == "O":
        target = 50
    lines = ((1, 2, 3), (4, 5, 6), (7, 8, 9),
             (1, 5, 9), (3, 5, 7),
             (1, 4, 7), (2, 5, 8), (3, 6, 9))
    for a, b, c in lines:
        if board[a] * board[b] * board[c] == target:
            return fb(a, b, c)
    return 0
def iswin(turn):
    """Report the game outcome: 1 on a win or a turn-9 draw, else 0.

    A line product of 27 (3**3) means three X marks; 125 (5**3) three O
    marks -- no other combination of cell values {2, 3, 5} reaches either.
    """
    lines = ((1, 2, 3), (4, 5, 6), (7, 8, 9),
             (1, 5, 9), (3, 5, 7),
             (1, 4, 7), (2, 5, 8), (3, 6, 9))
    products = [board[a] * board[b] * board[c] for a, b, c in lines]
    if 27 in products:
        print("Winner is X")
        return 1
    if 125 in products:
        print("Winner is O")
        return 1
    if turn == 9:
        print("Match Draw")
        return 1
    return 0
def isdraw(turn):
    """Heuristic mid-game draw predictor (called only at turn 5).

    Counts each player's marks on the odd-numbered cells (centre + corners)
    and returns True when no recognized winnable pattern remains and O has
    no immediate winning move.
    NOTE(review): the `turn` argument is unused, and the two hard-coded
    patterns below are inferred only from the visible code -- confirm
    against actual play before relying on this predictor.
    """
    posx=posy=0
    for i in range(1,10):
        if board[i]==3:
            if i%2!=0:
                posx+=1
        elif board[i]==5:
            if i%2!=0:
                posy+=1
    #print(posx,posy)
    # Configurations where the game can still be forced -> not a draw.
    if(posx==3 and posy==1 and board[5]==3):
        return False
    elif(posx==3 and posy==2 and board[5]==5):
        return False
    elif Posswin("O")==0:
        return True
    return False
# Main game loop: the human picks a mark (3 = X, 5 = O); tempvar1/tempvar2
# select which parity of turn belongs to the human vs. the AI.
val=int(input("Choose 3 for 'X' or 5 for 'O':\n"))
if val==3:
    tempvar1=1
    tempvar2=0
elif val==5:
    tempvar1=0
    tempvar2=1
for turn in range(1,10):
    if turn%2==tempvar1:
        # Human's turn: read a cell index (no validation on range/occupancy).
        cross=int(input("Where to mark?\n"))
        board[cross]=val
        draw()
        var=iswin(turn)
        if var==1:
            break
    elif turn%2==tempvar2:
        # AI's turn: a fixed strategy keyed on the turn number
        # (win if possible, block the opponent, otherwise take a
        # strategically chosen free cell).
        print("AI turn")
        if turn==1:
            Go(1,turn)
            draw()
            var=iswin(turn)
            if var==1:
                break
        if turn==2:
            if board[5]==2:
                Go(5,turn)
            else:
                Go(1,turn)
            draw()
            var=iswin(turn)
            if var==1:
                break
        if turn==3:
            if board[9]==2:
                Go(9,turn)
            else:
                Go(3,turn)
            draw()
            var=iswin(turn)
            if var==1:
                break
        if turn==4:
            if Posswin("X")!=0:
                Go(Posswin("X"),turn)
            else: Go(Make(),turn)
            draw()
            var=iswin(turn)
            if var==1:
                break
        if turn==5:
            if Posswin("X")!=0:
                Go(Posswin("X"),turn)
            elif Posswin("O")!=0:
                Go(Posswin("O"),turn)
            elif board[7]==2:
                Go(7,turn)
            else: Go(3,turn)
            draw()
            var=iswin(turn)
            if var==1:
                break
        if turn==6:
            if Posswin("O")!=0:
                Go(Posswin("O"),turn)
            elif Posswin("X")!=0:
                Go(Posswin("X"),turn)
            else: Go(Make(),turn)
            draw()
            var=iswin(turn)
            if var==1:
                break
        if turn==7 or turn==9:
            if Posswin("X")!=0:
                Go(Posswin("X"),turn)
            elif Posswin("O")!=0:
                Go(Posswin("O"),turn)
            else: Go(fb(fb(1,2,3),fb(4,5,6),fb(7,8,9)),turn)
            draw()
            var=iswin(turn)
            if var==1:
                break
        if turn==8:
            if Posswin("O")!=0:
                Go(Posswin("O"),turn)
            elif Posswin("X")!=0:
                Go(Posswin("X"),turn)
            else: Go(fb(fb(1,2,3),fb(4,5,6),fb(7,8,9)),turn)
            draw()
            var=iswin(turn)
            if var==1:
                break
    # At turn 5 the heuristic may already predict an unavoidable draw.
    if turn==5:
        if isdraw(turn)==True:
            print("Match will be draw")
            break
| [
"noreply@github.com"
] | noreply@github.com |
e30bf0195bbfc6ed9aa15d5e111172064f3af938 | bc863127e5f44ede4cfe46316ec44ce00cffb2d4 | /config.py | d27612bdaa7edb6acf23a584e67ea2f321b668d9 | [] | no_license | yogeshBsht/FeedbackForm | d33ce048261177badbed07871711da6260b93086 | 8c87505100bda14add08b1bfa3918ffc95a97525 | refs/heads/main | 2023-06-02T15:42:36.984226 | 2021-06-21T17:06:31 | 2021-06-21T17:06:31 | 378,629,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | import os
from dotenv import load_dotenv
# Project root directory; .env values next to this file are loaded into the
# process environment at import time so Config can read them below.
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
class Config(object):
    """Flask application settings, read from the environment when available."""

    # Database: DATABASE_URL if set, otherwise a local SQLite file.
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'app.db')
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Security fix: prefer a real secret from the environment; the literal is
    # only a development fallback (identical to the old hard-coded value).
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'SECRET_KEY'
    # MAIL_SERVER = os.environ.get('MAIL_SERVER')
    # MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25)
    # MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None
    # MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    # MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    # ADMINS = ['your-email@example.com']
    # LANGUAGES = ['en', 'es']
    # MS_TRANSLATOR_KEY = os.environ.get('MS_TRANSLATOR_KEY')
    # POSTS_PER_PAGE = 25
| [
"ybnsit@gmail.com"
] | ybnsit@gmail.com |
9a6921acf2118b13a365d80619408f90133228ad | 12596a0809bc4ce7eba4f1f32cbf96c8191f5628 | /ProgramFlow/guessinggame.py | cd83bec8fcc24262a2d83ae812b093853753f8c7 | [] | no_license | MichaelAntropov/python-masterclass | e19c34d11eab42b9a3c58d568c866fb747ffe3ff | 037456ff3f67ae5adf16465e8882a42ada0ca6c1 | refs/heads/master | 2023-04-06T11:59:19.402503 | 2021-04-17T13:43:04 | 2021-04-17T13:43:04 | 342,575,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | import random
# Number-guessing game: think of a number in (lowest, highest]; the player
# guesses until correct, or enters 0 to give up.
highest = 10
lowest = 0
# Fix: draw from lowest+1 so the answer can never collide with the 0
# "give up" sentinel below (an answer of 0 made the game unwinnable).
answer = random.randint(lowest + 1, highest)

print("Please guess a number between {} and {}:".format(lowest, highest))

while True:
    guess = int(input())
    if guess == 0:
        # Fix: stop the loop once the player gives up; previously the game
        # kept prompting after printing the give-up message.
        print("U gave up :(")
        break
    elif guess < lowest or guess > highest:
        print("???")
    elif guess < answer:
        print("Please guess higher: ")
    elif guess > answer:
        print("please guess lower:")
    else:
        print("U got it!")
        break
| [
"mikhael.antropov@gmail.com"
] | mikhael.antropov@gmail.com |
3b02f493286c8655b3bc6b8e6da7a3105dcca05f | a3b46d9b89bf7cf984413845db604ec2c98df8b3 | /code.py | 78d1cca2ec8f0d4db535249a5eb54e431c8c5d2a | [] | no_license | asahazmy/Field-prediction | 3c1b3cd1a057457648237db2933f5608dff6a305 | 6b7ff05371e20ac9da9cd54ca74ac606914c7f70 | refs/heads/master | 2023-01-03T06:14:01.879003 | 2020-10-25T13:08:21 | 2020-10-25T13:08:21 | 275,312,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,972 | py | import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import reduce_sum
from tensorflow.keras.backend import pow
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, MaxPool2D, UpSampling2D, Concatenate, Add, Flatten
from tensorflow.keras.losses import binary_crossentropy
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import os
import cv2
#Configurations
load_pretrained_model = False # load a pre-trained model
save_model = True # save the model after training
train_dir = '' # directory of training images
pretrained_model_path = '' # path of pretrained model
model_save_path = '' # path of model to save
epochs = 25
# batch size for training unet
k_size = 3 # kernel size 3x3
val_size = .20 # split of training set between train and validation set
# NOTE(review): `info` (and `dataset` used further below) are never defined
# in this file; this line raises NameError unless a tfds-style
# `dataset, info = tfds.load(..., with_info=True)` call is restored above.
TRAIN_LENGTH = info.splits['train'].num_examples
BATCH_SIZE = 64
BUFFER_SIZE = 1000
STEPS_PER_EPOCH = TRAIN_LENGTH // BATCH_SIZE
'''input data & mask'''
# Normalize one image/mask pair.
def normalize(input_image, input_mask):
    """Scale the image to [0, 1] floats and shift mask labels to start at 0."""
    scaled = tf.cast(input_image, tf.float32) / 255.0
    return scaled, input_mask - 1
#input data
def load_image_train(datapoint):
    """Training preprocessing: resize to 256x256, random horizontal flip
    (applied identically to image and mask), then normalize.

    Fixes vs. original: the mask-resize call was missing a closing
    parenthesis (SyntaxError), and tf.random.uniform was called without its
    required shape argument (TypeError).
    NOTE(review): load_image_test resizes to 128x128 -- confirm the intended
    model input size; the pipelines should normally agree.
    """
    input_image = tf.image.resize(datapoint['image'], (256, 256))
    input_mask = tf.image.resize(datapoint['segmentation_mask'], (256, 256))
    if tf.random.uniform(()) > 0.5:
        input_image = tf.image.flip_left_right(input_image)
        input_mask = tf.image.flip_left_right(input_mask)
    input_image, input_mask = normalize(input_image, input_mask)
    return input_image, input_mask
def load_image_test(datapoint):
    # Eval preprocessing: deterministic resize + normalize (no augmentation).
    # NOTE(review): resizes to 128x128 while load_image_train uses 256x256;
    # confirm which input size the model is meant to see.
    input_image = tf.image.resize(datapoint['image'], (128, 128))
    input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
    input_image, input_mask = normalize(input_image, input_mask)
    return input_image, input_mask
# Build the tf.data input pipelines.
# NOTE(review): `dataset` is not defined in this file (see TRAIN_LENGTH
# above); these lines assume a tfds-style dict of splits.
train = dataset['train'].map(load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
test = dataset['test'].map(load_image_test)
train_dataset = train.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
train_dataset = train_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
test_dataset = test.batch(BATCH_SIZE)
'''checking the data'''
def display(display_list):
    """Plot up to three panels (input image, true mask, predicted mask)."""
    plt.figure(figsize=(15, 15))
    titles = ['Input Image', 'True Mask', 'Predicted Mask']
    for idx, item in enumerate(display_list):
        plt.subplot(1, len(display_list), idx + 1)
        plt.title(titles[idx])
        plt.imshow(tf.keras.preprocessing.image.array_to_img(item))
        plt.axis('off')
    plt.show()
# Sanity-check the pipeline: show a few raw training image/mask pairs.
for image, mask in train.take(4):
    sample_image, sample_mask = image, mask
    display([sample_image, sample_mask])
'''ResUnet'''
def bn_act(x, act=True):
    """Batch-normalize `x`, optionally followed by a ReLU activation."""
    normed = tf.keras.layers.BatchNormalization()(x)
    return tf.keras.layers.Activation('relu')(normed) if act == True else normed
def conv_block(x, filters, kernel_size=3, padding='same', strides=1):
    """Pre-activation convolution: BN + ReLU, then a Conv2D layer."""
    pre_activated = bn_act(x)
    return Conv2D(filters, kernel_size, padding=padding, strides=strides)(pre_activated)
def stem(x, filters, kernel_size=3, padding='same', strides=1):
    """First ResUNet block: conv + conv_block with a 1x1 projected shortcut."""
    main_path = Conv2D(filters, kernel_size, padding=padding, strides=strides)(x)
    main_path = conv_block(main_path, filters, kernel_size, padding, strides)
    skip_path = Conv2D(filters, kernel_size=1, padding=padding, strides=strides)(x)
    skip_path = bn_act(skip_path, act=False)
    return Add()([main_path, skip_path])
def residual_block(x, filters, kernel_size=3, padding='same', strides=1):
    """Pre-activated residual block with a projected shortcut.

    Fix: the body previously passed the module-level constant ``k_size`` to
    conv_block, silently ignoring the ``kernel_size`` parameter. With the
    default kernel_size=3 (== k_size) existing call sites are unaffected.
    """
    res = conv_block(x, filters, kernel_size, padding, strides)
    res = conv_block(res, filters, kernel_size, padding, 1)
    shortcut = Conv2D(filters, kernel_size, padding=padding, strides=strides)(x)
    shortcut = bn_act(shortcut, act=False)
    output = Add()([shortcut, res])
    return output
def upsample_concat_block(x, xskip):
    """2x upsample of `x`, concatenated with its encoder skip tensor."""
    upsampled = UpSampling2D((2, 2))(x)
    return Concatenate()([upsampled, xskip])
def ResUNet(img_h, img_w):
    """Assemble a ResUNet for single-channel img_h x img_w inputs.

    Four-level encoder/decoder built from pre-activation residual blocks
    with a two-conv bridge; the head is a 1x1 Conv2D with 4 sigmoid
    channels (one independent mask per class).
    """
    f = [16, 32, 64, 128, 256]
    inputs = Input((img_h, img_w, 1))
    ## Encoder
    e0 = inputs
    e1 = stem(e0, f[0])
    e2 = residual_block(e1, f[1], strides=2)
    e3 = residual_block(e2, f[2], strides=2)
    e4 = residual_block(e3, f[3], strides=2)
    e5 = residual_block(e4, f[4], strides=2)
    ## Bridge
    b0 = conv_block(e5, f[4], strides=1)
    b1 = conv_block(b0, f[4], strides=1)
    ## Decoder
    u1 = upsample_concat_block(b1, e4)
    d1 = residual_block(u1, f[4])
    u2 = upsample_concat_block(d1, e3)
    d2 = residual_block(u2, f[3])
    u3 = upsample_concat_block(d2, e2)
    d3 = residual_block(u3, f[2])
    u4 = upsample_concat_block(d3, e1)
    d4 = residual_block(u4, f[1])
    outputs = tf.keras.layers.Conv2D(4, (1, 1), padding="same", activation="sigmoid")(d4)
    model = tf.keras.models.Model(inputs, outputs)
    return model
'''Loss fuction'''
def dsc(y_true, y_pred):
    """Soft Dice coefficient between flattened masks (smooth=1 for stability)."""
    smooth = 1.
    flat_true = Flatten()(y_true)
    flat_pred = Flatten()(y_pred)
    overlap = reduce_sum(flat_true * flat_pred)
    return (2. * overlap + smooth) / (reduce_sum(flat_true) + reduce_sum(flat_pred) + smooth)
def dice_loss(y_true, y_pred):
    """Dice loss: one minus the soft Dice coefficient."""
    return 1 - dsc(y_true, y_pred)
def bce_dice_loss(y_true, y_pred):
    """Binary cross-entropy combined additively with the Dice loss."""
    return binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
# Focal Tversky loss, brought to you by: https://github.com/nabsabraham/focal-tversky-unet
def tversky(y_true, y_pred, smooth=1e-6):
    """Tversky index with alpha=0.7 (false negatives weighted more heavily)."""
    flat_true = tf.keras.layers.Flatten()(y_true)
    flat_pred = tf.keras.layers.Flatten()(y_pred)
    tp = tf.reduce_sum(flat_true * flat_pred)
    fn = tf.reduce_sum(flat_true * (1 - flat_pred))
    fp = tf.reduce_sum((1 - flat_true) * flat_pred)
    alpha = 0.7
    return (tp + smooth) / (tp + alpha * fn + (1 - alpha) * fp + smooth)
def tversky_loss(y_true, y_pred):
    """Loss form of the Tversky index."""
    return 1 - tversky(y_true, y_pred)
def focal_tversky_loss(y_true, y_pred):
    """Focal Tversky loss: (1 - Tversky index) ** 0.75."""
    tversky_index = tversky(y_true, y_pred)
    gamma = 0.75
    return tf.keras.backend.pow(1 - tversky_index, gamma)
'''Compile & Fit'''
# NOTE(review): img_h and img_w are not defined anywhere in this file, so
# this call raises NameError as written; presumably they should match the
# training resize (256 in load_image_train). Confirm before running.
model = ResUNet(img_h=img_h, img_w=img_w)
# `lr` is the legacy keyword name (renamed `learning_rate` in newer TF).
adam = tf.keras.optimizers.Adam(lr = 0.05, epsilon = 0.1)
model.compile(optimizer=adam, loss=focal_tversky_loss, metrics=[tversky])
if load_pretrained_model:
    try:
        model.load_weights(pretrained_model_path)
        print('pre-trained model loaded!')
    except OSError:
        print('You need to run the model and load the trained model')
#history = model.fit_generator(generator=training_generator, validation_data=validation_generator, epochs=epochs, verbose=1)
if save_model:
    model.save(model_save_path)
| [
"noreply@github.com"
] | noreply@github.com |
506ab3ede97c112af86c4a23956ee39a25c9aecd | 83b1a267809c08a57a3bb16c103d71539502a650 | /job/migrations/0011_apply_created_at.py | c9ebca4b68d4fe3dc9d8d3052bdac004ee5816f8 | [] | no_license | rimatechcampus/django-jobboard-project- | c66933295b4692c7d3cb055dcf0cbaef80424b38 | 8823e1e7db011a4fbaa0fc87f1810bcd5dab08c6 | refs/heads/master | 2022-11-20T16:40:56.495550 | 2020-07-19T16:52:13 | 2020-07-19T16:52:13 | 279,794,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # Generated by Django 3.0.8 on 2020-07-18 08:13
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds Apply.created_at.
    # NOTE(review): auto_now=True refreshes the timestamp on every save();
    # for a "created_at" field auto_now_add=True is the usual choice. Do not
    # edit a migration that has already been applied -- change the model and
    # generate a new migration instead.
    dependencies = [
        ('job', '0010_apply_job'),
    ]
    operations = [
        migrations.AddField(
            model_name='apply',
            name='created_at',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| [
"riyamtechcampus@gmail.com"
] | riyamtechcampus@gmail.com |
55936ee6e0c535be6c763d5bbe570c3d5d24d065 | 5c7deaef83574a53416681063827cdbcb3004b7c | /PyGameMultiAgent/gameclient.py | 3dc5ddca4fd9eafea0898bdf3db2f38152da05a0 | [] | no_license | guotata1996/baselines | ba53ed2bb3d8015551f8e46dd8398a21b638ea80 | d7e2bee2ce1d98e5f2c511d6ede4e627e1112ad6 | refs/heads/master | 2020-06-22T00:00:09.431445 | 2019-08-07T03:22:39 | 2019-08-07T03:22:39 | 138,416,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,635 | py | import pygame
import pygame.locals
import socket
import select
import random
import numpy as np
from baselines.PyGameMultiAgent.staticworld import StaticWorld
class GameClient(object):
    """UDP game client: sends key commands to the server and renders the
    world state it broadcasts.

    Wire protocol (inferred from the visible code -- confirm against the
    server): "cz" = connect, "uu"/"ul"/"ur" = move up / turn left / turn
    right, "d" = disconnect; server replies are '|'-separated
    "x,y,angle,tag" tuples, the first of which is this client's own pose.
    """
    def __init__(self, addr="127.0.0.1", serverport=9009):
        # Pick a random local port for this client's UDP socket.
        self.clientport = random.randrange(8000, 8999)
        self.conn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Bind to localhost - set to external ip to connect from other computers
        self.conn.bind(("127.0.0.1", self.clientport))
        self.addr = addr
        self.serverport = serverport
        self.read_list = [self.conn]
        self.write_list = []
        self.setup_pygame()
    def setup_pygame(self):
        # Load the static map and open a window sized to the local view.
        self.world = StaticWorld('../Maps/map_1.csv')
        self.screen = pygame.display.set_mode((self.world.local_width * self.world.zoom, self.world.local_length * self.world.zoom))
        # Only QUIT and KEYDOWN events are of interest.
        pygame.event.set_allowed(None)
        pygame.event.set_allowed([pygame.locals.QUIT,
                                  pygame.locals.KEYDOWN])
        pygame.key.set_repeat(100, 100) #move faster
    def run(self):
        """Main loop: poll the socket for state, render, forward key input."""
        running = True
        clock = pygame.time.Clock()
        tickspeed = 30
        try:
            # Initialize connection to server
            self.conn.sendto("cz".encode('utf-8'), (self.addr, self.serverport))
            while running:
                clock.tick(tickspeed)
                # select on specified file descriptors
                readable, writable, exceptional = (
                    select.select(self.read_list, self.write_list, [], 0)
                )
                for f in readable:
                    if f is self.conn:
                        msg, addr = f.recvfrom(2048)
                        msg = msg.decode('utf-8') #Coordinates of all players
                        self_pos = None
                        AllZombiePose = []
                        # First pose in the message is treated as our own.
                        for position in msg.split('|')[:-1]:
                            x, y, angle, tag = position.split(',')
                            x = float(x)
                            y = float(y)
                            angle = float(angle)
                            tag = int(tag)
                            if self_pos is None:
                                self_pos = (x, y, angle)
                            AllZombiePose.append((x, y, angle, tag))
                        self.world.draw_local(self.screen, self_pos, AllZombiePose)
                        #self.world.draw_global(self.screen)
                        #self.world.draw_zombie_global(self.screen, (x, y, angle))
                for event in pygame.event.get():
                    if event.type == pygame.QUIT or event.type == pygame.locals.QUIT:
                        running = False
                        break
                    elif event.type == pygame.locals.KEYDOWN:
                        if event.key == pygame.locals.K_UP:
                            self.conn.sendto("uu".encode('utf-8'), (self.addr, self.serverport))
                        elif event.key == pygame.locals.K_LEFT:
                            self.conn.sendto("ul".encode('utf-8'), (self.addr, self.serverport))
                        elif event.key == pygame.locals.K_RIGHT:
                            self.conn.sendto("ur".encode('utf-8'), (self.addr, self.serverport))
                # Drop any queued key repeats so one press = one command.
                pygame.event.clear(pygame.locals.KEYDOWN)
                pygame.display.update()
        finally:
            # Always tell the server we are leaving, even on error/quit.
            self.conn.sendto("d".encode('utf-8'), (self.addr, self.serverport))
if __name__ == "__main__":
g = GameClient()
g.run() | [
"jg4006@columbia.edu"
] | jg4006@columbia.edu |
354f4e8b11fc7deaae648a37d207d137f827d66e | 0aa87ee2e544f56c17c2dde28a3b3feed08daa14 | /apps/users/urls.py | 6dda1d1373eadae3c77476250c17308642600204 | [] | no_license | yanshigou/mxonline | f2cc44724c1511418953e7e06d04661244b29455 | cebc3295734713846828246fc54dd33f8df14f86 | refs/heads/master | 2022-12-09T12:11:05.734326 | 2022-08-17T10:38:13 | 2022-08-17T10:38:13 | 148,120,737 | 0 | 2 | null | 2022-12-08T02:58:15 | 2018-09-10T08:06:10 | Python | UTF-8 | Python | false | false | 1,309 | py | # -*- coding: utf-8 -*-
__author__ = 'dzt'
__date__ = '2018/12/21 23:48'
from django.conf.urls import url
from .views import UserInfoView, UploadImageView, UpdatePwdView, SendEmailCodeView, UpdateEmailView, MyCourses
from .views import MyFavOrgView, MyFavTeacherView, MyFavCourseView, MyMessageView
urlpatterns = [
# 用户信息
url(r'^info/$', UserInfoView.as_view(), name='user_info'),
# 用户头像上传
url(r'^image/upload/$', UploadImageView.as_view(), name='image_upload'),
# 用户个人中心修改密码
url(r'^update/pwd/$', UpdatePwdView.as_view(), name='update_pwd'),
# 发送邮箱验证码
url(r'^sendemail_code/$', SendEmailCodeView.as_view(), name='sendemail_code'),
# 修改邮箱
url(r'^update_email/$', UpdateEmailView.as_view(), name='update_email'),
# 我的教程
url(r'^mycourses/$', MyCourses.as_view(), name='mycourses'),
# 我的收藏 直播机构
url(r'^myfav/org/$', MyFavOrgView.as_view(), name='myfav_org'),
# 我的收藏 主播
url(r'^myfav/teacher/$', MyFavTeacherView.as_view(), name='myfav_teacher'),
# 我的收藏 教程
url(r'^myfav/course/$', MyFavCourseView.as_view(), name='myfav_course'),
# 我的消息
url(r'^mymessage/$', MyMessageView.as_view(), name='mymessage'),
] | [
"569578851@qq.com"
] | 569578851@qq.com |
fdb4bdf1a20e33fa178f567d6dfa0aac72099ca5 | c6716e87bde12a870d517ebe64c6916477ef3251 | /tableFormats.py | dfc915f56ec17c0cb2887a8fce4b5c6e7c0c0ed0 | [
"BSD-3-Clause"
] | permissive | adasilva/prettytable | efca75828341319e2962727e55f7cce5519eb4b7 | 899e255a53b257cf392565dc1d9f02bef25c4c4a | refs/heads/master | 2021-01-25T04:01:43.633364 | 2015-08-19T15:26:49 | 2015-08-19T15:26:49 | 40,557,221 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,591 | py | from prettytable import PrettyTable
import abc
class TableString(object):
    """Metaclass for formatted table strings."""
    # Python 2-style ABC declaration; `__metaclass__` is ignored under
    # Python 3 (the demo at the bottom of this file uses Python 2 print
    # statements, so the module targets Python 2).
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def __unicode__(self): return
    @abc.abstractmethod
    def __str__(self): return
    @abc.abstractmethod
    def get_string(self,outfile,**kwargs):
        '''return the string'''
        return
class latexTable(TableString):
    """Construct and export a LaTeX table from a PrettyTable.
    latexTableExporter(table,**kwargs)
    Required argument:
    -----------------
    table - an instance of prettytable.PrettyTable
    Optional keyword arguments:
    --------------------------
    caption - string - a caption for the table
    label - string - the latex reference ID
    """
    def __init__(self,table,caption='',label=''):
        self.table = table
        self.caption = caption
        self.label = label
    def __str__(self):
        return self.get_string()
    def __unicode__(self):
        return self.get_string()
    def get_string(self,**kwargs):
        ''' Construct LaTeX string from table'''
        # Relies on prettytable private API (_get_options/_format_rows);
        # per the demo below, passing float_format here is a known issue.
        options = self.table._get_options(kwargs) #does not work bc of prettytable bug
        # Preamble: centered table with caption/label, all columns 'c'.
        s = r'\begin{table}' + '\n'
        s = s + r'\centering' + '\n'
        s = s + r'\caption{%s}\label{%s}' %(self.caption,self.label)
        s = s + '\n'
        s = s + r'\begin{tabular}{'
        s = s + ''.join(['c',]*len(self.table.field_names)) + '}'
        s = s + '\n'
        # Header row, followed by \hline.
        s = s + '&'.join(self.table.field_names)+r'\\ \hline'+'\n'
        rows = self.table._format_rows(self.table._rows,options)
        #print rows
        for i in range(len(rows)):
            row = [str(itm) for itm in rows[i]]
            s = s + '&'.join(row)
            # Every row but the last ends with \\.
            if i != len(self.table._rows)-1:
                s = s + r'\\'
            s = s + '\n'
        s = s + r'\end{tabular}' + '\n'
        s = s + r'\end{table}'
        return s
if __name__ == "__main__":
    # Demo / manual test (Python 2 print statements).
    t = PrettyTable(['a','b','c'])
    t.add_row([1,2.0,3.14159])
    xt = latexTable(t,caption='Testing formatted table string',label='tab:test')
    print '1. Simply print the table:\n'
    print xt
    print '\n2. Use get_string method:\n'
    print xt.get_string()
    print '\n3. Format floats to two decimal points: (KNOWN ISSUE)\n'
    print xt.get_string(float_format='0.2')
    print '\n4. Workaround to format floats:\n'
    # Setting float_format on the table itself works around the kwargs issue.
    t.float_format = '0.2'
    xt2 = latexTable(t,caption='Floats are formatted to have two decimal places',label='tab:test2')
    print xt2
| [
"awesomeashley527@gmail.com"
] | awesomeashley527@gmail.com |
0861548899e4e325b8f98626824a5f2a3f40c4a1 | d620b82c57adde1636826601e2b99209689ad2c4 | /model/xgboost/xgboostprocess.py | 8facdb750067deadb04ba3f2ca6276a0d2ee0326 | [] | no_license | weigebushiyao/HFData-PitchSystemModel | 1a75e2da6bef1bdbcae0eee1b1b9519bee03b56c | 58c77cdfcf85e49d7ab1f7163374c906ac0df361 | refs/heads/master | 2022-07-11T22:30:35.794840 | 2020-05-18T06:36:54 | 2020-05-18T06:36:54 | 264,849,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,260 | py | #-*-coding:utf-8-*-
from sklearn.model_selection import RandomizedSearchCV
import pandas as pd
from xgboost.sklearn import XGBRegressor
from model.get_data_path import get_train_data_path,get_test_data_path
from sklearn.model_selection import train_test_split
import os
from util.show_save_result import ShowAndSave
cur_path=os.path.abspath(os.path.dirname(__file__))
datafile = get_train_data_path()
class XgboostModel(ShowAndSave):
def __init__(self, params=None,jobname='xgb_model'):
super().__init__()
self.job_name=jobname
self.cur_path=cur_path
self.init_param()
self.params = params
self.model_file=self.model_path + self.job_name
def xgboostmodel(self):
df = pd.read_csv(datafile, encoding='utf-8', index_col=0)
print(df.shape)
traindata = df.iloc[:, :].values
x = traindata[:, :-1]
y = traindata[:, -1]
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7) # list
if self.params is None:
params={'max_depth':80,'n_estimators':512}
else:
params=self.params
raw_model = XGBRegressor(max_depth=128,n_estimators=768,learning_rate=0.01,silence=False)
raw_model.fit(x_train, y_train)
raw_model.save_model(self.model_file)
pred = raw_model.predict(x_test)
self.true=y_test
self.pred=pred
self.show_save_figure(fig_path=self.fig_path,modelname=self.job_name, detal_idx=500)
t_mean=self.cal_mean(self.true)
p_mean=self.cal_mean(self.pred)
self.save_result(self.result_path,true_mean=t_mean, pred_mean=p_mean)
def test_model(self,model_file=None):
if model_file is None:
modelfile=self.model_file
else:
modelfile=self.single_model_path+'model_'+str(model_file)
fault_test_file_path=get_test_data_path()
df=pd.read_csv(fault_test_file_path,encoding='utf-8',index_col=0)
data=df.iloc[:,:].values
x=data[:,:-1]
y=data[:,-1]
xgb=XGBRegressor()
raw_model=xgb.load_model(modelfile)
pred=raw_model.predict(x)
self.true=y
self.pred=pred
self.show_save_figure(fig_path=self.fault_data_test_figure_path,modelname=self.job_name,detal_idx=10)
t_mean=self.cal_mean(self.true)
p_mean=self.cal_mean(self.pred)
self.save_result(self.fault_data_test_result_path,true_mean=t_mean,pred_mean=p_mean)
def params_tuned(self):
xgb=XGBRegressor(objective='reg:squarederror')
params={'max_depth':[90,100,128],'n_estimators':[768,800,850]}
grid=RandomizedSearchCV(xgb,params,cv=3,scoring='neg_mean_squared_error',n_iter=6)
df = pd.read_csv(datafile, encoding='utf-8', index_col=0)
traindata = df.iloc[100000:700000, :].values
x = traindata[:, :-1]
y = traindata[:, -1]
grid.fit(x,y)
print(grid.best_score_)
print(grid.best_params_)
self.params=grid.best_params_
df=pd.DataFrame(list(self.params.items()))
df.to_csv(self.params_file_path+'params.csv',encoding='utf-8',index=None,header=None)
xgb = XgboostModel()
#xgb.params_tuned()
xgb.xgboostmodel()
#xgb.test_model()
| [
"505456072@qq.com"
] | 505456072@qq.com |
7fdb76e70da796bb88882454749b09f5a59d1b45 | ec4586abcc179293656f0afd837b0d521d072a75 | /torchsl/mvsl/__init__.py | d61621ffcfa464dc736802882d9237e957f9b3a7 | [] | no_license | ZDstandup/mvda | e483387e0b7e50c84bc28ffd864d44a724d23762 | 13f854e063f10a9374856d0e2005b233788a645f | refs/heads/master | 2021-01-13T20:42:51.842836 | 2019-12-15T19:16:13 | 2019-12-15T19:16:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | from .mvda import MvDA, MvDAvc, RMvDA, RMvDAvc
from .pcmvda import pcMvDA
from .mvcsda import MvCSDA, MvDAplusCS
from .mvlfda import MvLFDA, MvLFDAvc, RMvLFDA, RMvLFDAvc
from .mvccda import MvCCDA, MvDCCCDA
__all__ = [
'MvDA', 'MvDAvc', 'RMvDA', 'RMvDAvc',
'pcMvDA',
'MvCSDA', 'MvDAplusCS',
'MvLFDA', 'MvLFDAvc', 'RMvLFDA', 'RMvLFDAvc',
'MvCCDA', 'MvDCCCDA'
]
| [
"inspiros.tran@gmail.com"
] | inspiros.tran@gmail.com |
ff0ea4acf2347925603c4adec3f917e249a6c633 | eb0ff0b6979a4cef6b1d8509d10579da9a6aca90 | /main.py | 5c8cb04f647953e8faa71d2b9c1e2fbec3279661 | [] | no_license | SusaOP/Motion-Detection-Security-Camera | 71b44e2b80ddf99f611c85c375b68afa242f41e3 | 33468eb9a6743476e048fd2785c0ad82cf5feb79 | refs/heads/main | 2023-06-27T20:30:12.629899 | 2021-08-06T01:04:39 | 2021-08-06T01:04:39 | 393,207,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,256 | py | import os
import shutil
from datetime import datetime
from operateCamera import videoRecord
from into_frames import toFrames
from compare_frames import compare
from send_email import fromFlaggedToSent
from send_email import establishAttachment
local_max = 0
for i in range(2):
saveDir, videoFile = videoRecord()
print(f'saveDir is {saveDir} and videoFile is {videoFile}')
toFrames(saveDir, videoFile)
print(f'dir is {saveDir}')
local_max, issueID = compare(saveDir)
if (local_max > 5): #significant movement is detected
print(f'Max is {local_max}, moving to Flagged...')
os.mkdir(f'./Flagged/{saveDir}')
os.replace(f'./{saveDir}/{videoFile}', f'./Flagged/{saveDir}/{videoFile}')
os.replace(f'./{saveDir}/frame_{issueID}.jpg', f'./Flagged/{saveDir}/frame_{issueID}-Detected.jpg')
shutil.rmtree(f'./{saveDir}')
attach_path = f'./Flagged/{saveDir}/frame_{issueID}-Detected.jpg'
establishAttachment(attach_path)
fromFlaggedToSent(saveDir, videoFile, issueID)
elif (local_max <= 5): #significant movement is not detected
print(f'Insignificant max of {local_max} is found. Removing ./{saveDir}')
shutil.rmtree(f'./{saveDir}')
local_max = 0 | [
"noreply@github.com"
] | noreply@github.com |
a25c70b086e30d5453a6b2028947b60a2489d0ec | 333b2e1284be6ea06a9989bcc76fd296f5c4f0a4 | /modules/study.py | 7aff8c48c34019e170d78e05afffc4ecb7954e76 | [] | no_license | luomeng007/MyLife | 567df155a30857e2c5f03049611d83eb0a847c02 | 76447fdfeaa83d7b77964560d56c67ce2cd36905 | refs/heads/main | 2023-01-20T14:17:30.613718 | 2020-11-29T10:46:26 | 2020-11-29T10:46:26 | 309,741,680 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,352 | py | # -*- coding: utf-8 -*-
import os
import time
import speech
class Study:
def __init__(self):
while True:
print("请您选择,提示:请输入序号1或者2")
print("1. 学习30分钟")
print("2. 学习60分钟")
self.choice = input("您的决定: ")
print("")
if self.choice == "1":
self.total_time = 30 * 60
break
elif self.choice == "2":
self.total_time = 60 * 60
break
else:
print("您的输入值有误,请重新输入!提示:输入数字1或者2")
continue
self.start_time = time.time()
self.flag = True
if not os.path.exists("./time_data_study.txt"):
self.time_total_study = 0
else:
with open("./time_data_study.txt", "r") as f:
time_data = f.readline()
self.time_total_study = float(time_data)
# judge whether the total time reaches 8 hours
if self.time_total_study >= 8:
print("今天学习时间太久了,请做点儿别的事情吧!")
print("")
self.flag = False
if self.choice == "2" and self.time_total_study == 7.5:
print("今日剩余学习时间30分钟,请重新选择")
print("")
self.flag = False
def main_program(self):
if self.flag:
self.start_learning()
self.update_data()
def start_learning(self):
print("开始学习!")
speech.say("los geht's")
while round(time.time() - self.start_time) != self.total_time:
# 这里可以加入一些语音互动
pass
speech.say("fertig!")
print("学习完成!")
if self.choice == "1":
self.time_total_study += 0.5
if self.choice == "2":
self.time_total_study += 1
def update_data(self):
with open("./time_data_study.txt", "w+") as f:
f.write(str(self.time_total_study) + '\n')
if __name__ == "__main__":
# ML: My Life
s = Study()
s.main_program() | [
"noreply@github.com"
] | noreply@github.com |
a0754fefb495c8c77a0ceb23a9ff13a8cc1d720f | 0a669c18356f783fdd31ac54519b7c91f2fb3ef7 | /01-Estrutura_Sequencial/08-Salario_hora_simples.py | 8153ac315ed945727e1f63797c0f8bc3d3d8dd4d | [] | no_license | guilhermejcmarinho/Praticas_Python_Elson | 5295bca77785c22c6502c7b35988a89e6e8fba8e | 27145a99dd18c57281736079d94aef468d7276dc | refs/heads/main | 2023-07-28T22:41:58.636331 | 2021-09-10T22:09:37 | 2021-09-10T22:09:37 | 400,654,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | sal_hora = float(input('Informe o valor da hora trabalhada: '))
hr_trab = float(input('Informe quantas horas trabalhou: '))
print('Salário total no mês: R$', round(sal_hora*hr_trab, 2)) | [
"gui.the.great@gmail.com"
] | gui.the.great@gmail.com |
c9593dfd5fb0088ce2c4645844975fd74e3c847e | 7b53e120dc4022b09eed0cf87a975482dc1d2056 | /M2/utils.py | 93ee422692d9b63b45deed53160a1d656ec285cf | [] | no_license | YuanKQ/DDI-VAE | 878ba120c2a61e7966bf1638680c5b39a610d690 | fe2c5a699e5294287c0b05b60fd037c21c7fddd1 | refs/heads/master | 2020-03-17T12:34:12.895257 | 2018-05-16T01:50:45 | 2018-05-16T02:11:56 | 133,594,155 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,766 | py | import prettytensor as pt
import tensorflow as tf
import numpy as np
logc = np.log(2.*np.pi)
c = - 0.5 * np.log(2*np.pi)
def tf_normal_logpdf(x, mu, log_sigma_sq):
return ( - 0.5 * logc - log_sigma_sq / 2. - tf.div( tf.square( tf.subtract( x, mu ) ), 2 * tf.exp( log_sigma_sq ) ) )
def tf_stdnormal_logpdf(x):
return ( - 0.5 * ( logc + tf.square( x ) ) )
def tf_gaussian_ent(log_sigma_sq):
return ( - 0.5 * ( logc + 1.0 + log_sigma_sq ) )
def tf_gaussian_marg(mu, log_sigma_sq):
return ( - 0.5 * ( logc + ( tf.square( mu ) + tf.exp( log_sigma_sq ) ) ) )
def tf_binary_xentropy(x, y, const = 1e-10):
return - ( x * tf.log ( tf.clip_by_value( y, const, 1.0 ) ) + \
(1.0 - x) * tf.log( tf.clip_by_value( 1.0 - y, const, 1.0 ) ) )
def feed_numpy_semisupervised(num_lab_batch, num_ulab_batch, x_lab, y, x_ulab):
size = x_lab.shape[0] + x_ulab.shape[0]
batch_size = num_lab_batch + num_ulab_batch
count = int(size / batch_size)
dim = int(x_lab.shape[1])
for i in range(count):
start_lab = int(i * num_lab_batch)
end_lab = int(start_lab + num_lab_batch)
start_ulab = int(i * num_ulab_batch)
end_ulab = int(start_ulab + num_ulab_batch)
yield [ x_lab[start_lab:end_lab,:int(dim/2)], x_lab[start_lab:end_lab,int(dim/2):dim], y[start_lab:end_lab],
x_ulab[start_ulab:end_ulab,:int(dim/2)], x_ulab[start_ulab:end_ulab,int(dim/2):dim] ]
def feed_numpy(batch_size, x):
size = x.shape[0]
count = int(size / batch_size)
dim = x.shape[1]
for i in range(count):
start = i * batch_size
end = start + batch_size
yield x[start:end]
def print_metrics(epoch, *metrics):
print(25*'-')
for metric in metrics:
print('[{}] {} {}: {}'.format(epoch, metric[0],metric[1],metric[2]))
print(25*'-') | [
"kq_yuan@outlook.com"
] | kq_yuan@outlook.com |
30049a45def159f6d425e056ab47ba6b13055d72 | 3fdf83182664bf1c5c8c5b91186ed1a476cdcae7 | /manage.py | b82fa89d927388c22b2efe834deb746bcbac493a | [] | no_license | gauravdhingra99/Webkiosk-online-Student-portal- | 7a3d47e1bd0e05d1a853685a66e28627ae04eef3 | fa1369e0e616b6688f9f906fd0c5ea42efa06368 | refs/heads/master | 2020-04-06T19:46:05.882112 | 2019-02-27T16:30:13 | 2019-02-27T16:30:13 | 157,748,816 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webkiosk.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"gauravdhingra9999@gmail.com"
] | gauravdhingra9999@gmail.com |
47220864385f35b099736c3ef297a7ae7f1cbe54 | ca08100b33a78c01bf49f097f4e80ed10e4ee9ad | /intrepidboats/apps/owners_portal/utils.py | 605fe7065629b6a2f9983f3de5ed580162b6c11a | [] | no_license | elite0401/intrepidpowerboats | 347eae14b584d1be9a61ca14c014135ab0d14ad0 | d2a475b60d17aa078bf0feb5e0298c927e7362e7 | refs/heads/master | 2021-09-11T01:51:47.615117 | 2018-04-06T02:20:02 | 2018-04-06T02:20:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,654 | py | from django.conf import settings
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.translation import gettext as _
def send_report_email(user_boat):
context = {
'user': user_boat.user,
'user_boat': user_boat,
'boat': user_boat.boat,
'site': Site.objects.get_current().domain,
'dashboard_url': reverse("owners_portal:owners_portal"),
}
send_mail(
subject=_("New boat report - Intrepid Powerboats"),
message=render_to_string('owners_portal/emails/report_email.txt', context),
from_email=settings.BUILD_A_BOAT['NO_REPLY_EMAIL_REPORTS'],
recipient_list=[user_boat.user.email],
html_message=render_to_string('owners_portal/emails/report_email.html', context),
)
def send_step_feedback_email(step_feedback):
context = {
'comments': step_feedback.comments,
'user': step_feedback.user,
'step': '{title} (phase: {phase})'.format(title=step_feedback.step.title, phase=step_feedback.step.phase),
'boat': '{boat} (model: {model})'.format(boat=step_feedback.step.user_boat,
model=step_feedback.step.user_boat.boat)
}
send_mail(
subject=_("{user} has sent feedback on {step} in Owner's portal - Intrepid Powerboats".format(
user=context['user'],
step=context['step'],
)),
message=render_to_string('owners_portal/emails/step_feedback_email.txt', context),
from_email=settings.NO_REPLY_EMAIL,
recipient_list=settings.TO_EMAIL['OWNERS_PORTAL_FEEDBACK_FORM'],
html_message=render_to_string('owners_portal/emails/step_feedback_email.html', context),
)
def send_new_shared_video_uploaded_email(shared_video):
from django.contrib.auth.models import User
admins = User.objects.filter(is_superuser=True)
subject = _("New uploaded video to vimeo")
to = admins.values_list('email', flat=True)
from_email = settings.NO_REPLY_EMAIL
site = Site.objects.get_current()
ctx = {
'user': shared_video.uploader,
'site': site.domain,
'admin_url': reverse("admin:owners_portal_sharedvideo_change", args=[shared_video.pk]),
}
message = render_to_string('owners_portal/emails/new_shared_video_email.txt', ctx)
html_message = render_to_string('owners_portal/emails/new_shared_video_email.html', ctx)
send_mail(subject=subject, message=message, from_email=from_email, recipient_list=to, html_message=html_message)
| [
"elite.wisdom@gmx.com"
] | elite.wisdom@gmx.com |
fcf325192bb689fddfa24f58302b76220e0f8f1b | 9708ad482f925fb5a57df285b478602ad2749196 | /lib.py | af36fdf38a4f7b1c906176f91a90afb6c6a5b74c | [] | no_license | cczeus/project-euler | 580b6e559da23554aaab06b82b671ebbf382c26c | 57970c2f0a2b64c5e444050bb437ba3b3620bff1 | refs/heads/master | 2022-06-02T23:25:47.280066 | 2022-05-19T23:30:41 | 2022-05-19T23:30:41 | 68,892,531 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | import math
def isPrime(num):
if num == 2:
return True
elif num % 2 == 0:
return False
elif num < 0:
return False
for j in range(3, int(math.sqrt(num)) + 1, 2):
if(num % j == 0):
return False
return True
def getFactors(num):
factors = []
i = 1
while i <= math.sqrt(num):
if num % i == 0:
factors.append(i)
factors.append(num / i)
i += 1
return factors
def getFactorsSum(num):
sum = 1
i = 2
while i <= math.sqrt(num):
if num % i == 0:
sum += i
sum += num / i
if i == num / i:
sum -= i
i += 1
return sum | [
"chriszuis@MacBook-Pro.local"
] | chriszuis@MacBook-Pro.local |
02af91d9a068eb13b6123c2f26b025668f5bb79f | 6eaf69ffd454ed6933e3395516246d878cb09781 | /repozeldapapp/tests/functional/test_authentication.py | f998f67ccdc2ccc018c17f9cecb7cb08697d7a58 | [] | no_license | ralphbean/repoze-ldap-app | 0d6658ef13b153736aaed6aa07fbdcaf65cbe1d9 | cc00fe59bcc286fd44d1e22a14c40cfc8419e21d | refs/heads/master | 2021-01-01T05:35:25.069715 | 2011-07-19T15:30:31 | 2011-07-19T15:30:31 | 2,072,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,583 | py | # -*- coding: utf-8 -*-
"""
Integration tests for the :mod:`repoze.who`-powered authentication sub-system.
As repoze-ldap-app grows and the authentication method changes, only these tests
should be updated.
"""
from repozeldapapp.tests import TestController
class TestAuthentication(TestController):
"""Tests for the default authentication setup.
By default in TurboGears 2, :mod:`repoze.who` is configured with the same
plugins specified by repoze.what-quickstart (which are listed in
http://code.gustavonarea.net/repoze.what-quickstart/#repoze.what.plugins.quickstart.setup_sql_auth).
As the settings for those plugins change, or the plugins are replaced,
these tests should be updated.
"""
application_under_test = 'main'
def test_forced_login(self):
"""Anonymous users are forced to login
Test that anonymous users are automatically redirected to the login
form when authorization is denied. Next, upon successful login they
should be redirected to the initially requested page.
"""
# Requesting a protected area
resp = self.app.get('/secc/', status=302)
assert resp.location.startswith('http://localhost/login')
# Getting the login form:
resp = resp.follow(status=200)
form = resp.form
# Submitting the login form:
form['login'] = u'manager'
form['password'] = 'managepass'
post_login = form.submit(status=302)
# Being redirected to the initially requested page:
assert post_login.location.startswith('http://localhost/post_login')
initial_page = post_login.follow(status=302)
assert 'authtkt' in initial_page.request.cookies, \
"Session cookie wasn't defined: %s" % initial_page.request.cookies
assert initial_page.location.startswith('http://localhost/secc/'), \
initial_page.location
def test_voluntary_login(self):
"""Voluntary logins must work correctly"""
# Going to the login form voluntarily:
resp = self.app.get('/login', status=200)
form = resp.form
# Submitting the login form:
form['login'] = u'manager'
form['password'] = 'managepass'
post_login = form.submit(status=302)
# Being redirected to the home page:
assert post_login.location.startswith('http://localhost/post_login')
home_page = post_login.follow(status=302)
assert 'authtkt' in home_page.request.cookies, \
'Session cookie was not defined: %s' % home_page.request.cookies
assert home_page.location == 'http://localhost/'
def test_logout(self):
"""Logouts must work correctly"""
# Logging in voluntarily the quick way:
resp = self.app.get('/login_handler?login=manager&password=managepass',
status=302)
resp = resp.follow(status=302)
assert 'authtkt' in resp.request.cookies, \
'Session cookie was not defined: %s' % resp.request.cookies
# Logging out:
resp = self.app.get('/logout_handler', status=302)
assert resp.location.startswith('http://localhost/post_logout')
# Finally, redirected to the home page:
home_page = resp.follow(status=302)
authtkt = home_page.request.cookies.get('authtkt')
assert not authtkt or authtkt == 'INVALID', \
'Session cookie was not deleted: %s' % home_page.request.cookies
assert home_page.location == 'http://localhost/', home_page.location
| [
"ralph.bean@gmail.com"
] | ralph.bean@gmail.com |
1d1dfcd44cf71fa592df181189c7efe1af6af40d | 7a8560742946bfb95f4a252693264c34d4d0473d | /k2/centroid.py | e09491c999915180b3830fd138110d6e2140551a | [
"MIT"
] | permissive | benmontet/K2-noise | 3781e475ed6d5e2748a7ac3ddd878b8eec334254 | a4b682cdf33f85d2dffc4cef115dcedacfccb4b4 | refs/heads/master | 2016-09-05T13:02:09.051080 | 2014-10-25T14:36:22 | 2014-10-25T14:36:22 | 22,899,258 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | # -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["centroid"]
import numpy as np
from functools import partial
from itertools import izip, imap
from .c3k import find_centroid
def centroid(tpf, **kwargs):
# Load the data.
data = tpf.read()
times = data["TIME"]
images = data["FLUX"]
quality = data["QUALITY"]
# Get rid of the bad times based on quality flags.
m = np.isfinite(times) * (quality == 0)
images[~m, :] = np.nan
f = partial(find_centroid, **kwargs)
return [times] + list(imap(np.array, izip(*(imap(f, images)))))
| [
"danfm@nyu.edu"
] | danfm@nyu.edu |
2c4cfe1cd667b7a708c96b4978b00325826dfb19 | 0987f31e64bcacb41ba3a1e20054d7b8ac0d7346 | /contests/panasonic2020/a.py | 3c85e5a3a0a4b6b5ab170b052566849aab8ae7bf | [] | no_license | masakiaota/kyoupuro | 81ae52ab3014fb2b1e10472994afa4caa9ea463b | 74915a40ac157f89fe400e3f98e9bf3c10012cd7 | refs/heads/master | 2021-06-27T04:13:52.152582 | 2020-09-20T03:21:17 | 2020-09-20T03:21:17 | 147,049,195 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | py | import sys
sys.setrecursionlimit(1 << 25)
read = sys.stdin.readline
def read_ints():
return list(map(int, read().split()))
def read_a_int():
return int(read())
def read_tuple(H):
'''
H is number of rows
'''
ret = []
for _ in range(H):
ret.append(tuple(map(int, read().split())))
return ret
def read_col(H, n_cols):
'''
H is number of rows
n_cols is number of cols
A列、B列が与えられるようなとき
'''
ret = [[] for _ in range(n_cols)]
for _ in range(H):
tmp = list(map(int, read().split()))
for col in range(n_cols):
ret[col].append(tmp[col])
return ret
def read_matrix(H):
'''
H is number of rows
'''
ret = []
for _ in range(H):
ret.append(list(map(int, read().split())))
return ret
# return [list(map(int, read().split())) for _ in range(H)] # 内包表記はpypyでは遅いため
def read_map(H):
'''
H is number of rows
文字列で与えられた盤面を読み取る用
'''
return [read()[:-1] for _ in range(H)]
def read_map_as_int(H):
'''
#→1,.→0として読み込む
'''
ret = []
for _ in range(H):
ret.append([1 if s == '#' else 0 for s in read()[:-1]])
# 内包表記はpypyでは若干遅いことに注意
# #numpy使うだろうからこれを残しておくけど
return ret
# default import
from collections import defaultdict, Counter, deque
from operator import itemgetter
from itertools import product, permutations, combinations
from bisect import bisect_left, bisect_right # , insort_left, insort_right
from fractions import gcd
def lcm(a, b):
# 最小公約数
g = gcd(a, b)
return a * b // g
a = [1, 1, 1, 2, 1, 2, 1, 5, 2, 2, 1, 5, 1, 2, 1, 14,
1, 5, 1, 5, 2, 2, 1, 15, 2, 2, 5, 4, 1, 4, 1, 51]
print(a[int(input()) - 1])
| [
"aotamasakimail@gmail.com"
] | aotamasakimail@gmail.com |
dc99e0b0e9ab6f25c323a84c139ce0ec4d9fcdeb | bcd33ba045b68fe6fba6f7a3a8fd95124106ac16 | /tests/test_dates.py | d2c3b3422cc62b44cc924dd40f9e617529822d61 | [
"MIT"
] | permissive | bfontaine/Romme | 43d9ba2f6cd09f7b24f8916b121854521009cdd0 | affdfb23a6bb882c17da95ec3767710d5bebd59a | refs/heads/master | 2021-03-27T12:29:13.329232 | 2017-06-10T19:42:30 | 2017-06-10T19:42:30 | 93,895,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | # -*- coding: UTF-8 -*-
import unittest
from romme.dates import RepublicanDate
class TestDates(unittest.TestCase):
def test_str(self):
rd = RepublicanDate(1, 1, 1)
self.assertEqual("1 Vendémiaire, an I", str(rd))
| [
"b@ptistefontaine.fr"
] | b@ptistefontaine.fr |
a9f2bfe4189be9732b9d1c4db1fff1baab5cbbd9 | 94b8f8f7241545e614dc51f708c3b5b876f6db0c | /test.py | ce1d52ce890c992dc59565079f5e3c9ef7ba3cca | [
"MIT"
] | permissive | aliyun/The-Blessings-of-Unlabeled-Background-in-Untrimmed-Videos | b369155d11560508a43892da1e84f46cc7ae0852 | aca214c56fc05778a1f9f382c2f634cbeca4d852 | refs/heads/master | 2023-06-23T20:32:16.577804 | 2021-07-27T06:40:45 | 2021-07-27T06:40:45 | 370,694,896 | 30 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,256 | py | import torch
import torch.nn as nn
import numpy as np
import utils
import os
import os.path as osp
import json
from eval.eval_detection import ANETdetection
from tqdm import tqdm
import sys
def test(net, config, logger, test_loader, test_info, step, model_file=None):
with torch.no_grad():
net.eval()
if model_file is not None:
net.load_state_dict(torch.load(model_file))
final_res = {}
final_res['version'] = 'VERSION 1.3'
final_res['results'] = {}
final_res['external_data'] = {'used': True, 'details': 'Features from I3D Network'}
num_correct = 0.
num_total = 0.
result_store_numpy_path = './WUM_result_numpy'
load_iter = iter(test_loader)
for i in range(len(test_loader.dataset)):
_data, _label, _, vid_name, vid_num_seg = next(load_iter)
_data = _data.cuda()
_label = _label.cuda()
vid_num_seg = vid_num_seg[0].cpu().item()
num_segments = _data.shape[1]
features_div,_,_,_,_ = net(_data)
_,project_num,_ = features_div.shape
score_act = np.load(osp.join(result_store_numpy_path,vid_name[0]+'_score.npy'))
feat_act = np.load(osp.join(result_store_numpy_path,vid_name[0]+'_feat_act.npy'))
feat_bkg = np.load(osp.join(result_store_numpy_path,vid_name[0]+'_feat_bkg.npy'))
features = np.load(osp.join(result_store_numpy_path,vid_name[0]+'_features.npy'))
cas_softmax = np.load(osp.join(result_store_numpy_path,vid_name[0]+'_cas.npy'))
score_act = torch.Tensor(score_act).cuda()
feat_act = torch.Tensor(feat_act).cuda()
feat_bkg = torch.Tensor(feat_bkg).cuda()
features = torch.Tensor(features).cuda()
cas_softmax = torch.Tensor(cas_softmax).cuda()
features_div = features_div[0]
div_max = torch.max(features_div,dim=1,keepdim=True)[0]
div_min = torch.min(features_div,dim=1,keepdim=True)[0]
features_div = (features_div-div_min)/(div_max-div_min)
features_div = features_div.permute(1,0)
features_div_mean = torch.mean(torch.unsqueeze(features_div,dim=0),2,keepdim=True)
feat_magnitudes_act = torch.mean(torch.norm(feat_act, dim=2), dim=1)
feat_magnitudes_bkg = torch.mean(torch.norm(feat_bkg, dim=2), dim=1)
label_np = _label.cpu().data.numpy()
score_np = score_act[0].cpu().data.numpy()
pred_np = np.zeros_like(score_np)
pred_np[np.where(score_np < config.class_thresh)] = 0
pred_np[np.where(score_np >= config.class_thresh)] = 1
correct_pred = np.sum(label_np == pred_np, axis=1)
num_correct += np.sum((correct_pred == config.num_classes).astype(np.float32))
num_total += correct_pred.shape[0]
feat_magnitudes = torch.norm(features, p=2, dim=2)
feat_magnitudes = utils.minmax_norm(feat_magnitudes, max_val=feat_magnitudes_act, min_val=feat_magnitudes_bkg)
feat_magnitudes = feat_magnitudes.repeat((config.num_classes, 1, 1)).permute(1, 2, 0)
cas = utils.minmax_norm(cas_softmax * feat_magnitudes)
#The following two lines is to deploy TS-PCA with WUM.
cas = cas + 0.5*features_div_mean
cas = utils.minmax_norm(cas)
pred = np.where(score_np >= config.class_thresh)[0]
if len(pred) == 0:
pred = np.array([np.argmax(score_np)])
cas_pred = cas[0].cpu().numpy()[:, pred]
cas_pred = np.reshape(cas_pred, (num_segments, -1, 1))
cas_pred = utils.upgrade_resolution(cas_pred, config.scale)
proposal_dict = {}
feat_magnitudes_np = feat_magnitudes[0].cpu().data.numpy()[:, pred]
feat_magnitudes_np = np.reshape(feat_magnitudes_np, (num_segments, -1, 1))
feat_magnitudes_np = utils.upgrade_resolution(feat_magnitudes_np, config.scale)
for i in range(len(config.act_thresh_cas)):
cas_temp = cas_pred.copy()
zero_location = np.where(cas_temp[:, :, 0] < config.act_thresh_cas[i])
cas_temp[zero_location] = 0
seg_list = []
for c in range(len(pred)):
pos = np.where(cas_temp[:, c, 0] > 0)
seg_list.append(pos)
proposals = utils.get_proposal_oic(seg_list, cas_temp, score_np, pred, config.scale, \
vid_num_seg, config.feature_fps, num_segments)
for i in range(len(proposals)):
class_id = proposals[i][0][0]
if class_id not in proposal_dict.keys():
proposal_dict[class_id] = []
proposal_dict[class_id] += proposals[i]
for i in range(len(config.act_thresh_magnitudes)):
cas_temp = cas_pred.copy()
feat_magnitudes_np_temp = feat_magnitudes_np.copy()
zero_location = np.where(feat_magnitudes_np_temp[:, :, 0] < config.act_thresh_magnitudes[i])
feat_magnitudes_np_temp[zero_location] = 0
seg_list = []
for c in range(len(pred)):
pos = np.where(feat_magnitudes_np_temp[:, c, 0] > 0)
seg_list.append(pos)
proposals = utils.get_proposal_oic(seg_list, cas_temp, score_np, pred, config.scale, \
vid_num_seg, config.feature_fps, num_segments)
for i in range(len(proposals)):
class_id = proposals[i][0][0]
if class_id not in proposal_dict.keys():
proposal_dict[class_id] = []
proposal_dict[class_id] += proposals[i]
final_proposals = []
for class_id in proposal_dict.keys():
final_proposals.append(utils.nms(proposal_dict[class_id], 0.6))
final_res['results'][vid_name[0]] = utils.result2json(final_proposals)
test_acc = num_correct / num_total
json_path = os.path.join(config.output_path, 'result.json')
with open(json_path, 'w') as f:
json.dump(final_res, f)
f.close()
tIoU_thresh = np.linspace(0.1, 0.9, 9)
#tIoU_thresh = np.linspace(0.1, 0.7, 7)
anet_detection = ANETdetection(config.gt_path, json_path,
subset='test', tiou_thresholds=tIoU_thresh,
verbose=False, check_status=False)
mAP, average_mAP = anet_detection.evaluate()
logger.log_value('Test accuracy', test_acc, step)
for i in range(tIoU_thresh.shape[0]):
logger.log_value('mAP@{:.1f}'.format(tIoU_thresh[i]), mAP[i], step)
logger.log_value('Average mAP', average_mAP, step)
test_info["step"].append(step)
test_info["test_acc"].append(test_acc)
test_info["average_mAP"].append(average_mAP)
for i in range(tIoU_thresh.shape[0]):
test_info["mAP@{:.1f}".format(tIoU_thresh[i])].append(mAP[i])
| [
"alen.ly@alibaba-inc.com"
] | alen.ly@alibaba-inc.com |
ba04885de6ca2c1a171de58f0649c0d7f07f2428 | a3e48885987e895d0d33b5dd903d51aaf4d21ce6 | /duplicate_strings.py | b6413d4a9b2d69466c9d364f828210a31a44ee9a | [] | no_license | das-amrit/helper_scripts | 2e17c0c585356dd6cf9bd84b5082f6a20dd815f7 | fdca380e55bfaee5cf5fb3365421931513501db7 | refs/heads/master | 2021-03-24T04:23:54.449186 | 2019-02-17T06:44:45 | 2019-02-17T06:44:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | import itertools
text = "aforapple"
text_list = list(text)
new_text = [k for k,g in itertools.groupby(text_list)]
print("".join(new_text)
| [
"noreply@github.com"
] | noreply@github.com |
0249930c34da9815a0b78f5701b102bed3daa0b0 | 57a9d84e8bcf505795e7e4f2a57f096edebd0040 | /read_statistics/migrations/0002_readdetail.py | c76c1ec0c7df8243dc8104d4804adc7e95280b51 | [] | no_license | klllllsssss/mysite | 03541240c3d8013da40e3fdcaefbf9cfffabdfe3 | 14fbce7d1cb5097f16d2002da5a7a709cc7953f6 | refs/heads/master | 2022-09-18T13:30:47.399220 | 2020-06-03T11:04:23 | 2020-06-03T11:04:23 | 269,059,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | # Generated by Django 2.0 on 2020-04-27 11:53
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('read_statistics', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ReadDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(default=django.utils.timezone.now)),
('read_num', models.IntegerField(default=0)),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='contenttypes.ContentType')),
],
),
]
| [
"549284627@qq.com"
] | 549284627@qq.com |
86595f3567adfa865a0e8806fba2bb2cd8d64109 | ade758c24cd547689012a61b55ccf77e33a2bbf2 | /93/93.py | 0ab93003b896e27ece999a42c10a5d8a9c0eaef6 | [] | no_license | danmedani/euler | 7f7dda0ee295a77eb6faca0a4aa15015850aed72 | eeef3a4d9c188f954842f7c3adc37d58588c4781 | refs/heads/master | 2023-08-17T03:26:36.864451 | 2023-08-08T02:35:46 | 2023-08-08T02:35:46 | 14,157,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,362 | py | import math
import copy
SENT = -999999
def doOp(num1, num2, op):
if op == '*':
return num1 * num2
elif op == '+':
return num1 + num2
elif op == '-':
return num1 - num2
elif op == '/':
if num2 == 0:
return 1.9874352345
else:
return 1.0 * num1 / num2
else:
print 'oh crap!'
def getTree1(numList, opList):
rr = doOp(numList[2], numList[3], opList[2])
r = doOp(numList[1], rr, opList[1])
c = doOp(numList[0], r, opList[0])
if c - int(c) > 0.0001:
return SENT
else:
return int(c)
def getTree2(numList, opList):
rr = doOp(numList[1], numList[2], opList[2])
r = doOp(rr, numList[3], opList[1])
c = doOp(numList[0], r, opList[0])
if c - int(c) > 0.0001:
return SENT
else:
return int(c)
def getTree3(numList, opList):
rr = doOp(numList[1], numList[2], opList[2])
r = doOp(numList[0], rr, opList[1])
c = doOp(r, numList[3], opList[0])
if c - int(c) > 0.0001:
return SENT
else:
return int(c)
def getTree4(numList, opList):
rr = doOp(numList[0], numList[1], opList[2])
r = doOp(rr, numList[2], opList[1])
c = doOp(r, numList[3], opList[0])
if c - int(c) > 0.0001:
return SENT
else:
return int(c)
def getTree5(numList, opList):
    """Evaluate (a op1 b) op0 (c op2 d) — the balanced tree.

    Returns the integer result, or SENT when the value is non-integral.
    """
    left = doOp(numList[0], numList[1], opList[1])
    right = doOp(numList[2], numList[3], opList[2])
    total = doOp(left, right, opList[0])
    if total - int(total) > 0.0001:
        return SENT
    return int(total)
# All 4^3 = 64 ordered operator triples, filled once at import time by
# the getOpList([]) call below.
opList = []
ops = ['+', '-', '/', '*']
def getOpList(soFar):
    # Depth-first: extend soFar with each operator until three are chosen,
    # then record the finished triple in the module-level opList.
    global opList
    if len(soFar) == 3:
        opList.append(soFar)
        return
    for op in ops:
        soFarCop = copy.deepcopy(soFar)
        soFarCop.append(op)
        getOpList(soFarCop)
getOpList([])
# All orderings (permutations) of the current digit set; rebuilt per query.
fullList = []
def getNummySet(nums):
    # Reset the accumulator, then enumerate every permutation of nums.
    global fullList
    fullList = []
    getFullNumSet(nums, [])
def getFullNumSet(nums, numList):
    # Recursive permutation builder: move one remaining element at a time
    # from nums into numList.  (xrange: this file is Python 2.)
    global fullList
    if len(nums) == 0:
        fullList.append(numList)
    for i in xrange(len(nums)):
        numsCop = copy.deepcopy(nums)
        numListCop = copy.deepcopy(numList)
        numListCop.append(numsCop[i])
        del(numsCop[i])
        getFullNumSet(numsCop, numListCop)
def getAllNumOps(num):
    """Return the set of distinct results obtainable from the four digits
    in `num` over every operand order, operator triple and tree shape.

    The SENT sentinel (marking non-integral results) stays in the set; 0 is
    dropped because the puzzle counts consecutive targets starting at 1.
    """
    global fullList
    global opList
    getNummySet(num)
    fullListy = set([])
    for nums in fullList:
        for op in opList:
            fullListy.add(getTree1(nums, op))
            fullListy.add(getTree2(nums, op))
            fullListy.add(getTree3(nums, op))
            fullListy.add(getTree4(nums, op))
            fullListy.add(getTree5(nums, op))
    # Bug fix: discard() instead of remove() — a digit set that cannot
    # produce 0 would otherwise raise KeyError here.
    fullListy.discard(0)
    return fullListy
def getOneTo(num):
    """Return n such that every target 1..n is expressible from the digit
    list `num` but n+1 is not."""
    reachable = getAllNumOps(num)
    candidate = 1
    while candidate in reachable:
        candidate += 1
    return candidate - 1
# All ordered 4-digit selections (digits distinct) drawn from 0-9.
digList = []
digs = range(10)
def getNumList(soFar, digs):
    # Depth-first: pick each still-unused digit in turn until four are chosen.
    global digList
    if len(soFar) == 4:
        digList.append(soFar)
        return
    for op in digs:
        soFarCop = copy.deepcopy(soFar)
        soFarCop.append(op)
        digCop = copy.deepcopy(digs)
        digCop.remove(op)
        getNumList(soFarCop, digCop)
getNumList([], digs)
# Seen-set used below to collapse orderings of the same digit combination.
digHashMap = {}
def hashIt(lis):
    """Pack four digits into one int: d0 + d1*100 + d2*10^4 + d3*10^6."""
    total = 0
    for power, digit in zip((1, 100, 10000, 1000000), lis):
        total += power * digit
    return total
# Deduplicate digit sets: operand order is already enumerated elsewhere, so
# sort each selection and keep one representative per packed hash.
finalDigz = []
for digL in digList:
    digL.sort()
    hh = hashIt(digL)
    if hh not in digHashMap:
        finalDigz.append(digL)
        digHashMap[hh] = True
# Track the digit set with the longest run 1..n of expressible targets,
# printing each new record as it is found.  (Python 2 print statement.)
bigz = 0
for fDigz in finalDigz:
    highestNum = getOneTo(fDigz)
    if highestNum > bigz:
        bigz = highestNum
        print highestNum, fDigz
| [
"danmedani@gmail.com"
] | danmedani@gmail.com |
2b21f9bf32ec2ff92c015f407d8cc4df35ebc205 | 693431e2be60ac6f9d59996589c7023408537603 | /talk/metrics_publisher/publisher.py | 0855e20eee392393c3d46d0806aa5e8dfda83fa9 | [] | no_license | rapyuta-robotics/io_tutorials | deb547590a4519f19923dc9593399cae2e2d6683 | 88cf45629e4c02dff385048ece4b2b344a6100a3 | refs/heads/master | 2023-05-27T15:46:33.673684 | 2023-02-22T08:52:32 | 2023-02-22T08:52:32 | 118,696,902 | 7 | 24 | null | 2023-05-23T03:37:59 | 2018-01-24T02:00:39 | CMake | UTF-8 | Python | false | false | 1,629 | py | #!/usr/bin/env python
import random
import rospy
from std_msgs.msg import String
from ros_monitoring_msgs.msg import MetricList, MetricData, MetricDimension
def get_metric_list(cycle, count):
    """Build the demo metrics for one publish tick.

    Battery drains linearly with `count`; distance and connected-robot
    figures are randomized each call.  Both robot metrics share the same
    dimension tags.
    """
    robot_dimensions = [
        MetricDimension(name='cycle', value='cycle' + str(cycle)),
        MetricDimension(name='random_tag', value=str(random.choice([0, 1]))),
    ]
    battery = MetricData(
        metric_name='robot.battery_charge',
        unit=MetricData.UNIT_PERCENTAGE,
        value=100 - (count * 10),
        dimensions=robot_dimensions,
    )
    distance = MetricData(
        metric_name='robot.distance_traveled',
        unit='meters',
        value=random.uniform(count * 100.0, (count+1) * 100.0),
        dimensions=robot_dimensions,
    )
    fleet = MetricData(
        metric_name='edge.connected_robots',
        unit=MetricData.UNIT_COUNT,
        value=random.randint(1, 100),
    )
    return [battery, distance, fleet]
def publish():
    """Publish a MetricList on /io_metrics every 5 s until ROS shuts down.

    `count` cycles 1..10; each time it wraps, `cycle` advances (also 1..10).
    """
    pub = rospy.Publisher('/io_metrics', MetricList, queue_size=10)
    rospy.init_node('metric_publisher', anonymous=True)
    rate = rospy.Rate(0.2)  # 0.2 Hz -> one message every 5 seconds
    cycle, count = 1, 1
    while not rospy.is_shutdown():
        pub.publish(MetricList(get_metric_list(cycle, count)))
        rospy.loginfo('published metric list for cycle: %d, count: %d', cycle, count)
        rate.sleep()
        if count < 10:
            count += 1
        else:
            count = 1
            cycle = cycle + 1 if cycle < 10 else 1
if __name__ == '__main__':
    try:
        publish()
    except rospy.ROSInterruptException:
        # Raised by rospy when the node is shut down mid-sleep; exit quietly.
        pass
| [
"noreply@github.com"
] | noreply@github.com |
d9504d622907d7e7e0e6a6772e0fc6a072b448be | 276e6b57d182875c3c9276360dcfbd26ba542492 | /main.py | 6cb0596d6d39368e67e622a9d677e1de169562b2 | [
"MIT"
] | permissive | Charlie-kun/Loan | c9bf7ebff209dca6059bfe6141ad67420678b5be | 930835221d9eb28ba7f6a3d6e1b1b72ab280683c | refs/heads/master | 2022-12-09T07:24:40.351771 | 2020-09-21T21:55:06 | 2020-09-21T21:55:06 | 285,209,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,155 | py | import os
import sys
import logging
import argparse
import json
import settings
import utils
import data_manager
if __name__ == '__main__':  # Parse CLI hyperparameters, then train/run learners.
    parser = argparse.ArgumentParser()
    parser.add_argument('--stock_code', nargs='+')  # one or more stock codes
    parser.add_argument('--ver', choices=['v1', 'v2'], default='v2')  # data/feature version
    parser.add_argument('--rl_method',
        choices=['dqn', 'pg', 'ac', 'a2c', 'a3c'])
    parser.add_argument('--net',
        choices=['dnn', 'lstm', 'cnn'], default='dnn')
    parser.add_argument('--num_steps', type=int, default=1)  # NOTE(review): presumably the input window length for lstm/cnn nets — confirm in learners
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--discount_factor', type=float, default=0.9)
    parser.add_argument('--start_epsilon', type=float, default=0)
    parser.add_argument('--balance', type=int, default=10000000)
    parser.add_argument('--num_epoches', type=int, default=100)
    parser.add_argument('--delayed_reward_threshold',
        type=float, default=0.05)
    parser.add_argument('--backend',
        choices=['tensorflow', 'plaidml'], default='tensorflow')
    parser.add_argument('--output_name', default=utils.get_time_str())
    parser.add_argument('--value_network_name')
    parser.add_argument('--policy_network_name')
    parser.add_argument('--reuse_models', action='store_true')
    parser.add_argument('--learning', action='store_true')
    parser.add_argument('--start_date', default='20170101')
    parser.add_argument('--end_date', default='20171231')
    args = parser.parse_args()

    # Keras backend setting — must run BEFORE keras-importing modules below.
    if args.backend == 'tensorflow':
        os.environ['KERAS_BACKEND'] = 'tensorflow'
    elif args.backend == 'plaidml':
        os.environ['KERAS_BACKEND'] = 'plaidml.keras.backend'

    # Output path: one folder per (name, method, net) run.
    output_path = os.path.join(settings.BASE_DIR,
        'output/{}_{}_{}'.format(args.output_name, args.rl_method, args.net))
    if not os.path.isdir(output_path):
        os.makedirs(output_path)

    # Record the run's parameters for reproducibility.
    with open(os.path.join(output_path, 'params.json'), 'w') as f:
        f.write(json.dumps(vars(args)))

    # Logging: full DEBUG stream to file, INFO to stdout.
    file_handler = logging.FileHandler(filename=os.path.join(
        output_path, "{}.log".format(args.output_name)), encoding='utf-8')
    stream_handler = logging.StreamHandler(sys.stdout)
    file_handler.setLevel(logging.DEBUG)
    stream_handler.setLevel(logging.INFO)
    logging.basicConfig(format="%(message)s",
        handlers=[file_handler, stream_handler], level=logging.DEBUG)

    # Import RLTrader modules only after logging and the Keras backend are set.
    from agent import Agent
    from learners import DQNLearner, PolicyGradientLearner, \
        ActorCriticLearner, A2CLearner, A3CLearner

    # Resolve value/policy network paths: reuse a named model from models/
    # when given, otherwise write fresh models into this run's output dir.
    value_network_path = ''
    policy_network_path = ''
    if args.value_network_name is not None:
        value_network_path = os.path.join(settings.BASE_DIR,
            'models/{}.h5'.format(args.value_network_name))
    else:
        value_network_path = os.path.join(
            output_path, '{}_{}_value_{}.h5'.format(
                args.rl_method, args.net, args.output_name))
    if args.policy_network_name is not None:
        policy_network_path = os.path.join(settings.BASE_DIR,
            'models/{}.h5'.format(args.policy_network_name))
    else:
        policy_network_path = os.path.join(
            output_path, '{}_{}_policy_{}.h5'.format(
                args.rl_method, args.net, args.output_name))

    common_params = {}
    list_stock_code = []
    list_chart_data = []
    list_training_data = []
    list_min_trading_unit = []
    list_max_trading_unit = []

    for stock_code in args.stock_code:
        # Load chart data and derived training features for this stock.
        chart_data, training_data = data_manager.load_data(
            os.path.join(settings.BASE_DIR,
                'data/{}/{}.csv'.format(args.ver, stock_code)),
            args.start_date, args.end_date, ver=args.ver)

        # Trading unit bounds sized so one trade is roughly 100k / 1M currency
        # units at the latest close price (at least one share either way).
        min_trading_unit = max(int(100000 / chart_data.iloc[-1]['close']), 1)
        max_trading_unit = max(int(1000000 / chart_data.iloc[-1]['close']), 1)

        # Parameters shared by every learner type.
        common_params = {'rl_method': args.rl_method,
            'delayed_reward_threshold': args.delayed_reward_threshold,
            'net': args.net, 'num_steps': args.num_steps, 'lr': args.lr,
            'output_path': output_path, 'reuse_models': args.reuse_models}

        # Non-A3C methods train one learner per stock immediately; A3C only
        # collects the per-stock data here and trains once after the loop.
        learner = None
        if args.rl_method != 'a3c':
            common_params.update({'stock_code': stock_code,
                'chart_data': chart_data,
                'training_data': training_data,
                'min_trading_unit': min_trading_unit,
                'max_trading_unit': max_trading_unit})
            if args.rl_method == 'dqn':
                learner = DQNLearner(**{**common_params,
                    'value_network_path': value_network_path})
            elif args.rl_method == 'pg':
                learner = PolicyGradientLearner(**{**common_params,
                    'policy_network_path': policy_network_path})
            elif args.rl_method == 'ac':
                learner = ActorCriticLearner(**{**common_params,
                    'value_network_path': value_network_path,
                    'policy_network_path': policy_network_path})
            elif args.rl_method == 'a2c':
                learner = A2CLearner(**{**common_params,
                    'value_network_path': value_network_path,
                    'policy_network_path': policy_network_path})
            if learner is not None:
                learner.run(balance=args.balance,
                    num_epoches=args.num_epoches,
                    discount_factor=args.discount_factor,
                    start_epsilon=args.start_epsilon,
                    learning=args.learning)
                learner.save_models()
        else:
            list_stock_code.append(stock_code)
            list_chart_data.append(chart_data)
            list_training_data.append(training_data)
            list_min_trading_unit.append(min_trading_unit)
            list_max_trading_unit.append(max_trading_unit)

    if args.rl_method == 'a3c':
        # A3C trains one multi-stock learner over everything collected above.
        learner = A3CLearner(**{
            **common_params,
            'list_stock_code': list_stock_code,
            'list_chart_data': list_chart_data,
            'list_training_data': list_training_data,
            'list_min_trading_unit': list_min_trading_unit,
            'list_max_trading_unit': list_max_trading_unit,
            'value_network_path': value_network_path,
            'policy_network_path': policy_network_path})
        learner.run(balance=args.balance, num_epoches=args.num_epoches,
            discount_factor=args.discount_factor,
            start_epsilon=args.start_epsilon,
            learning=args.learning)
        learner.save_models()
| [
"chlh@daum.net"
] | chlh@daum.net |
1b20703b930ae2d775880d83cd617d40c9cdfa18 | ea867a1db2b730964b471e5f198ac74988417fa5 | /steemtools/helpers.py | 5c4e3a5d73bff0aa5310093de2799d44d516835b | [
"MIT"
] | permissive | Denis007138/steemtools | 0b58fa4bb2608c0134752b0855a36464cff9073a | c7f7ad9f482ff1b56e1218ceffbf574c95cf0c1f | refs/heads/master | 2021-01-11T01:34:36.721177 | 2016-10-10T13:58:44 | 2016-10-10T13:58:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | import datetime
import re
import time
import dateutil
from dateutil import parser
from funcy import contextmanager, decorator
from werkzeug.contrib.cache import SimpleCache
@contextmanager
def timeit():
    """Context manager that prints the wall-clock seconds spent in its body."""
    started = time.time()
    yield
    elapsed = time.time() - started
    print("Time Elapsed: %.2f" % elapsed)
@decorator
def simple_cache(func, cache_obj, timeout=3600):
    """Memoize `func` in a werkzeug SimpleCache for `timeout` seconds.

    `func` is funcy's call object: calling `func()` invokes the wrapped
    function, and `_func`/`_args`/`_kwargs` describe the call; together they
    form the cache key.  Passing anything other than a SimpleCache disables
    caching entirely.
    """
    if type(cache_obj) is not SimpleCache:
        return func()
    name = "%s_%s_%s" % (func._func.__name__, func._args, func._kwargs)
    cache_value = cache_obj.get(name)
    # Bug fix: compare against None instead of truthiness, so legitimately
    # falsy results (0, '', [], False) are served from the cache instead of
    # being recomputed on every call.
    if cache_value is not None:
        return cache_value
    out = func()
    cache_obj.set(name, out, timeout=timeout)
    return out
def read_asset(asset_string):
    """Parse an asset string like '1.000 STEEM' into its parts.

    Returns a dict with 'value' (float) and 'symbol' (str).
    """
    matched = re.match(r'(?P<number>\d*\.?\d+)\s?(?P<unit>[a-zA-Z]+)', asset_string)
    return {'value': float(matched.group('number')), 'symbol': matched.group('unit')}
def parse_payout(payout):
    """Return just the numeric value of a payout string such as '1.234 SBD'."""
    asset = read_asset(payout)
    return asset['value']
def time_diff(time1, time2):
    """Return time2 - time1 in seconds; both naive timestamps read as UTC."""
    stamp1 = parser.parse(time1 + "UTC").timestamp()
    stamp2 = parser.parse(time2 + "UTC").timestamp()
    return stamp2 - stamp1
def is_comment(item):
    """True when the post's permlink marks it as a reply (starts with 're-')."""
    return item['permlink'].startswith("re-")
def time_elapsed(time1):
    """Seconds elapsed since `time1` (naive timestamp interpreted as UTC)."""
    created_at = parser.parse(time1 + "UTC").timestamp()
    return time.time() - created_at
def parse_time(block_time):
    """Parse a steemd block timestamp into a timezone-aware UTC datetime."""
    aware = dateutil.parser.parse(block_time + "UTC")
    return aware.astimezone(datetime.timezone.utc)
| [
"_@furion.me"
] | _@furion.me |
a602ed4d95af34413839a7d25ad1df255e16af0c | e67ae29c22eca0e23a63f871c008c0de3b0cf1df | /Civ4 Reimagined/PublicMaps/not_too_Big_or_Small.py | fddb4814cf8770bde44e7db1b6693f74db53c0c5 | [
"CC-BY-3.0"
] | permissive | NilsBatram/Civ4-Reimagined | 16f9e24174118ee7662723230f101fb563d31b4b | a9bc57908321dd12db3417b89bd569de7b99dea1 | refs/heads/master | 2020-12-08T11:30:46.160054 | 2020-06-18T20:32:55 | 2020-06-18T20:32:55 | 66,165,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,732 | py |
## "not too Big or Small". A modified version of "big and small" to scale better with larger maps.
## by Karadoc. version 1.4
from CvPythonExtensions import *
import CvUtil
import CvMapGeneratorUtil
from CvMapGeneratorUtil import FractalWorld
from CvMapGeneratorUtil import TerrainGenerator
from CvMapGeneratorUtil import FeatureGenerator
def getDescription():
    """Map-script blurb shown in the custom-game screen."""
    return "A modified version of Big and Small, designed to scale better for large maps."
def isAdvancedMap():
    """Advanced-only script: keep it out of the simple custom-game menu."""
    return 0
def getNumCustomMapOptions():
    """Two custom options: continent size and island size."""
    return 2
def getCustomMapOptionName(argsList):
    """Translated label for custom option iOption (0: continents, 1: islands)."""
    [iOption] = argsList
    keys = (
        "TXT_KEY_MAP_SCRIPT_CONTINENTS_SIZE",
        "TXT_KEY_MAP_SCRIPT_ISLANDS_SIZE",
    )
    return unicode(CyTranslator().getText(keys[iOption], ()))
def getNumCustomMapOptionValues(argsList):
    """Number of selectable values: 3 continent sizes, 2 island sizes."""
    [iOption] = argsList
    return (3, 2)[iOption]
def getCustomMapOptionDescAt(argsList):
    """Translated description for selection iSelection of option iOption."""
    [iOption, iSelection] = argsList
    selection_keys = {
        0: ("TXT_KEY_MAP_SCRIPT_MASSIVE_CONTINENTS",
            "TXT_KEY_MAP_SCRIPT_NORMAL_CONTINENTS",
            "TXT_KEY_MAP_SCRIPT_SNAKY_CONTINENTS"),
        1: ("TXT_KEY_MAP_SCRIPT_ISLANDS",
            "TXT_KEY_MAP_SCRIPT_TINY_ISLANDS"),
    }
    return unicode(CyTranslator().getText(selection_keys[iOption][iSelection], ()))
def getCustomMapOptionDefault(argsList):
    """Defaults: normal continents (1) and regular islands (0)."""
    [iOption] = argsList
    return (1, 0)[iOption]
def minStartingDistanceModifier():
    """Allow starts somewhat closer together than the engine default."""
    return -12
def beforeGeneration():
    #global xShiftRoll
    gc = CyGlobalContext()
    dice = gc.getGame().getMapRand()
    # Binary shift roll (for horizontal shifting if Island Region Separate).
    # NOTE(review): with the `global` line commented out this assignment is a
    # dead local, but the dice.get() call still consumes one value from the
    # map RNG stream — confirm that is intentional before removing it.
    xShiftRoll = dice.get(2, "Region Shift, Horizontal - Big and Small PYTHON")
    #print xShiftRoll
class BnSMultilayeredFractal(CvMapGeneratorUtil.MultilayeredFractal):
    def generatePlotsByRegion(self):
        # Sirian's MultilayeredFractal class, controlling function.
        # You -MUST- customize this function for each use of the class.
        #global xShiftRoll
        # Grain values come straight from the two custom map options.
        iContinentsGrain = 1 + self.map.getCustomMapOption(0)
        iIslandsGrain = 4 + self.map.getCustomMapOption(1)
        # Water variables need to differ if Overlap is set. Defining default here.
        iWater = 74
        # Zone counts scale with map size.  NB: relies on Python 2 integer
        # division throughout (Civ4 embeds Python 2).
        iTargetSize = 30 + self.dice.get(min(36, self.iW/3), "zone target size (horiz)")
        iHorizontalZones = max(1, (self.iW+iTargetSize/2) / iTargetSize)
        iTargetSize = 30 + self.dice.get(min(34, self.iH/2), "zone target size (vert)")
        iVerticalZones = max(1, (self.iH+iTargetSize/2) / iTargetSize)
        # if iHorizontalZones == 1 and iVerticalZones == 1:
        # 	iHorizontalZones = 1 + self.dice.get(2, "Saving throw vs. Pangaea")
        iTotalZones = iHorizontalZones * iVerticalZones
        # At least half the zones become continents; the rest become islands.
        iContinentZones = (iTotalZones+1)/2 + self.dice.get(1+(iTotalZones-1)/2, "number of 'big' zones")
        iIslandZones = iTotalZones - iContinentZones
        # Add a few random patches of Tiny Islands first.
        numTinies = iContinentZones + self.dice.get(2 + iTotalZones, "number of Tiny Islands")
        print("Patches of Tiny Islands: ", numTinies)
        if numTinies:
            for tiny_loop in range(numTinies):
                tinyWestLon = 0.01 * self.dice.get(85, "Tiny Longitude - Custom Continents PYTHON")
                tinyWestX = int(self.iW * tinyWestLon)
                tinySouthLat = 0.01 * self.dice.get(85, "Tiny Latitude - Custom Continents PYTHON")
                # Bug fix: this previously reused tinyWestLon, so the latitude
                # roll above was computed but never used and every patch sat on
                # the x==y diagonal.  (The RNG call sequence is unchanged.)
                tinySouthY = int(self.iH * tinySouthLat)
                tinyWidth = int(self.iW * 0.15)
                tinyHeight = int(self.iH * 0.15)
                self.generatePlotsInRegion(80,
                    tinyWidth, tinyHeight,
                    tinyWestX, tinySouthY,
                    4, 3,
                    0, self.iTerrainFlags,
                    6, 5,
                    True, 3,
                    -1, False,
                    False
                    )
        # Randomly mark iContinentZones of the zones as continent (1); the
        # inner while skips indices already chosen.
        zone_types = [0] * iTotalZones
        i = 0
        while i < iContinentZones:
            x = self.dice.get(iTotalZones - i, "zone placement")
            j = 0
            while j <= x:
                if (zone_types[j] == 1):
                    x = x + 1
                j += 1
            zone_types[x] = 1
            i += 1
        iZoneWidth = int(self.iW / iHorizontalZones)
        iZoneHeight = int(self.iH / iVerticalZones)
        xExp = 6
        iMaxOverLap = 5
        # Process each zone with a randomly overlapped bounding box.
        for i in range(iTotalZones):
            iWestX = max(0, (i % iHorizontalZones) * iZoneWidth - self.dice.get(iMaxOverLap, "zone overlap (west)"))
            iEastX = min(self.iW - 1, (i % iHorizontalZones + 1) * iZoneWidth + self.dice.get(iMaxOverLap, "zone overlap (east)"))
            iSouthY = max(0, max(3, (i / iHorizontalZones) * iZoneHeight) - self.dice.get(iMaxOverLap, "zone overlap (south)"))
            iNorthY = min(self.iH - 1, min(self.iH - 4, (i / iHorizontalZones + 1) * iZoneHeight) + self.dice.get(iMaxOverLap, "zone overlap (north)"))
            iWidth = iEastX - iWestX + 1
            iHeight = iNorthY - iSouthY + 1
            if (zone_types[i] == 1):
                # continent zone
                self.generatePlotsInRegion(iWater,
                    iWidth, iHeight,
                    iWestX, iSouthY,
                    iContinentsGrain, 4,
                    self.iRoundFlags, self.iTerrainFlags,
                    xExp, 6,
                    True, 15,
                    -1, False,
                    False
                    )
            else:
                # islands zone
                self.generatePlotsInRegion(iWater,
                    iWidth, iHeight,
                    iWestX, iSouthY,
                    iIslandsGrain, 5,
                    self.iRoundFlags, self.iTerrainFlags,
                    xExp, 6,
                    True, 15,
                    -1, False,
                    False
                    )
        # All regions have been processed. Plot Type generation completed.
        return self.wholeworldPlotTypes
'''
Regional Variables Key:
iWaterPercent,
iRegionWidth, iRegionHeight,
iRegionWestX, iRegionSouthY,
iRegionGrain, iRegionHillsGrain,
iRegionPlotFlags, iRegionTerrainFlags,
iRegionFracXExp, iRegionFracYExp,
bShift, iStrip,
rift_grain, has_center_rift,
invert_heights
'''
def generatePlotTypes():
    """Engine entry point: build plot types region by region."""
    NiTextOut("Setting Plot Types (Python Custom Continents) ...")
    return BnSMultilayeredFractal().generatePlotsByRegion()
def generateTerrainTypes():
    """Engine entry point: assign terrain via the default generator."""
    NiTextOut("Generating Terrain (Python Custom Continents) ...")
    return TerrainGenerator().generateTerrain()
def addFeatures():
    """Engine entry point: place features via the default generator."""
    NiTextOut("Adding Features (Python Custom Continents) ...")
    FeatureGenerator().addFeatures()
    return 0
| [
"Nils.Batram@gmx.de"
] | Nils.Batram@gmx.de |
ffc5de43ef8bdec5bcaa803f057de2f9ed1be0f1 | 85be26cd8c2ee8afb3d7ce2495f320e81cb7582f | /pylearn2/minimun_sample/minimum_test.py | 920a1de30bcb8c568241463c2154706aff0468da | [] | no_license | basharbme/deeplearning4windows | 368f9faf5902d943bad053de62dcc6860d20ae79 | 20fea62a2f17ba142e68a349d1cffa582eb6312a | refs/heads/master | 2020-05-24T05:31:28.835187 | 2016-02-03T10:13:04 | 2016-02-03T10:13:04 | 187,117,816 | 1 | 0 | null | 2019-05-17T00:09:27 | 2019-05-17T00:09:26 | null | UTF-8 | Python | false | false | 1,482 | py | # coding: UTF-8
import os,codecs,platform
from pylearn2.config import yaml_parse
from pylearn2.scripts.train import train
import numpy as np
from pylearn2.utils import serial
# Point pylearn2 at the parent directory for its data path, and pick the
# Theano device, before any module that reads these env vars is imported.
os.environ["PYLEARN2_DATA_PATH"] = os.path.dirname(os.getcwd())
if platform.system() == "Windows":
    os.environ['THEANO_FLAGS'] = "floatX=float32,device=cpu"
else:
    os.environ['THEANO_FLAGS'] = "floatX=float32,device=gpu"

def ccc(name):
    # Codec fallback: map Windows 'windows-31j' (Shift-JIS) lookups to UTF-8
    # so config/YAML files decode cleanly on Japanese-locale Windows.
    if name.lower() == 'windows-31j':
        return codecs.lookup('utf-8')
codecs.register(ccc)

# prepare training data
# topo_view = np.zeros([5,28,28])
# NOTE(review): randint's upper bound is exclusive, so (0, 1) always yields
# zeros — (0, 2) was probably intended for random 0/1 pixels.
topo_view = np.random.randint(0,1,(3,28,28))  # 3 x 28 x 28 array of ints in [0, 1)
m, r, c = topo_view.shape
assert r == 28
assert c == 28
topo_view = topo_view.reshape(m, r, c, 1)  # design-matrix form: (examples, rows, cols, channels)
serial.save("input.pkl", topo_view)
serial.save("label.pkl", np.array([[0],[1],[2]]))

# Fill the YAML template's %(...)s placeholders with hyperparameters.
yaml = open("minimum.yaml", 'r').read()
hyper_params = {'train_stop': 5,
                'valid_stop': 50050,
                'test_stop': 5,
                'batch_size': 3,  # apparently must divide the sample count (errored otherwise)
                'output_channels_h2': 4,
                'output_channels_h3': 4,
                'max_epochs': 5,
                'save_path': 'result'
                }
yaml = yaml % (hyper_params)
train = yaml_parse.load(yaml)
train.main_loop()
# train("minimum.yaml")
| [
"jgpuauno@gmail.com"
] | jgpuauno@gmail.com |
b8d49ed51407d17cd6270de28b88938cd694fa87 | 49f875de9e18812b25ead4cd15a562e9f1347256 | /selection_sort.py | 357b07b8ad678f0c637fe113b98915da400095ef | [] | no_license | brandontarney/ds_algo_review | 020499e7e81435b2475fd50acc717f3093fc0f7f | cabd42f01bd8f027db0d0e3de870263adbf9327a | refs/heads/master | 2020-06-23T16:30:50.564707 | 2019-09-09T01:02:17 | 2019-09-09T01:02:17 | 198,680,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | #!/bin/python
#NOTE - this is pseudocode, not real python code
#Algorithm
#Create sorted sublist (usually in the front of the list)
#Find the next smallest value and swap it into front position of a sorted sublist
#NOTE this is effectively iterating (inner loop) over the unsorted list vs. selection sort iterating (inner loop) over the sorted list
#Performance
#AVG O(n log n)
#BEST O(n log n)
#WORST O(n^2) - reverse order
#Form a sorted list starting at the front by simply swapping in the smallest value each iteration
def selection_sort( list ):
for (i = 0; i < list.len; i++):
min_val_idx = i
for (j=i+1; j< list.len; j++):
if (list[j] < list[min_val_idx]):
min_val_idx = j
#We found something smaller
if (min_val_idx != i):
tmpvar = list[i]
list[i] = list[min_val_idx]
list[min_val_idx] = tmpvar
| [
"brandon.tarney@gmail.com"
] | brandon.tarney@gmail.com |
88f5d9a2605d4624cc87af7e584ecdf570ac00dc | 6eb302bf3456b5fe700a4e3281ca7bb4597477bf | /student_chatbot/app.py | 646b9b82f4b4c27cc03f005615fed2b793fd3fae | [] | no_license | lekhya19311/Student-Informative-Chat-Bot-System | f39a99d219bef3e534077c2cf2abcd7dd8d4eec1 | 7cd420bb5a21383bddf0f5bea01335d3e8bcbf9e | refs/heads/master | 2021-01-26T13:58:08.538530 | 2020-02-22T09:28:17 | 2020-02-22T09:28:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,486 | py | import os
from flask import send_file
from flask import Flask, session, render_template, request, redirect, url_for, flash, jsonify
from flask_bcrypt import Bcrypt
from flask_session import Session
from database import Base, Attendance, Marks,Accounts, Profile, Feedback
from sqlalchemy import create_engine, exc
from sqlalchemy.orm import scoped_session, sessionmaker
import requests
import re
import pandas as pd
import matplotlib.pyplot as plt
app = Flask(__name__)
bcrypt = Bcrypt(app)
# Random per-process secret.  NOTE(review): the __main__ block at the bottom
# overwrites this with a hardcoded 'super_secret_key' — pick one.
app.secret_key = os.urandom(24)
# Configure session to use filesystem
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Set up database.  check_same_thread=False lets the SQLite connection be
# used across threads; echo=True logs every SQL statement to stdout.
engine = create_engine('sqlite:///database.db',connect_args={'check_same_thread': False},echo=True)
Base.metadata.bind = engine
db = scoped_session(sessionmaker(bind=engine))
@app.route("/")
def index():
    """Landing page: intro for anonymous visitors, dashboard otherwise."""
    if 'user' in session:
        return redirect(url_for('dashboard'))
    return render_template("intro.html")
# MAIN
@app.route("/dashboard")
def dashboard():
    """Main menu; anonymous users are sent back to the landing page."""
    if 'user' in session:
        return render_template("menu.html")
    return redirect(url_for('index'))
@app.route("/query", methods=["POST"])
def quer():
    """Chatbot endpoint: keyword-match the posted message and route to the
    matching view (students get attendance only; staff get reports).

    NOTE(review): ss.split()[-1] raises IndexError on an empty message, and
    an unknown session['usert'] falls into the staff branch — confirm both.
    """
    if request.method == 'POST':
        ss=request.form.get("msg").lower()
        # All known student IDs, used to recognize "<query> <sid>" messages.
        profile=db.execute("select sid from student_profile").fetchall()
        profile_result=list([profile[i][0] for i in range(len(profile))])
        if session['usert']=="Student":
            if "show my attendance" in ss:
                return redirect(url_for('attendance'))
            else:
                flash("Wrong! Try Again")
                return redirect(url_for('dashboard'))
        else:
            if "show graph" in ss:
                return redirect(url_for('plot_graph'))
            # "attendance ... less than 75" / "< 75" / "attendance shortage"
            if (re.search('attendance', ss) and re.search('75', ss) and (re.search('less than', ss) or re.search('lessthan', ss))) or (re.search('attendance', ss) and re.search('75', ss) and re.search('<', ss)) or re.search('attendance shortage', ss):
                result=db.execute("SELECT * FROM attendance WHERE attend < 75 ORDER BY sid").fetchall()
                return render_template("quer.html", results=result)
            # "attendance ... less than 65" / "< 65" / "detain"
            elif (re.search('attendance', ss) and re.search('65', ss) and (re.search('less than', ss) or re.search('lessthan', ss))) or (re.search('attendance', ss) and re.search('65', ss) and re.search('<', ss)) or re.search('detain', ss):
                result=db.execute("SELECT * FROM attendance WHERE attend < 65 ORDER BY sid").fetchall()
                return render_template("quer.html", results=result)
            # "profile <sid>" / "attendance <sid>" with a trailing known ID.
            elif (ss.split()[-1].upper() in profile_result) and re.search('profile', ss):
                result=db.execute("SELECT * from student_profile where sid = :s",{"s":ss.split()[-1].upper()})
                return render_template("profile.html", results=result)
            elif (ss.split()[-1].upper() in profile_result) and re.search('attendance', ss):
                result=db.execute("SELECT * from attendance where sid = :s",{"s":ss.split()[-1].upper()})
                return render_template("profile.html", results=result)
            else:
                flash("Wrong! Try Again")
                return redirect(url_for('dashboard'))
@app.route("/profile")
def profile():
    """Render the logged-in student's profile rows."""
    rows = db.execute("SELECT * FROM student_profile WHERE sid = :u", {"u": session['user']}).fetchall()
    return render_template("profile.html", results=rows)
@app.route("/attendance")
def attendance():
    """Render the logged-in student's attendance rows."""
    rows = db.execute("SELECT * FROM attendance WHERE sid = :u", {"u": session['user']}).fetchall()
    return render_template("attendance.html", results=rows)
@app.route("/marks")
def marks():
    """Static marks page (no data lookup yet)."""
    return render_template("marks.html")
@app.route("/attendance_display")
def attendance_update():
    """Static attendance-entry form page."""
    return render_template("attendance_form.html")
@app.route("/suggestions", methods=["GET", "POST"])
def Suggestions():
    """Feedback form: store a submitted message against the session user.

    NOTE(review): `sid` is read but never used, and the IntegrityError
    message is assigned to a local that is never rendered, so DB failures
    are effectively silent to the user.
    """
    msg1=msg2=""
    try:
        if request.method == "POST":
            sid = request.form.get("sid")
            name = request.form.get("name")
            subject = request.form.get("subject")
            message = request.form.get("message")
            result = db.execute("INSERT INTO feedback (name,subject,message,user_id) VALUES (:n,:s,:m,:u)", {"n":name,"s":subject ,"m": message,"u":session['user']})
            db.commit()
            msg1= "Submitted!"
            msg2 = "Thank You for your Feedback"
    except exc.IntegrityError:
        message = "Roll Number already exists."
        db.execute("ROLLBACK")
        db.commit()
    return render_template("feedback.html",msg1=msg1,msg2=msg2)
# To display all the complaints to the admin
@app.route("/adminfeedbacks")
def adminfeedbacks():
    """List every feedback entry for the admin view."""
    rows = db.execute("SELECT * FROM feedback").fetchall()
    return render_template('feedback.html', result=rows)
@app.route("/graphs")
def plot_graph():
    """Plot sub-75% attendance values and save the figure for the template.

    NOTE(review): x is hardcoded to two labels while y grows with the query
    result, so plt.plot raises unless exactly two rows match; the pyplot
    figure is also never cleared, so lines accumulate across requests.
    """
    result=db.execute("SELECT sid,attend FROM attendance WHERE attend < 75 ORDER BY sid").fetchall()
    x=["sart","ygf"]
    y=[]
    for i,j in result:
        y.append(j)
    plt.plot(x,y)
    d="sath"
    plt.title(d)
    plt.xlabel(d, fontsize=18)
    plt.ylabel(d, fontsize=16)
    plt.savefig('static/graph.png')
    return render_template('graphs.html',result=result)
@app.route('/download')
def download_file():
    """Export the student_profile table to an Excel file and serve it.

    NOTE(review): '.xlsx' is a file extension, not a MIME type — send_file's
    mimetype should be the spreadsheet MIME string; also the file is
    rewritten on every request with a throwaway sheet name.
    """
    s=db.execute("select * from student_profile").fetchall()
    df = pd.DataFrame(list(s))
    writer = pd.ExcelWriter('outputt.xlsx')
    df.to_excel(writer,sheet_name="lkjhgf")
    x=writer.save()
    return send_file('outputt.xlsx', as_attachment=True,mimetype='.xlsx')
# REGISTER
@app.route("/register", methods=["GET", "POST"])
def register():
    """Create an account (bcrypt-hashed password) and log the user in.

    A duplicate roll number triggers IntegrityError and re-renders the form
    with an error message.
    """
    if 'user' in session:
        return redirect(url_for('dashboard'))
    message = ""
    if request.method == "POST":
        try:
            usern = request.form.get("username")
            name = request.form.get("name").upper()
            usert = request.form.get("usertyp")
            passw = request.form.get("password")
            passw_hash = bcrypt.generate_password_hash(passw).decode('utf-8')
            result = db.execute("INSERT INTO accounts (id,name,user_type,password) VALUES (:u,:n,:t,:p)", {"u": usern,"n":name,"t":usert ,"p": passw_hash})
            db.commit()
            if result.rowcount > 0:
                # Auto-login the freshly created account.
                session['user'] = usern
                session['namet'] = name
                session['usert'] = usert
                flash("Your successfully Registrated")
                return redirect(url_for('dashboard'))
        except exc.IntegrityError:
            message = "Roll Number already exists."
            db.execute("ROLLBACK")
            db.commit()
    return render_template("registration.html", message=message)
# Change Pasword
@app.route("/change-password", methods=["GET", "POST"])
def changepass():
    """Change the logged-in user's password after verifying the old one.

    NOTE(review): the final msg assignment unconditionally overwrites the
    exception message, so 'Unable to process' can never be shown.
    """
    if 'user' not in session:
        return redirect(url_for('login'))
    msg=""
    if request.method == "POST":
        try:
            epswd = request.form.get("epassword")
            cpswd = request.form.get("cpassword")
            passw_hash = bcrypt.generate_password_hash(cpswd).decode('utf-8')
            exist=db.execute("SELECT password FROM accounts WHERE id = :u", {"u": session['user']}).fetchone()
            # Only update when the supplied existing password matches.
            if bcrypt.check_password_hash(exist['password'], epswd) is True:
                res=db.execute("UPDATE accounts SET password = :u WHERE id = :v",{"u":passw_hash,"v":session['user']})
                db.commit()
                if res.rowcount > 0:
                    return redirect(url_for('dashboard'))
        except exc.IntegrityError:
            msg = "Unable to process try again"
        msg="Existing Not matching"
    return render_template("change_password.html",m=msg)
# Reset
@app.route("/reset", methods=["GET", "POST"])
def reset():
    """Admin-only: reset a student's password to the hardcoded default 'srit'.

    NOTE(review): session['usert'] raises KeyError for anonymous visitors,
    and `res is not None` is always true for an UPDATE — rowcount was likely
    intended.
    """
    msg=""
    if session['usert']=="admin":
        if request.method == "POST":
            rollno = request.form.get("rollno")
            passw_hash = bcrypt.generate_password_hash("srit").decode('utf-8')
            res=db.execute("UPDATE accounts SET password = :u WHERE id = :v",{"u":passw_hash,"v":rollno})
            db.commit()
            if res is not None:
                return redirect(url_for('dashboard'))
        msg=""
        return render_template("pswdreset.html",m=msg)
    else:
        return redirect(url_for('dashboard'))
# LOGOUT
@app.route("/logout")
def logout():
    """Clear the session user; dashboard then bounces to the landing page."""
    session.pop('user', None)
    return redirect(url_for('dashboard'))
# LOGIN
@app.route("/login", methods=["GET", "POST"])
def login():
    """Authenticate against the accounts table and start a session.

    Renders the login form on GET or on failed credentials.
    """
    if 'user' in session:
        return redirect(url_for('dashboard'))
    message = ""
    if request.method == "POST":
        usern = request.form.get("username").upper()
        passw = request.form.get("password").encode('utf-8')
        result = db.execute("SELECT * FROM accounts WHERE id = :u", {"u": usern}).fetchone()
        if result is not None:
            # Fix: removed the debug print of the stored bcrypt hash — never
            # write credential material to stdout/logs, even hashed.
            if bcrypt.check_password_hash(result['password'], passw) is True:
                session['user'] = usern
                session['namet'] = result.name
                session['usert'] = result.user_type
                flash("Hii "+result.name)
                return redirect(url_for('dashboard'))
        message = "Username or password is incorrect."
    return render_template("login.html", message=message)
# Main
if __name__ == '__main__':
    # NOTE(review): this hardcoded key overrides the os.urandom() secret set
    # at import time, and debug=True on 0.0.0.0 exposes the Werkzeug debugger
    # to the network — disable both for any non-local deployment.
    app.secret_key = 'super_secret_key'
    app.debug = True
    app.run(host='0.0.0.0', port=5000)
"satheeshgajula22@gmail.com"
] | satheeshgajula22@gmail.com |
9994c856f9f6988cd9021ad10b5aef5d6047c41c | 09e0940849ba15a2179b4418057aac28aac81bac | /app/doyin.py | 97041f90e5e2688edc51ac2ae0f95cabb6dc9784 | [
"MIT"
] | permissive | tqdonkey/app-bot | 75886957114840877adf473e99cf05dcf424f2f5 | df01784b3f2875facfe1e79d4614a906dc8e7a7b | refs/heads/master | 2022-02-28T22:46:52.418077 | 2019-08-20T12:28:59 | 2019-08-20T12:28:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,215 | py | # -*- coding: utf-8 -*-
'''
create by: 小宝
mail: 1435682155@qq.com
create date: 2019.8.11
Purpose: hehe
Desc:just do it~
'''
import sys
import time
import os
import shutil
sys.path.append('./app')
from common.app import app
from common import config
from common import screenshot
from common import comm
# Platform-specific action modules are imported lazily inside the handlers.
# from action import dy_like
class doyin(app):
    def __init__(self):
        """Verify screenshot support, load the 'doyin' config, cache timings."""
        super().__init__()
        screenshot.check_screenshot()
        self.config = config.open_accordant_config('doyin')
        # Per-step delay (seconds) and click retry count from the config.
        self.delay = float(self.config['delay']['value'])
        self.retry = int(self.config['retry']['value'])
        pass
    def run(self):
        # Entry point: hand control to the interactive console menu.
        self.run_cmd()
    def run_cmd(self):
        """Interactive console menu dispatching to the like/cancel flows.

        NOTE(review): non-numeric input makes int(key) raise ValueError,
        which is not caught here; only Ctrl-C is handled.
        """
        try:
            key = input(
                "\n=========***欢迎使用doyin-bot***=========\n"+
                "请输入序号选择要操作的功能:\n"+
                "> 1:寻找美女并点赞\n"+
                "> 2:取消点赞\n"+
                "> 0: 退出程序\n"+
                "=======================================\n"
                "请输入[1/2/0]:"
            )
            key = int(key)
            if key == 1:
                self.search_dest()
            elif key == 2:
                self.cancel_like()
            elif key == 0:
                exit('谢谢使用')
        except KeyboardInterrupt:
            exit('谢谢使用')
    def search_dest(self):
        # Lazy import so only the selected action module is loaded; then run
        # the like-scheduling loop forever (stop with Ctrl-C).
        from action import do_like
        while True:
            self.action_schedule('do_like', do_like)
    def cancel_like(self):
        # Lazy import of the cancel flow; loops forever (stop with Ctrl-C).
        from action import do_cancel
        while True:
            self.action_schedule('do_cancel', do_cancel)
def action_schedule(self, action_name, action_file):
actions = action_file.actions
flg = True
while True:
for action in actions:
if action['type'] == 'open':
if not self._open_app(action['main_activity'], self.delay):
flg = False
break
elif action['type'] == 'click':
if not self._click_operate(action['current'], action['x'], action['y'], self.delay, action['expect'], self.retry):
flg = False
break
elif action['type'] == 'custom':
self.handle_custom_operate(action_name, action)
elif action['type'] == 'swipe':
self._swipe_page(action['x1'], action['y1'], action['x2'], action['y2'])
elif action['type'] == 'back':
self.back_expect_page(action['current'], action['expect'], self.delay, self.retry)
else:
exit('未知异常')
if flg:
break
def handle_custom_operate(self, action_name, action):
if action_name == 'do_like':
return self._handle_screenshot(action)
# 滑屏翻页
def _swipe_page(self, x1, y1, x2, y2):
self.swipe_operate(x1, y1, x2, y2, self.delay)
# 截屏及相关操作
def _handle_screenshot(self, action):
# 1.截屏优化图片
time.sleep(1)
self.screen_to_img()
comm.resize_image('./tmp/screen.png', './tmp/optimized.png', 1024*1024)
# 2.调用接口
res = comm.face_detectface()
if res == False:
return False
# 3.判断处理
is_dest = self._is_dest(res['face_list'], (0, 10), (80, 100), (0, 100))
# 4.保存图片
if is_dest != False:
print('是个美人儿~点赞走一波')
# 点赞
x = self.config['like_star']['x']
y = self.config['like_star']['y']
self._click_operate(action['current'], x, y, self.delay, '', self.retry)
self._img_save(is_dest['beauty'])
return True
return False
# 满足条件的图片保存
def _img_save(self, beauty):
# 1.把图存下来
path = time.strftime('%Y-%m-%d', time.localtime(time.time()))
file_path = './tmp/screenshot/' + path + "/"
if not os.path.exists(file_path):
os.mkdir(file_path)
rq = time.strftime('%Y%m%d%H%M%S-{}'.format(beauty), time.localtime(time.time()))
screen_name = file_path + rq + '.png'
shutil.copy('./tmp/screen.png', screen_name)
# 判断是否满足设定的条件
def _is_dest(self, face_list, gender = (0, 100), beauty = (0, 100), age = (0, 100)):
'''
default:
gender: (0, 100)
beauty:(0, 100)
age:(0, 100)
'''
for face in face_list:
if face['gender'] not in range(gender[0], gender[1] + 1): continue
if face['beauty'] not in range(beauty[0], beauty[1] + 1): continue
if face['beauty'] not in range(age[0], age[1] + 1): continue
print("颜值:{}".format(face['beauty']))
return {'beauty': face['beauty']}
return False | [
"root@lbp.localdomain"
] | root@lbp.localdomain |
fa26cbfd0a0af998227fd24745c6f1b50a85ae34 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03046/s367901013.py | bd60026b909a76c85e533b517ac364ab9dac011a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | from sys import stdout
# Contest-template I/O helpers.
printn = lambda x: stdout.write(str(x))
inn = lambda : int(input())
inl = lambda: list(map(int, input().split()))
inm = lambda: map(int, input().split())
ins = lambda : input().strip()
DBG = True # and False
BIG = 999999999
R = 10**9 + 7
def ddprint(x):
    # Debug print, silenced by flipping DBG.
    if DBG:
        print(x)
m,k = inm()
# Small/degenerate cases answered directly; -1 means no valid sequence.
if m==0 and k==0:
    print('0 0')
    exit()
if m==0 and k>0:
    print('-1')
    exit()
if m==1 and k==0:
    print('0 0 1 1')
    exit()
if m==1 and k>0:
    print('-1')
    exit()
if k>=2**m:
    print('-1')
    exit()
if k==0:
    # k == 0: each value can simply appear twice in a row.
    printn('0 0')
    for i in range(1,2**m):
        printn(' {} {}'.format(i,i))
    print('')
    exit()
# Pair each unused value i with its partner i^k; collect one representative
# per pair in `a` until half the values (minus the 0/k pair) are covered.
u = [False]*(2**m)
u[k] = True
a = []
cnt = 0
for i in range(1,2**m):
    j = i^k
    if not u[i] and not u[j]:
        a.append(i)
        u[j] = True
        cnt += 1
        if cnt==2**(m-1)-1:
            break
# Assemble the answer: a, then 0 k 0, then reversed(a), then the XOR
# partners a^k, then k, then those partners reversed.
s = [x for x in a]
t = [x for x in a]
t.reverse()
s.extend([0,k,0])
s.extend(t)
v = [x^k for x in a]
t = [x for x in v]
t.reverse()
s.extend(v)
s.append(k)
s.extend(t)
printn(s[0])
for i in range(1,len(s)):
    printn(' ' + str(s[i]))
print("")
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e924e94bc28ebc9f1f2c03016db85c413511282e | 965ec7b89c996c51579561e944be93f054f94301 | /test1.py | dc6a774ac0e491436d86533342ea435beb02201e | [
"MIT"
] | permissive | nayunhwan/SMaSH_Python | e26b20def4d6ca3ed042087a218150db6bac9d9a | 6e80520f43f6e014be2abc40d6f51f76338e3ff8 | refs/heads/master | 2018-12-27T11:51:39.860739 | 2018-10-24T06:55:32 | 2018-10-24T06:55:32 | 34,496,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | #-*- coding: utf-8 -*-
# Example ex_A.py — compute the total price for some apples.
# NOTE: this is Python 2 source (print statement below); in Python 2,
# input() evaluates the typed text, so numeric entry yields numbers.
unit_price = input("사과 1개의 가격은 얼마입니까? ")
apple_count = input("사과의 개수는 모두 몇 개 입니까? ")
price = apple_count * unit_price
print "전체 사과의 가격은 ", price, "원 입니다." | [
"kbk9288@gmail.com"
] | kbk9288@gmail.com |
b1c5a6fe4a11aa713099d0337893a6259fa2e086 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02973/s301790930.py | 280647a2fd8669a6345ecf3a1ac6c75ef906c3dc | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | from sys import stdin
from bisect import bisect
N = int(stdin.readline().rstrip())
A = []
for i in range(N):
    A.append(int(input()))
# Patience-style pass over the reversed sequence: bisect (= bisect_right)
# either overwrites the first pile top strictly greater than `a`, or opens
# a new pile; len(dp) (printed below) is the answer.
dp = []
for a in A[::-1]:
    i = bisect(dp, a)
    if i < len(dp):
        dp[i] = a
    else:
        dp.append(a)
print(len(dp)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
be1ca56a4c8e33d679fe761dc4faa412b354bfa3 | 61e68e3a4d6cc841da4350dc193315822ca4e354 | /lecture/4_정렬/4_퀵정렬.py | 45420f20a5eaaae9aafb31ff3bea12843c0068c4 | [] | no_license | sswwd95/Algorithm | 34360cd333019d6ded60f967c19aa70f1655e12a | a70bdf02580a39b9a5c282a04b0b2f8c2cb41636 | refs/heads/master | 2023-04-16T21:05:07.293929 | 2021-05-08T10:58:05 | 2021-05-08T10:58:05 | 362,651,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | array = [5, 7, 9, 0, 3, 1, 6, 2, 4, 8]
def quick_sort(array, start, end):
    """Sort array[start..end] in place (ascending) by recursive quicksort.

    The first element of the range serves as the pivot; two cursors scan
    inward from both ends, swapping out-of-place pairs, and once they cross
    the pivot is dropped into its final slot and each side is sorted
    recursively.
    """
    if start >= end:
        return  # ranges of length 0 or 1 are already sorted
    pivot = start
    lo, hi = start + 1, end
    while lo <= hi:
        # advance lo past values not exceeding the pivot
        while lo <= end and array[lo] <= array[pivot]:
            lo += 1
        # retreat hi past values not below the pivot
        while hi > start and array[hi] >= array[pivot]:
            hi -= 1
        if lo > hi:
            # cursors crossed: place the pivot between the two halves
            array[hi], array[pivot] = array[pivot], array[hi]
        else:
            # out-of-place pair found: exchange and keep scanning
            array[lo], array[hi] = array[hi], array[lo]
    quick_sort(array, start, hi - 1)
    quick_sort(array, hi + 1, end)
quick_sort(array, 0, len(array) - 1)  # sort the module-level sample list in place
print(array)
# [0,1,2,3,4,5,6,7,8,9] | [
"sswwd95@gmail.com"
] | sswwd95@gmail.com |
a2de64aec718958e8d3b7c4e7137f9309a3fd152 | b79042eb362a9ba284f0c518854a3b7e6ee39284 | /learning_users/basic_app/forms.py | 7015436716565e7010f8c6798e71463f0e946b09 | [] | no_license | Austin911/django-deployment-example | 2dc2e7d8b777928b7fd6d631978581fff54aa9d6 | eded8b839224b7202902f61a35f058693242ac55 | refs/heads/master | 2020-03-11T16:24:32.888430 | 2018-04-19T01:08:00 | 2018-04-19T01:08:00 | 130,115,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from django import forms
from django.contrib.auth.models import User
from basic_app.models import UserProfileInfo
class UserForm(forms.ModelForm):
    """Registration form for Django's built-in ``User`` model.

    ``password`` is overridden so it renders as a masked
    ``<input type="password">`` instead of plain text.
    """
    password = forms.CharField(widget= forms.PasswordInput())
    class Meta():
        model= User
        fields = ('username','email','password')
class UserProfileInfoForm(forms.ModelForm):
    """Form for the extra profile fields stored on ``UserProfileInfo``."""
    class Meta():
        model= UserProfileInfo
        fields = ('portfolio_site','profile_pic')
| [
"austin88yang@gmail.com"
] | austin88yang@gmail.com |
e3082e2c45c280050b7ffcd31885765d334863b4 | 9424df118a26170f023a665bdf8b0dc462f91721 | /project_utils.py | a22dd72aa5ba5acfa30a6a98965efbe8875a6bfb | [] | no_license | marcelthebridge/602_ml_Project1 | 65729b4faf8746554815f8e63089a1db33fec603 | 8f4afa9a6b36920e6869673df96d9c219014c2d0 | refs/heads/main | 2022-12-28T15:50:38.187531 | 2020-10-13T20:37:25 | 2020-10-13T20:37:25 | 302,507,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,973 | py | # Functions for use with Project_1 notebooks
# Imports:
from matplotlib.colors import ListedColormap
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def data_plot(hue, data):
for i, col in enumerate(data.columns):
plt.figure(i)
sns.set(rc={'figure.figsize':(20, 5)})
ax = sns.countplot(x=data[col],palette='mako',hue=hue,data=data)
def print_results(classifier, X_test, y_test):
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
print('Results of {} Model: \n'.format(classifier))
print('Accuracy of model {0:.4f}\n'.format(accuracy_score(y_test,classifier.predict(X_test))))
print('Classification Report:\n{}\n'.format(classification_report(y_test,classifier.predict(X_test))))
print('Confusion Matrix:\n{}\n'.format(confusion_matrix(y_test,classifier.predict(X_test))))
def visual_model(title, X, y, classifier, resolution=0.05):
# setup marker generator and color map
markers = ('x', 'o')
colors = ('black','cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
plt.figure(figsize=(15,10))
#plot surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class examples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0],
y=X[y == cl, 1],
alpha=0.8,
c=colors[idx],
marker=markers[idx],
label=cl,
edgecolor='black')
| [
"noreply@github.com"
] | noreply@github.com |
c465145274aa05d388e08fcefbcf65c084507859 | 8b9587f5548733ebf8c51af489abfae815355fd0 | /templateCode/kakao_R/kakao/chatroom_analysis.py | 032302f2db92fdc28d57fd2e172e486c310b3189 | [] | no_license | jyp0802/Butherfly | d81b525a01781dc7633eca68d577b0dd9cb1f57d | d80a1eea4103e0f2e4f1aaf91f7f425936003081 | refs/heads/master | 2021-08-20T07:50:55.580648 | 2017-11-28T15:44:49 | 2017-11-28T15:44:49 | 112,354,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,109 | py | #-*- coding: utf-8 -*-
# Revision: 6 (June 3th, 2017 3:10)
# Author: Claude Jin, Sanggyu Nam, Seunghyun Han (DiscoveryChannel)
# Logs
# Revision 1: user anonymization, dateline, firstline, message types(emoticon, photo, video, link)
# Revision 2: chatroomname, # of current participants, invitationline, multiple lines into one message
# Revision 3: Add support for chat log files exported in English and refactor the reader logic using classes.
# Revision 4: Replace calling open/close functions of file object to using a context block. This reduces the memory consumption so that it becomes possible to process large data.
# Revision 5: Add argument parsing and fix UTF-8 BOM issue.
# Revision 6: Add --username argument. It makes a specific user to be distinguished even after anonymization.
# references for regular expressionf
# http://regexr.com/
# http://devanix.tistory.com/296
from abc import ABCMeta, abstractmethod
from datetime import datetime, date, time, timedelta
from os import path
import io
import itertools
import re, sys, os
import glob
import csv
import json
class BaseChatLogReader(metaclass=ABCMeta):
    """Base reader for KakaoTalk chat log files.

    Subclasses supply the locale-specific compiled regexes below; the base
    class implements the line-oriented parsing and user anonymization.
    """
    @property
    @abstractmethod
    def chatroomnameGroup(self):
        """Regex matching the header line of a group-chat export."""
        raise NotImplementedError
    @property
    @abstractmethod
    def chatroomnameIndividual(self):
        """Regex matching the header line of a 1:1-chat export."""
        raise NotImplementedError
    @property
    @abstractmethod
    def dynamicsline(self):
        """Regex matching join/leave ("dynamics") lines."""
        raise NotImplementedError
    @property
    @abstractmethod
    def dateline(self):
        """Regex matching day-separator lines."""
        raise NotImplementedError
    @property
    @abstractmethod
    def firstline(self):
        """Regex matching the first line of a message (timestamp + sender)."""
        raise NotImplementedError
    def fileobj(self, f):
        """Accept an open text stream or a file name; return a text stream.

        Files are opened with utf-8-sig so a UTF-8 BOM is stripped.
        """
        if not isinstance(f, (io.TextIOBase, str)):
            raise TypeError('f should be a text I/O stream or a file name')
        return (f if isinstance(f, io.TextIOBase)
                else open(f, 'r', encoding='utf-8-sig'))
    def readChatroomLog(self, filename, username):
        """Parse one exported chat log.

        Returns [dates, messages, participants, participantCnt,
        chatroomName, dynamics], where participants maps real names to
        anonymized "userN" labels (messages carry only the labels).

        NOTE(review): `username is not 'Empty'` compares identity, not
        equality — it happens to work for the literal default but should be
        `!=`. Also, the message still being accumulated when the file ends
        is never appended to `messages` — confirm whether dropping the last
        message is intended.
        """
        usercounter = 0
        dynamics = []
        dates = []
        messages = []
        participants = dict()
        msg = None
        # Pre-reserve "user1" for the caller-designated user, if any.
        if username is not 'Empty':
            usercounter += 1
            participants[username] = "user" + str(usercounter)
        with self.fileobj(filename) as f:
            first_line = next(f)
            m = self.chatroomnameGroup.match(first_line)
            if m:
                chatroomName = m.group("name")
                participantCnt = m.group("current")
            else:
                m = self.chatroomnameIndividual.match(first_line)
                chatroomName = m.group("name")
                participantCnt = 2
            # Skip the export preamble before the first message.
            lines = itertools.islice(f, 4, None)
            for line in lines:
                # Skip blank lines.
                if len(line) <= 1:
                    continue
                m = self.dynamicsline.match(line)
                if m:
                    dynamics.append(m.groupdict())
                    continue
                m = self.dateline.match(line)
                if m:
                    msg = None
                    dates.append(m.groupdict())
                    continue
                m = self.firstline.match(line)
                if m:
                    # A new message starts: flush the previous one.
                    if msg is not None:
                        messages.append(msg)
                    msg = self.firstline.match(line).groupdict()
                    # Anonymize users.
                    if msg["participant"] in participants.keys():
                        msg["participant"] = participants[msg["participant"]]
                    else:
                        usercounter += 1
                        participants[msg["participant"]] = "user" + str(usercounter)
                        msg["participant"] = "user" + str(usercounter)
                    continue
                # Encountered a multi-line message.
                if msg is None:
                    print("Multi-line Error")
                    print(line)
                    print(dynamics)
                    exit(1)
                msg["message"] += " " + line
        return [dates, messages, participants, participantCnt, chatroomName, dynamics]
class KoreanChatLogReader(BaseChatLogReader):
    """Reader for KakaoTalk chat log files exported in Korean."""
    @property
    def chatroomnameGroup(self):
        # Group header: "<name> (<N>명)과 카카오톡 대화-1.txt"
        return re.compile("^(?P<name>.*) \((?P<current>[0-9]*)명\)과 카카오톡 대화-1.txt$")
    @property
    def chatroomnameIndividual(self):
        # 1:1 header: "<name>님과 카카오톡 대화-1.txt"
        return re.compile("^(?P<name>.*)님과 카카오톡 대화-1.txt$")
    @property
    def dynamicsline(self):
        # Join/leave line: timestamp + "...님이 ...을 초대했습니다" / "나갔습니다"
        return re.compile("^(?P<year>[0-9]{4})\. (?P<month>[0-9]{1,2})\. (?P<date>[0-9]{1,2})\. "
                          "(?P<meridiem>오전|오후) (?P<hour>[0-9]{1,2}):(?P<minute>[0-9]{1,2}): "
                          "(.*?)님이 (?:((?:(?:.*?)님(?:, )?(?:과 )?)+)을 초대했습니다|나갔습니다).$")
    @property
    def dateline(self):
        # Day separator: "<year>년 <month>월 <date>일 <day>요일"
        return re.compile("^(?P<year>[0-9]{4})년 (?P<month>[0-9]{1,2})월 (?P<date>[0-9]{1,2})일 (?P<day>.)요일$")
    @property
    def firstline(self):
        # First line of a message: timestamp, "오전/오후", sender, " : " body
        return re.compile("^(?P<year>[0-9]{4})\. (?P<month>[0-9]{1,2})\. (?P<date>[0-9]{1,2})\. (?P<meridiem>오전|오후) (?P<hour>[0-9]{1,2}):(?P<minute>[0-9]{1,2}), (?P<participant>.*?) : (?P<message>.*)")
class EnglishChatLogReader(BaseChatLogReader):
    """Reader for KakaoTalk chat log files exported in English."""
    @property
    def chatroomnameGroup(self):
        # Group header: "KakaoTalk Chats with <name> (<N> people)-1.txt"
        return re.compile("^KakaoTalk Chats with (?P<name>.*) \((?P<current>[0-9]*) people\)-1.txt$")
    @property
    def chatroomnameIndividual(self):
        # 1:1 header: "KakaoTalk Chats with <name>-1.txt"
        return re.compile("^KakaoTalk Chats with (?P<name>.*)-1.txt$")
    @property
    def dynamicsline(self):
        # Join/leave line: timestamp + "... invited ..." / "... left this chatroom."
        return re.compile("^(?P<month>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) "
                          "(?P<date>[0-9]{1,2}), (?P<year>[0-9]{4}), "
                          "(?P<hour>[0-9]{1,2}):(?P<minute>[0-9]{1,2}) "
                          "(?P<meridiem>AM|PM): "
                          "(.*?) (?:invited ((?:(?:.*?)(?:, )?(?: and )?)+)|left this chatroom).$"
                          )
    @property
    def dateline(self):
        # Day separator: "<Weekday>, <Month> <date>, <year>"
        return re.compile("^(?P<day>.{3}).*day, "
                          "(?P<month>January|February|March|April|May|June|July|August|September|October|November|December) "
                          "(?P<date>\d{1,2}), (?P<year>\d{4})$")
    @property
    def firstline(self):
        # First line of a message: timestamp, AM/PM, sender, " : " body
        return re.compile("^(?P<month>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) "
                          "(?P<date>[0-9]{1,2}), (?P<year>[0-9]{4}), "
                          "(?P<hour>[0-9]{1,2}):(?P<minute>[0-9]{1,2}) "
                          "(?P<meridiem>AM|PM), "
                          "(?P<participant>.*?) : (?P<message>.*)")
class Analyzer:
    """Analyzer for KakaoTalk chat logs.

    Consumes the six-element list produced by BaseChatLogReader and computes
    per-chatroom metrics (self.chatroom) and per-user metrics (self.users).
    """
    # message types
    emoticon = re.compile("^\((?:emoticon|이모티콘)\) $")
    photo = re.compile("^(사진|Photo)$")
    video = re.compile("^(동영상|Video)$")
    link = re.compile("^https?:\/\/.*")
    maxInterval = 24   # hours; per-instance value set in __init__
    hour2Sec = 3600
    day2Hour = 24
    def __init__(self, lang, chatroomLogs, chatroomID):
        # chatroomLogs layout comes from BaseChatLogReader.readChatroomLog:
        # [dates, messages, participants, participantCnt, name, dynamics]
        self.chatroomLogs = chatroomLogs
        self.lang = lang
        self.dates = self.chatroomLogs[0]
        self.messages = self.chatroomLogs[1]
        self.participants = self.chatroomLogs[2]
        self.participantCnt = self.chatroomLogs[3]
        self.chatroomName = self.chatroomLogs[4]
        self.dynamics = self.chatroomLogs[5]
        self.users = dict()
        self.chatroom = dict()
        self.chatroom["chatroomID"] = chatroomID
        self.chatroom["old"] = self.getOld(self.dates)
        self.chatroom["pop"] = int(self.participantCnt)
        self.chatroom["activePop"] = 0
        self.chatroom["M"] = 0.0
        self.chatroom["F"] = 0.0
        self.chatroom["avgCharLen"] = 0
        self.chatroom["dynamics"] = len(self.dynamics)
        self.chatroom["avgInterval"] = self.getIntervalTime()
        self.chatroom["avgReactTime"] = 0.0
        # 1:1 chats tolerate longer gaps than group chats.
        if len(self.participants) == 2:
            self.maxInterval = 24 # hour
        else:
            self.maxInterval = 8 # hour
        for key, user in zip(self.participants.keys(), self.participants.values()):
            self.users[user] = dict()
            self.users[user]["chatroomID"] = chatroomID
            self.users[user]["userID"] = user
            self.users[user]["avgSeqMsg"] = []
            self.users[user]["maxSeqMsg"] = 0
            self.users[user]["reactionTime"] = []
            self.users[user]["avgCharLen"] = []
            self.users[user]["msgShare"] = 0.0
            self.users[user]["msg"] = 0
            self.users[user]["normal"] = 0
            self.users[user]["photo"] = 0
            self.users[user]["video"] = 0
            self.users[user]["emoticon"] = 0
            self.users[user]["link"] = 0
            self.users[user]["activeness"] = 0
        # NOTE(review): "M"/"F" are never incremented above, so both stay
        # 0.0 after these divisions — gender tallying looks unfinished.
        self.chatroom["M"] = round(self.chatroom["M"] / len(self.participants.keys()), 4)
        self.chatroom["F"] = round(self.chatroom["F"] / len(self.participants.keys()), 4)
    # Return all metrics
    def getMetrics(self):
        """Run every metric pass and return [chatroom dict, users dict]."""
        days = 54 # You can set the number of days that you want to measure.
        self.getSequentialMsgs()
        self.getReactionTimes()
        self.getCharLen()
        self.getActiveParticipants(days, days)
        self.cntMsgType(days)
        return [self.chatroom, self.users]
    # Count the number of active participants for a specific period
    def getActiveParticipants(self, period, n): # period : days, # n : times of activity
        """Mark each user "A" (active, >= n messages in the window) or "I".

        NOTE(review): the cutoff multiplies `period` (days) by hour2Sec and
        maxInterval (hours), which mixes units — verify the intended window.
        """
        if self.lang == "kr":
            date = "2017-05-24 00:00" # You must set the base date
            FMT = '%Y-%m-%d %H:%M'
        else :
            date = "2017-May-24 00:00"
            FMT = '%Y-%b-%d %H:%M'
        # Walk messages newest-first until one falls outside the window.
        for msg in reversed(self.messages):
            interval = datetime.strptime(date, FMT) - datetime.strptime(self.convertTime(msg), FMT)
            if timedelta.total_seconds(interval) > period * self.hour2Sec * self.maxInterval:
                break
            self.users[msg["participant"]]["activeness"] += 1
        for user in self.users:
            value = self.users[user]["activeness"]
            if value >= n:
                self.users[user]["activeness"] = "A"
                self.chatroom["activePop"] += 1
            else :
                self.users[user]["activeness"] = "I"
    # Count the number of messages for a specific period
    def cntMsgType(self, period):
        """Tally message counts by type per user over the last `period` days,
        and each user's share of all messages in that window."""
        cntMsg = 0
        if self.lang == "kr":
            date = "2017-05-24 00:00" # You must set the base date
            FMT = '%Y-%m-%d %H:%M'
        else :
            date = "2017-May-24 00:00"
            FMT = '%Y-%b-%d %H:%M'
        for msg in reversed(self.messages):
            interval = datetime.strptime(date, FMT) - datetime.strptime(self.convertTime(msg), FMT)
            if timedelta.total_seconds(interval) > period * self.hour2Sec * self.day2Hour:
                break
            cntMsg += 1
            self.users[msg["participant"]]["msg"] += 1
            if self.photo.match(msg["message"]):
                self.users[msg["participant"]]["photo"] += 1
            elif self.video.match(msg["message"]):
                self.users[msg["participant"]]["video"] += 1
            elif self.link.match(msg["message"]):
                self.users[msg["participant"]]["link"] += 1
            elif self.emoticon.match(msg["message"]):
                self.users[msg["participant"]]["emoticon"] += 1
            else:
                self.users[msg["participant"]]["normal"] += 1
        for user in self.users:
            self.users[user]["msgShare"] = round(self.users[user]["msg"] / cntMsg, 4)
    # Get interval from all pairs of users
    def getIntervalTime(self):
        """Average gap (seconds) between consecutive messages, ignoring gaps
        longer than maxInterval hours; -1.0 when too few samples."""
        intervals = []
        for prev_msg, msg in zip(self.messages, self.messages[1:]):
            interval = self.calculateInterval(prev_msg, msg)
            if timedelta.total_seconds(interval) < self.hour2Sec * self.maxInterval:
                intervals.append(interval)
        if len(intervals) > 1:
            avg_interval = timedelta.total_seconds(sum(intervals, timedelta()) / len(intervals))
        else:
            avg_interval = -1.0
        return avg_interval
    # Get some information about consecutive message from a user
    def getSequentialMsgs(self):
        """Per user: average and maximum length of runs of consecutive
        messages from that user. NOTE(review): `cnt is 0` is an identity
        check on a small int — works in CPython but should be `== 0`."""
        cnt = 0
        user = ""
        for msg in self.messages:
            if cnt is 0 :
                cnt += 1
                user = msg["participant"]
            elif user == msg["participant"]:
                cnt += 1
            else :
                # Run ended: record its length against the previous sender.
                self.users[user]["avgSeqMsg"].append(cnt)
                cnt = 1
                user = msg["participant"]
        for user in self.users:
            value = self.users[user]["avgSeqMsg"]
            if len(value) > 0:
                self.users[user]["avgSeqMsg"] = round(sum(value)/len(value), 4)
                self.users[user]["maxSeqMsg"] = max(value)
            else:
                self.users[user]["avgSeqMsg"] = 0.0
                self.users[user]["maxSeqMsg"] = 0
    # Get some reaction information of a user from a latest message.
    def getReactionTimes(self):
        """Per user: average seconds taken to reply to another user's
        message (gaps beyond maxInterval hours excluded; -1 if no samples).
        Also stores the room-wide average in chatroom["avgReactTime"]."""
        avgReactTime = 0
        for prev_msg, msg in zip(self.messages, self.messages[1:]):
            if prev_msg["participant"] == msg["participant"]:
                continue
            else:
                interval = self.calculateInterval(prev_msg, msg)
                # Skip the interval > 1 day
                if timedelta.total_seconds(interval) < self.hour2Sec * self.maxInterval:
                    self.users[msg["participant"]]["reactionTime"].append(interval)
        for user in self.users:
            value = self.users[user]["reactionTime"]
            if len(value) > 0:
                self.users[user]["reactionTime"] = timedelta.total_seconds(sum(value, timedelta()) / len(value))
            else:
                self.users[user]["reactionTime"] = -1
            avgReactTime += int(self.users[user]["reactionTime"])
        self.chatroom["avgReactTime"] = round(avgReactTime / float(len(self.users)), 4)
    # dynamics : Join / Exit
    def getDynamics(self):
        """Number of join/leave events in the log."""
        return len(self.dynamics)
    # Count characters from a specific user
    def getCharLen(self):
        """Average message length (characters) per user and room-wide."""
        avgCharLen = 0
        for msg in self.messages:
            self.users[msg["participant"]]["avgCharLen"].append(len(msg["message"]))
        for user in self.users:
            avgCharLen += sum(self.users[user]["avgCharLen"])
            if len(self.users[user]["avgCharLen"]) > 0:
                self.users[user]["avgCharLen"] = round(sum(self.users[user]["avgCharLen"]) / float(len(self.users[user]["avgCharLen"])), 4)
            else:
                self.users[user]["avgCharLen"] = 0.0
        self.chatroom["avgCharLen"] = round(avgCharLen / float(len(self.messages)), 4)
    def calculateInterval(self, prev_msg, msg):
        """timedelta between two parsed message dicts (msg minus prev_msg)."""
        prev_time = self.convertTime(prev_msg)
        time = self.convertTime(msg)
        if self.lang == "kr":
            FMT = '%Y-%m-%d %H:%M'
        else:
            FMT = '%Y-%b-%d %H:%M'
        return datetime.strptime(time, FMT) - datetime.strptime(prev_time, FMT)
    def convertTime(self, msg):
        """Render a message dict's timestamp as 'Y-m-d H:M' in 24h form,
        folding the Korean/English AM-PM marker into the hour.
        NOTE(review): `is not 12` / `is 12` are identity checks on ints —
        CPython's small-int cache makes them work, but `!=` / `==` is correct."""
        hour = int(msg['hour'])
        if (msg['meridiem'] == "오후" or msg['meridiem'] == "PM") and hour is not 12:
            hour = (hour+12)%24
        elif (msg['meridiem'] == "오전" or msg['meridiem'] == "AM") and hour is 12:
            hour = 0
        return '{}-{}-{} {}:{}'.format(msg['year'], msg['month'], msg['date'], hour, msg['minute'])
    # Estimate age of specific room
    def getOld(self, datelist):
        """Seconds elapsed between the first and last day-separator lines."""
        firstDate = datelist[0]
        endDate = datelist[-1]
        firstDate = '{}-{}-{}'.format(firstDate['year'], firstDate['month'], firstDate['date'])
        endDate = '{}-{}-{}'.format(endDate['year'], endDate['month'], endDate['date'])
        if self.lang == "kr":
            FMT = '%Y-%m-%d'
        else:
            FMT = '%Y-%B-%d'
        return timedelta.total_seconds(datetime.strptime(endDate, FMT) - datetime.strptime(firstDate, FMT))
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('chatlog', help='put your directory path here')
    parser.add_argument('-C', '--client-lang',
                        choices=['kr', 'en'],
                        default='kr',
                        help='KakaoTalk client language')
    parser.add_argument('-U', '--username',
                        default='Empty',
                        help='set specific KakaoTalk user to \'user1\'')
    args = parser.parse_args()
    ReaderClass = {
        'kr': KoreanChatLogReader,
        'en': EnglishChatLogReader
    }
    prefix = ""
    cnt = 0
    exportCsvDict = dict()
    chatroomList = []
    userList = []
    # Results are written next to the input logs as chatroom.json / user.json.
    f_chatroom = open(args.chatlog + "/chatroom.json", "w", encoding='utf-8')
    f_user = open(args.chatlog + "/user.json", "w", encoding='utf-8')
    # Process every exported log; the export language is guessed from the
    # file name ("KakaoTalk ..." prefix => English export).
    for file in glob.glob(args.chatlog+"/*.txt"):
        cnt += 1
        print (file[len(args.chatlog)+1:])
        if file[len(args.chatlog)+1:len(args.chatlog)+10] == "KakaoTalk":
            args.client_lang = 'en'
        else:
            args.client_lang = 'kr'
        reader = ReaderClass[args.client_lang]()
        chatroomLogs = reader.readChatroomLog(file, args.username)
        exportCsvDict.update(chatroomLogs[2]) # update participants
        analyzer = Analyzer(args.client_lang, chatroomLogs, prefix + str(cnt))
        chatroom, users = analyzer.getMetrics()
        chatroomList.append(chatroom)
        for user in users:
            userList.append(users[user])
    json.dump(chatroomList, f_chatroom)
    json.dump(userList, f_user)
    f_chatroom.close()
f_user.close() | [
"jyp0802@hotmail.com"
] | jyp0802@hotmail.com |
dd203b86fd8abbb163bbe54b4e921223fe92e53f | 1ed65a23ea5d9a135096bc55ea9df9b96625d909 | /core/migrations/0030_userprofile_is_active.py | 905bbf27e4e80966ed1aa6b9747b3c4b8caca345 | [] | no_license | nfishel48/simntx | 367b8323e6b4f433912eb687888a456e0959c228 | 0dc7f6c41adff1c21a52aca6e2712e7fcb3e9a48 | refs/heads/master | 2022-12-31T02:22:28.344922 | 2020-10-16T02:21:23 | 2020-10-16T02:21:23 | 271,566,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | # Generated by Django 2.2 on 2020-06-25 03:36
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the boolean `is_active` flag
    # (default False) to the `userprofile` model.
    dependencies = [
        ('core', '0029_auto_20200624_2234'),
    ]
    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='is_active',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"nfishel@emich.edu"
] | nfishel@emich.edu |
b1c1eca6d9cd2f7761661a9abe7a38a71c3ffc06 | baec3aca9482e90605ac4e4ecee52b3d6eb44f1f | /21/d21.py | 3cc7219aec0f5c0137007760f688852551e5a018 | [] | no_license | grvn/aoc2017 | 9390d89dbcda7a10352ad65dae71eeec51c930ea | 48d380d8ff7000d38fdba9fb93bcfa99c1f0c447 | refs/heads/master | 2021-10-08T06:06:35.298716 | 2018-12-08T18:52:36 | 2018-12-08T18:52:36 | 112,791,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,623 | py | #!/usr/bin/env python3
from sys import argv
# Usage: d21.py <rules-file> <iterations>
filename=argv[1]
iterations=int(argv[2])
rules={}  # enhancement rules: input square (tuple of tuples) -> output square
start=((".", "#", "."), (".", ".", "#"), ("#", "#", "#"))  # fixed 3x3 starting pattern
def rotate(sqgr):
    """Return *sqgr* (a tuple-of-tuples square grid) rotated 90 degrees."""
    reversed_rows = sqgr[::-1]
    return tuple(map(tuple, zip(*reversed_rows)))
with open(filename) as f:
    input = f.readlines()
# NOTE(review): `input` shadows the builtin; harmless here because the
# builtin is never used afterwards.
input = [line.strip() for line in input]
# Register each rule under every rotation of its pattern, and every
# rotation of its vertical flip, so lookups need no normalization.
for line in input:
    fro,to=line.split(' => ')
    fro=tuple(map(tuple, fro.split('/')))
    to=tuple(map(tuple, to.split('/')))
    fro0=fro
    while True:
        rules[fro]=to
        fro=rotate(fro)
        if fro==fro0:
            break
    fro=tuple(reversed(fro))
    fro0=fro
    while True:
        rules[fro]=to
        fro=rotate(fro)
        if fro==fro0:
            break
# Enhancement rounds: split the grid into 2x2 (even size) or 3x3 blocks,
# replace each block via the rule table, and reassemble the enlarged grid.
for i in range(iterations):
    size=len(start)
    if size%2==0:
        rwlen=2
    else:
        rwlen=3
    tmp=(size//rwlen)*(rwlen+1)
    tmplist=[[0 for _ in range(tmp)] for _ in range(tmp)]
    for j in range(0,size,rwlen):
        for k in range(0,size,rwlen):
            if rwlen==2:
                keytup=((start[j][k],start[j][k+1]),(start[j+1][k],start[j+1][k+1]))
            else:
                keytup=((start[j][k],start[j][k+1],start[j][k+2]),(start[j+1][k],start[j+1][k+1],start[j+1][k+2]),(start[j+2][k],start[j+2][k+1],start[j+2][k+2]))
            newpart=rules[keytup]
            # Copy the (rwlen+1)-sized replacement into the output grid.
            offsetx=(j//rwlen)*(rwlen+1)
            offsety=(k//rwlen)*(rwlen+1)
            for l in range(rwlen+1):
                for m in range(rwlen+1):
                    tmplist[offsetx+l][offsety+m]=newpart[l][m]
    start=tuple(tuple(z) for z in tmplist)
| [
"rickard"
] | rickard |
327203d439300f410de4e56199b07bcb7a5b1cb1 | 3ca67d69abd4e74b7145b340cdda65532f90053b | /programmers/난이도별/level01.제일_작은_수_제거하기/Jaewon0702.py | 9574b875696e370e939054a0279eb98293b8defd | [] | no_license | DKU-STUDY/Algorithm | 19549516984b52a1c5cd73e1ed1e58f774d6d30e | 6f78efdbefd8eedab24e43d74c7dae7f95c2893b | refs/heads/master | 2023-02-18T06:48:39.309641 | 2023-02-09T07:16:14 | 2023-02-09T07:16:14 | 258,455,710 | 175 | 49 | null | 2023-02-09T07:16:16 | 2020-04-24T08:42:27 | Python | UTF-8 | Python | false | false | 156 | py | def solution(arr):
    # Drop only the first occurrence of the smallest value (mutates arr).
    arr.remove(min(arr))
    # Problem convention: return [-1] when nothing remains.
    return arr if len(arr) else [-1]
print(solution([4, 3, 2, 1]) == [4, 3, 2])
print(solution([10]) == [-1])
| [
"45033215+sangmandu@users.noreply.github.com"
] | 45033215+sangmandu@users.noreply.github.com |
b4ebea591ef98eba50becc2628f71215e816a37f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_84/306.py | 0561a547b612e83a36f4cf677430a4ecdf3d37f6 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | import sys, math
from multiprocessing import Pool
def main(data):
    """Solve one case: data = (R, C, s), s a list of rows of characters.

    Scanning top-left to bottom-right, every '#' must anchor a full 2x2
    square of '#', which is replaced in place by the /\\ \\/ diamond art.
    Returns the finished grid as a string, or "Impossible".
    NOTE(review): the bare `except:` relies on IndexError for squares
    running off the grid, but it would also mask any other error.
    """
    R,C,s = data
    for i in range(R):
        for j in range(C):
            try:
                if s[i][j] == "#":
                    if s[i][j+1] == "#" and s[i+1][j] == "#" and s[i+1][j+1] == "#":
                        s[i][j] = "/"
                        s[i][j+1] = "\\"
                        s[i+1][j] = "\\"
                        s[i+1][j+1] = "/"
                    else:
                        return "Impossible"
            except:
                return "Impossible"
    return "\n".join(["".join(l) for l in s])
if __name__ == "__main__":
    # Python 2 driver (print statement in the loop body below). Reads the
    # case file from argv[1] (parallel mode) or falls back to test.txt.
    mode = 0
    if len(sys.argv) > 1:
        f = open(sys.argv[1])
        mode = 1
    else:
        f = open("test.txt")
    T = int(f.readline())
    data = []
    for i in range(T):
        R,C = map(int, f.readline().strip().split())
        s = list()
        for j in range(R):
            s.append(list(f.readline().strip()))
        data.append((R, C, s))
    # Use a process pool only when a file was given on the command line.
    if mode == 1:
        pool = Pool()
        r = pool.map(main, data)
    else:
        r = map(main, data)
    for i in range(T):
print "Case #%d: \n%s" % (i+1, r[i]) | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
1577628297a846c2742329c8bab3cffaef031e77 | b298e8a971bf51036c61d1a2c4d5d61421fc47c5 | /projects/migrations/0003_project_image.py | 050ce465f6f7210e6eaed3e7571b1d25dc47b5ea | [] | no_license | jrusso0818/my-personal-site | 5fe6dc1111669d5c8429703a304f7c08f6358327 | dc2a179e6affb38303445d7a0c72e48c32ba6a8a | refs/heads/master | 2023-03-07T22:03:58.395368 | 2021-02-06T06:53:32 | 2021-02-06T06:53:32 | 322,690,526 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | # Generated by Django 2.2.17 on 2020-12-18 21:36
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: re-adds the `image` file-path field to the
    # `project` model (its previous default 0 is dropped after applying).
    dependencies = [
        ('projects', '0002_remove_project_image'),
    ]
    operations = [
        migrations.AddField(
            model_name='project',
            name='image',
            field=models.FilePathField(default=0, path='/img'),
            preserve_default=False,
        ),
    ]
| [
"jrusso0818@gmail.com"
] | jrusso0818@gmail.com |
7ea07cb2116811e27e177f7323f15767d451495b | 0045204c130599381ee69c771478ac1609dfe67e | /HW_1/problem_3.py | f841cdde32c6b0fa7f53ae9d6f96339525b533b4 | [] | no_license | muzhts-anton/Differential-geometry | 1dea9003a3450cbe89e5beb220804cff67a5a230 | 56da14cf1b71cd4fca2a9fa46de2ef2dedd39ac1 | refs/heads/main | 2023-06-05T21:53:58.032013 | 2021-06-25T08:34:02 | 2021-06-25T08:34:02 | 360,800,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,826 | py | # problem 3 diffGem HW1
from sympy import symbols, diff, sin, cos, sqrt, simplify, Matrix
X_1, X_2, X_3 = symbols('X_1 X_2 X_3', positive=True)
A = Matrix([[1, 0, 0],
[0, sqrt(3)/2, -1/2],
[0, 1/2, sqrt(3)/2]])
X = [X_1,
X_2,
X_3]
x_sphere = [X_1 * sin(X_2) * cos(X_3),
X_1 * sin(X_2) * sin(X_3),
X_1 * cos(X_2)]
Q_mixed = Matrix([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
Jacobian = Matrix([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
for i in range(3):
for k in range(3):
Jacobian[i, k] = diff(x_sphere[i], X[k])
# (1)
for i in range(3):
for k in range(3):
for j in range(3):
Q_mixed[i, k] += A[i, j] * Jacobian[j, k]
# The latex output is too ugly. TODO(Tony): rewrite it
# (2)
r_covar_1 = [Q_mixed[0, 0], Q_mixed[1, 0], Q_mixed[2, 0]]
r_covar_2 = [Q_mixed[0, 1], Q_mixed[1, 1], Q_mixed[2, 1]]
r_covar_3 = [Q_mixed[0, 2], Q_mixed[1, 2], Q_mixed[2, 2]]
r_covar = [r_covar_1, r_covar_2, r_covar_3]
# (3)
g_covar = Matrix([[1, 0, 0],
[0, X_1**2, 0],
[0, 0, X_1**2 * (sin(X_2))**2]])
g_contra = g_covar**(-1)
# (4)
Q_contra = Matrix([[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
W = Matrix([[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
for i in range(3):
for m in range(3):
for j in range(3):
Q_contra[i, m] += g_contra[i, j] * Jacobian[m, j]
for i in range(3):
for j in range(3):
for m in range(3):
W[i, j] += A[j, m] * Q_contra[i, m]
# (5)
Christoffel_mixed = [Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]]),
Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]]),
Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])]
for i in range(3):
for j in range(3):
for m in range(3):
for k in range(3):
Christoffel_mixed[m][i, j] += 1/2 * g_contra[k, m] * (diff(g_covar[k, j], X[i]) + diff(g_covar[i, k], X[j]) - diff(g_covar[i, j], X[k]))
Christoffel_covar = [Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]]),
Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]]),
Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])]
for k in range(3):
for i in range(3):
for j in range(3):
for m in range(3):
if (i == 0) & (j == 0):
print(Christoffel_mixed[m][i, j] * g_covar[m, k])
Christoffel_covar[i][j, k] += Christoffel_mixed[m][i, j] * g_covar[m, k]
print(Christoffel_covar)
# (6)
H = [0, 0, 0]
for i in range(3):
H[i] = sqrt(g_covar[i, i])
simplify(H[i])
| [
"muzhts.anton@gmail.com"
] | muzhts.anton@gmail.com |
c4315cc3d79adaa753717029196b2f1e64a56817 | 6bad224bb4c81facc0ed44d2330922d1826e23fb | /spamfilter.py | 97357ab1a60999ea37c2aaa6a1babdd0a9a6e511 | [] | no_license | zaid98/nlp | af7c83a692ec6e8cba1936a0288efa38d3a5ce26 | 4fbbcf9e35c9b882685abc1557a5e1c8cf78d565 | refs/heads/master | 2020-06-15T03:25:38.232934 | 2019-07-04T07:36:25 | 2019-07-04T07:36:25 | 195,192,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | import nltk
from nltk.corpus import stopwords
import string
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
# Load the SMS spam dataset; latin-1 copes with the non-UTF-8 bytes in the file.
messages = pd.read_csv('spam.csv', encoding='latin-1')
# Drop the empty spill-over columns the CSV's trailing commas produce.
messages.drop(['Unnamed: 2','Unnamed: 3','Unnamed: 4'],axis=1,inplace=True)
# v1 = label (ham/spam), v2 = message body.
messages = messages.rename(columns={'v1': 'class','v2': 'text'})
def process_text(text):
    """Tokenize a message: strip punctuation and drop English stopwords.

    Returns the remaining words with their original casing (the stopword
    comparison itself is case-insensitive).
    """
    # Remove punctuation characters, then split on whitespace.
    no_punct = ''.join(char for char in text if char not in string.punctuation)
    # Build the stopword set once per call: the original re-read the corpus
    # via stopwords.words('english') and linearly scanned the list for every
    # single token, which dominated the vectorizer's runtime.
    stop_words = set(stopwords.words('english'))
    return [word for word in no_punct.split() if word.lower() not in stop_words]
# 80/20 train/test split: message text vs. ham/spam label.
mail_train, mail_test, class_train, class_test = train_test_split(messages['text'],messages['class'],test_size=0.2)
# Bag-of-words counts -> TF-IDF weighting -> multinomial naive Bayes.
pipeline = Pipeline([
    ('count',CountVectorizer(analyzer=process_text)),
    ('tfidf',TfidfTransformer()),
    ('classifier',MultinomialNB())
    ])
pipeline.fit(mail_train,class_train)
predictions = pipeline.predict(mail_test)
| [
"noreply@github.com"
] | noreply@github.com |
63c03bea228aaf2bf3633ee0bc769131005dcc58 | 36a1a414847c29f406416db5c3e54916c647c8c1 | /LinkedList/mergeSortedll.py | b91d2e709627080389085640261b76232b8eb207 | [] | no_license | advaitp/Data-Structures-and-Algorithms | 07c2dd18bb9892dfd4e3ea1e6ab60c6a50bebdf5 | 83567a1dbd92677eb60711865ab08a7b996f3128 | refs/heads/main | 2023-05-15T09:42:16.254654 | 2021-05-28T10:47:43 | 2021-05-28T10:47:43 | 371,667,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | class Node:
def __init__(self, data):
self.data = data
self.next = None
def merge(head1, head2):
    """Merge two sorted singly linked lists in place and return the new head.

    The merge is stable: on equal values a node from the first list comes
    first. Unlike the previous version, an empty (None) list on either side
    is handled instead of raising AttributeError.
    """
    # Empty-input guards (robustness fix).
    if head1 is None:
        return head2
    if head2 is None:
        return head1
    # Pick the smaller head once; the loop below reuses the same comparison,
    # removing the duplicated head-selection branches of the original.
    if head1.data <= head2.data:
        head = head1
        head1 = head1.next
    else:
        head = head2
        head2 = head2.next
    tail = head
    while head1 is not None and head2 is not None:
        if head1.data <= head2.data:
            tail.next = head1
            head1 = head1.next
        else:
            tail.next = head2
            head2 = head2.next
        tail = tail.next
    # Attach whichever list still has nodes left.
    tail.next = head1 if head1 is not None else head2
    return head
def ll(arr):
    """Build a singly linked list from a sequence and return its head.

    Returns None for an empty sequence.
    """
    if not arr:
        return None
    head = Node(arr[0])
    tail = head
    for value in arr[1:]:
        node = Node(value)
        tail.next = node
        tail = node
    return head
def printll(head):
    """Print the list's values separated by spaces, ending with a newline."""
    node = head
    while node is not None:
        print(node.data, end=' ')
        node = node.next
    print()
# Main
# Driver: each stdin line is a space-separated integer list terminated by -1.
arr1=list(int(i) for i in input().strip().split(' '))
arr2=list(int(i) for i in input().strip().split(' '))
# Drop the trailing -1 sentinel before building each linked list.
l1 = ll(arr1[:-1])
l2 = ll(arr2[:-1])
l = merge(l1, l2)
printll(l)
| [
"advaitpatole@gmail.com"
] | advaitpatole@gmail.com |
cbf9caf78e847840f44e9cd9c6f7768baa91eee8 | de6f8466270d80d72fd696ebb8894d8ef02bca1e | /planner/search.py | ba2538bf9c59f687281c18739fd136a78549eba5 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | cocoflan/roundtrip | eac96231d1cc484e07317ef3068a661ee4dd474a | f2a740f7e6bb13acab979c6a7c09244c76466197 | refs/heads/master | 2022-04-30T13:59:21.736591 | 2021-07-01T21:13:26 | 2021-07-01T21:13:26 | 147,789,413 | 0 | 0 | null | 2022-04-22T20:57:11 | 2018-09-07T07:45:44 | JavaScript | UTF-8 | Python | false | false | 5,576 | py | import time
import moment
from splinter import Browser
from planner.models import Flight, FlightPrice, NoFlights
from planner.models import AirBNB
from urllib.parse import quote_plus
from math import ceil
import json
class Searcher:
    """Scrapes Airbnb listings and Google Flights prices, caching results in
    the local models (AirBNB, FlightPrice, NoFlights)."""
    # Lazily-created Splinter browser shared by all lookups.
    _browser = None
    def browser(self):
        """Return the shared Chrome browser, creating it on first use."""
        if self._browser is None:
            self._browser = Browser("chrome")
        return self._browser
    def quit(self):
        """Close the browser if one was ever opened."""
        if self._browser is not None:
            self._browser.quit()
    def airbnb(self, air, city, adults):
        """Return Airbnb search results for `city` as parsed JSON, fetching
        and caching them in the AirBNB model on first request."""
        def airbnburl(l):
            loc = quote_plus(l)
            # NOTE(review): "¤cy" below looks like mojibake of "&curren" +
            # "cy" (i.e. "&currency") — confirm against the original source.
            return "https://api.airbnb.com/v2/search_results?" \
                   "client_id=3092nxybyb0otqw18e8nh5nty&locale=en-US¤cy=EUR&_format=for_search_results&_limit=20&" \
                   "_offset=0&fetch_facets=true&guests=" + str(
                adults) + "&ib=false&ib_add_photo_flow=true&location=" + loc + "&min_bathrooms=0&" \
                          "min_bedrooms=" + str(
                ceil(int(adults) / 2)) + "&min_beds=" + str(adults) + "&min_num_pic_urls=10&" \
                                         "price_max=210&price_min=30&sort=1&user_lat=52.370216&user_lng=4.895168"
        # NOTE(review): unused local — dead code?
        airbnbdata = dict()
        # NOTE(review): the `air` parameter is shadowed by the model instance here.
        air, created = AirBNB.objects.get_or_create(
            city=city,
            airports=air,
            adults=adults
        )
        if created:
            # Cache miss: fetch the JSON once via the browser and store it.
            browser = self.browser()
            browser.visit(airbnburl(city))
            air.data = browser.find_by_tag("pre").first.text
            air.save()
        return json.loads(air.data)
    def months(self, origin, destination, date, adults):
        """Return cached FlightPrice rows, scraping two month views of the
        Google Flights calendar on a cache miss for `date`'s month."""
        if len(NoFlights.objects.filter(origin=origin, destination=destination)) > 0:
            # Route previously recorded as having no flights at all.
            return []
        entries = FlightPrice.objects.filter(
            origin=origin,
            destination=destination,
            date__year=moment.date(date).year,
            date__month=moment.date(date).month,
            adults=adults)
        if len(entries) == 0:
            browser = self.browser()
            browser.visit(
                'https://www.google.nl/flights/#search;f=' + origin + ';t=' + destination
                + ';d=' + date + ';tt=o;ti=t0800-2000;px=' + adults+";s=0")
            el = browser.find_by_css('.OMOBOQD-G-q')
            el.first.click()
            time.sleep(3)
            # First month's calendar table: each cell is "day\nprice".
            table = browser.find_by_css('.OMOBOQD-p-j').first
            trs = [tr for tr in table.find_by_css('tr')][1:6]
            count = 0
            for tr in trs:
                for td in tr.find_by_css('td'):
                    sp = td.text.split("\n")
                    if len(sp) == 2:
                        day = sp[0]
                        price = sp[1]
                        # Prices look like "€ 1.234" — strip symbol, drop dots.
                        price = int(price.strip('€ ').replace('.', ''))
                        fdate = moment.date(date).replace(days=int(day)).strftime("%Y-%m-%d")
                        fp = FlightPrice(origin=origin, destination=destination, date=fdate, adults=adults, price=price)
                        fp.save()
                        count += 1
            # Second month's calendar table (first day of the next month).
            fdate = moment.date(date).replace(days=1).add(months=1)
            table = browser.find_by_css('.OMOBOQD-p-o').first
            trs = [tr for tr in table.find_by_css('tr')][1:6]
            for tr in trs:
                for td in tr.find_by_css('td'):
                    sp = td.text.split("\n")
                    if len(sp) == 2:
                        day = sp[0]
                        price = sp[1]
                        price = int(price.strip('€ ').replace('.', ''))
                        fdate = moment.date(fdate).replace(days=int(day)).strftime("%Y-%m-%d")
                        fp = FlightPrice(origin=origin, destination=destination, date=fdate, adults=adults, price=price)
                        fp.save()
                        count += 1
            if count == 0:
                # Remember the empty result so we skip scraping next time.
                NoFlights(origin=origin, destination=destination).save()
        # NOTE(review): this re-query filters by the exact `date`, not by the
        # month filter used above — confirm that is intended.
        entries = FlightPrice.objects.filter(origin=origin, destination=destination, date=date, adults=adults)
        return entries
#
# def flight(self, origin, destination, date, adults):
# entries = Flight.objects.filter(origin=origin, destination=destination, date=date, adults=adults)
# flight = entries.first()
# if len(entries) == 0:
# browser = self.browser()
# browser.visit(
# 'https://www.google.nl/flights/#search;f=' + origin + ';t=' + destination
# + ';d=' + date + ';tt=o;ti=t0800-2000;px=' + adults)
#
# result = browser.find_by_css(".gwt-HTML a.EESPNGB-d-W.EESPNGB-d-s").first
# if result:
# url = result['href']
# data = result.text.split("\n")
# price = int(data[0].strip('€ '))
# time = data[2]
# company = data[3]
# duration = data[4]
# info = data[5]
# flight = Flight(
# origin=origin,
# destination=destination,
# date=date, adults=adults,
# price=price,
# time=time,
# company=company,
# duration=duration,
# info=info,
# url=url
# )
# flight.save()
#
# else:
# return None
# return flight
#
| [
"nanne@mycel.nl"
] | nanne@mycel.nl |
d61b6b6aa07912fb0f5b6d10f2b0e4d67c896405 | d79f3e7df0fb9dcf23a9ae1adf3c285dd08a360f | /list.py | c25a37e7d50ab435e4c4d9ee894dae68382d6c4a | [] | no_license | ramkodgreat/python1 | 4930afb1bb7f63798bd4237e753aecf5b2ba072b | 64ee9f210aab3b177fc41d351499106876a1d3fc | refs/heads/master | 2020-11-25T18:15:04.315157 | 2019-12-18T07:51:42 | 2019-12-18T07:51:42 | 228,789,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | # List can contain strings integer or float points
# Python index begins from zero
a =["string","int",1,2]
#indexing a list
a[2]
print(a[2])
# Returns all the values of a
b = a[:]
print(b)
#Overwriting values in a string
a[1] = "glo"
print(a)
print(b)
# Skipping one item from a list
a =[1,2,3,4,5,6,7,8,9,10]
# Side note first position is always inclusive while the last position is always exclusive
val = a[2:4]
print(val)
# Print subset of zero to four but skip three values while printing
val = a[0:4:3]
print(val)
# Print subset of zero to nine but skip three values while printing
#It skips three intermittently
# n:n-1
val = a[0:10:3]
print(val)
# Negative value prints backward
val = a[-2]
print(val)
# Findout how to use range of negative numbers
#val = a[-1:-3]
print(val) | [
"ramkodgreat@gmail.com"
] | ramkodgreat@gmail.com |
bd9a420a7684d527bcd274c32086f85330ec970b | 2704ad14c83050ac28f403371daa8e3148440e00 | /chiadoge/wallet/did_wallet/did_info.py | 2294be358c05f883b729c58c3c37a27b0b590ce5 | [
"Apache-2.0"
] | permissive | Bgihe/chiadoge-blockchain | d5e01a53c8e15fa17c47b44d9c95e6511aa98b7f | befb179c65ffe42aebbc47c211f78e193a095d2b | refs/heads/main | 2023-06-01T05:31:51.503755 | 2021-07-05T20:47:32 | 2021-07-05T20:47:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | from dataclasses import dataclass
from typing import List, Optional, Tuple
from chiadoge.types.blockchain_format.sized_bytes import bytes32
from chiadoge.util.ints import uint64
from chiadoge.util.streamable import streamable, Streamable
from chiadoge.wallet.cc_wallet.ccparent import CCParent
from chiadoge.types.blockchain_format.program import Program
from chiadoge.types.blockchain_format.coin import Coin
@dataclass(frozen=True)
@streamable
class DIDInfo(Streamable):
    """Serializable state of a DID wallet."""
    origin_coin: Optional[Coin]  # puzzlehash of this coin is our DID
    backup_ids: List[bytes]  # backup DIDs — presumably recovery helpers; see threshold below
    num_of_backup_ids_needed: uint64  # how many of backup_ids must cooperate
    parent_info: List[Tuple[bytes32, Optional[CCParent]]]  # {coin.name(): CCParent}
    current_inner: Optional[Program]  # represents a Program as bytes
    temp_coin: Optional[Coin]  # partially recovered wallet uses these to hold info
    temp_puzhash: Optional[bytes32]  # see temp_coin
    temp_pubkey: Optional[bytes]  # see temp_coin
| [
"83430349+lionethan@users.noreply.github.com"
] | 83430349+lionethan@users.noreply.github.com |
093c9c5f1b37d499d6bb6486317cbdcbb89a838e | 17b63416cf2f66246e1cf655ccfa2eb9a108da3c | /abupy/AlphaBu/ABuPickStockExecute.py | f344c2ed857ae0f8c94dc194d151f49cddb60f57 | [] | no_license | cmy00cmy/qtLearning | 58aec5cf9fccf9d8f14adf1793306b8b8b5ecb7f | 2b5fee7b9bbd832b20ba4e1b508be16b606249e0 | refs/heads/master | 2020-03-20T01:42:19.882639 | 2018-06-12T14:52:00 | 2018-06-12T14:52:00 | 137,085,926 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | # -*- encoding:utf-8 -*-
"""
包装选股worker进行,完善前后工作
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from .ABuPickStockWorker import AbuPickStockWorker
from ..CoreBu.ABuEnvProcess import add_process_env_sig
from ..MarketBu.ABuMarket import split_k_market
from ..TradeBu.ABuKLManager import AbuKLManager
from ..CoreBu.ABuFixes import ThreadPoolExecutor
__author__ = '阿布'
__weixin__ = 'abu_quant'
@add_process_env_sig
def do_pick_stock_work(choice_symbols, benchmark, capital, stock_pickers):
    """
    Wrap AbuPickStockWorker to run stock picking in this process.
    :param choice_symbols: initial candidate trading symbols
    :param benchmark: trading baseline, an AbuBenchmark instance
    :param capital: an AbuCapital instance (funds management)
    :param stock_pickers: sequence of stock-picking factors
    :return: the symbols that survived the pickers
    """
    kl_pd_manager = AbuKLManager(benchmark, capital)
    stock_pick = AbuPickStockWorker(capital, benchmark, kl_pd_manager, choice_symbols=choice_symbols,
                                    stock_pickers=stock_pickers)
    stock_pick.fit()
    return stock_pick.choice_symbols
@add_process_env_sig
def do_pick_stock_thread_work(choice_symbols, benchmark, capital, stock_pickers, n_thread):
    """Run stock picking across n_thread threads, one symbol slice per thread."""
    result = []
    def when_thread_done(r):
        # Callback: collect each thread's surviving symbols.
        result.extend(r.result())
    with ThreadPoolExecutor(max_workers=n_thread) as pool:
        thread_symbols = split_k_market(n_thread, market_symbols=choice_symbols)
        for symbols in thread_symbols:
            future_result = pool.submit(do_pick_stock_work, symbols, benchmark, capital, stock_pickers)
            future_result.add_done_callback(when_thread_done)
    return result
| [
"chenmyuan@163.com"
] | chenmyuan@163.com |
c08a05fcca3a38d83fa5e5c0f599e925d0a2c97b | 56a4d0d73c349aeaca7580ca248caf0cf893a8c5 | /w2/using_find.py | af6a320679d645b836416da8a37d141b0a0c269d | [] | no_license | alejo8591/m101 | 79e62e0110bcc3e6ca82ac02ae3cdcbe13d51c67 | d93d34a161ecede77defb9a6a3db389d4a9b0de8 | refs/heads/master | 2020-05-18T21:42:46.651036 | 2012-12-17T23:36:49 | 2012-12-17T23:36:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | #!/usr/bin/env python
import pymongo
import sys
# NOTE(review): pymongo.Connection was removed in pymongo 3; modern code
# uses pymongo.MongoClient (writes are acknowledged by default).
connect = pymongo.Connection("mongodb://127.0.0.1", safe=True)
db = connect.school
scores = db.scores
def find():
    # Demonstrates cursor iteration: fetch all exam documents, print at most 11.
    print "Find, reporting for duty"
    query = {'type':'exam'}
    try:
        iter = scores.find(query)  # NOTE(review): 'iter' shadows the builtin
    except:
        # NOTE(review): bare except swallows everything; if it fires, the
        # loop below raises NameError because 'iter' was never bound.
        print "Unexpected error:",sys.exc_info()[0]
    sanity = 0
    # 'sanity' caps the output so a large collection doesn't flood stdout.
    for doc in iter:
        print doc
        sanity+=1
        if (sanity > 10):
            break
def find_one():
    # Demonstrates find_one(): returns a single matching document or None.
    print "find one, reporting for duty"
    query = {'student_id':10}
    try:
        iter = scores.find_one(query)  # NOTE(review): shadows the builtin
    except:
        # NOTE(review): bare except hides all errors; prefer a narrow type.
        print "Unexpected error:",sys.exc_info()[0]
    print iter
find_one()
find() | [
"alejo8591@gmail.com"
] | alejo8591@gmail.com |
42d1a243ee0f6e26eac7dbafad461f06f46e2a6c | ef51e831de7776d273b5384a5ad5b110782ed6f2 | /python script/Linux/update.py | d0917c560b2e2963023a5eb020b1940fd8ffac25 | [] | no_license | sudkumar/Summer-Project-2014 | c6f08d025a17cdb4ed5a9a383e03590368bcb36a | 27f805c5171afc44d61a6b2f877f4428967b5519 | refs/heads/master | 2021-01-10T20:39:29.224258 | 2014-08-02T08:46:19 | 2014-08-02T08:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | import serial
import MySQLdb
# Attendance recorder (Python 2): reads badge IDs from an Arduino over
# serial and updates a MySQL attendance table.
conn = MySQLdb.connect(host="localhost", user="root", passwd="name", db="attendance")
myc = conn.cursor()
# for linux
src = serial.Serial('/dev/ttyACM0', 9600)
# To find the device path: run `ls /dev/tty*` with the Arduino unplugged,
# plug it in, run the command again — the new entry is the port name.
ids = []
# Wait for the instructor's ID before starting an attendance session.
instructure_id = 113
while 1:
    id = src.readline()  # NOTE(review): 'id' shadows the builtin throughout
    print id
    try:
        int(id)
    except ValueError:
        x = 0  # non-numeric line: ignore it
    else:
        id = int(id)
        if id == instructure_id:
            # Instructor badge seen: count one more conducted class...
            query = "UPDATE `esc101` SET class_conducted = class_conducted + 1"
            myc.execute(query)
            conn.commit()
            print "UPdate"
            # ...then record each distinct student badge that follows.
            # NOTE(review): this inner loop never exits, so conn.close()
            # at the bottom is unreachable — confirm intended behavior.
            while 1:
                id = src.readline()
                try:
                    int(id)
                except ValueError:
                    x = 0
                else:
                    if id in ids:
                        continue  # already counted in this session
                    ids.append(id)
                    print id
                    id = int(id)
                    query = ("SELECT `roll_no` FROM `student_id` WHERE id = %d ") % (id)
                    myc.execute(query)
                    id = myc.fetchone()
                    # NOTE(review): fetchone() returns a tuple (or None), so
                    # "%s" renders e.g. "(123,)" into the SQL below — this
                    # query looks broken. Prefer parameterized execute()
                    # arguments over string formatting in any case.
                    query = ("UPDATE `esc101` SET class_attended = class_attended + 1 WHERE id = %s ") % (id)
                    myc.execute(query)
                    conn.commit()
                    query = "UPDATE `esc101` SET percentage = (class_attended/class_conducted)*100 "
                    myc.execute(query)
                    conn.commit()
conn.close()
| [
"luckysud4@gmail.com"
] | luckysud4@gmail.com |
baf02d6d1f7af369aaccc549e18727ba85123b46 | 9737aa767b5cb2baa4e1ac6af64e3acb614e9265 | /smyt/smyt/urls.py | c5bff7d884d2a379fd6c17cdb564a90188291241 | [] | no_license | leotrubach/smyt | 1b11072a89067370f69c55465b84c5a7099d576f | 8398a60ebf5d6a2b490301276c7d6866def1d9a5 | refs/heads/master | 2016-09-05T16:45:10.698129 | 2012-06-19T23:13:56 | 2012-06-19T23:13:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
from dynmodels.views import HomeView, list_models, model_data
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'smyt.views.home', name='home'),
# url(r'^smyt/', include('smyt.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^$', HomeView.as_view(), name='home'),
url(r'^models/$', list_models, name='get_models'),
url(r'^modeldata/$', model_data, name='model_data'),
)
| [
"leotrubach@gmail.com"
] | leotrubach@gmail.com |
41da3a83b961f3970b11aac3c48a97022b4627c8 | 5f9c05b3bee55b0a311e7b0fba452ac13f60eefd | /py/coordinator.py | ae7cc1a7eebf83d2cbecbc980476ba3ad0f82a11 | [] | no_license | Ard1tti/serialdet | d0fb84704239e207009368b0341b6ab974fa7a29 | bc04065f1607ced571c141d59ff35084d144cb27 | refs/heads/master | 2021-01-10T23:13:02.670703 | 2016-10-12T09:03:45 | 2016-10-12T09:03:45 | 70,608,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,370 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Coordinator to help multiple threads stop when requested."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import sys
import threading
import time
import six
from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
class Coordinator(object):
  """A coordinator for threads.
  This class implements a simple mechanism to coordinate the termination of a
  set of threads.
  #### Usage:
  ```python
  # Create a coordinator.
  coord = Coordinator()
  # Start a number of threads, passing the coordinator to each of them.
  ...start thread 1...(coord, ...)
  ...start thread N...(coord, ...)
  # Wait for all the threads to terminate.
  coord.join(threads)
  ```
  Any of the threads can call `coord.request_stop()` to ask for all the threads
  to stop.  To cooperate with the requests, each thread must check for
  `coord.should_stop()` on a regular basis.  `coord.should_stop()` returns
  `True` as soon as `coord.request_stop()` has been called.
  A typical thread running with a coordinator will do something like:
  ```python
  while not coord.should_stop():
    ...do some work...
  ```
  #### Exception handling:
  A thread can report an exception to the coordinator as part of the
  `should_stop()` call.  The exception will be re-raised from the
  `coord.join()` call.
  Thread code:
  ```python
  try:
    while not coord.should_stop():
      ...do some work...
  except Exception as e:
    coord.request_stop(e)
  ```
  Main code:
  ```python
  try:
    ...
    coord = Coordinator()
    # Start a number of threads, passing the coordinator to each of them.
    ...start thread 1...(coord, ...)
    ...start thread N...(coord, ...)
    # Wait for all the threads to terminate.
    coord.join(threads)
  except Exception as e:
    ...exception that was passed to coord.request_stop()
  ```
  To simplify the thread implementation, the Coordinator provides a
  context handler `stop_on_exception()` that automatically requests a stop if
  an exception is raised.  Using the context handler the thread code above
  can be written as:
  ```python
  with coord.stop_on_exception():
    while not coord.should_stop():
      ...do some work...
  ```
  #### Grace period for stopping:
  After a thread has called `coord.request_stop()` the other threads have a
  fixed time to stop, this is called the 'stop grace period' and defaults to 2
  minutes.  If any of the threads is still alive after the grace period expires
  `coord.join()` raises a RuntimeException reporting the laggards.
  ```python
  try:
    ...
    coord = Coordinator()
    # Start a number of threads, passing the coordinator to each of them.
    ...start thread 1...(coord, ...)
    ...start thread N...(coord, ...)
    # Wait for all the threads to terminate, give them 10s grace period
    coord.join(threads, stop_grace_period_secs=10)
  except RuntimeException:
    ...one of the threads took more than 10s to stop after request_stop()
    ...was called.
  except Exception:
    ...exception that was passed to coord.request_stop()
  ```
  """
  def __init__(self, clean_stop_exception_types=None):
    """Create a new Coordinator.
    Args:
      clean_stop_exception_types: Optional tuple of Exception types that should
        cause a clean stop of the coordinator. If an exception of one of these
        types is reported to `request_stop(ex)` the coordinator will behave as
        if `request_stop(None)` was called.  Defaults to
        `(tf.errors.OutOfRangeError,)` which is used by input queues to signal
        the end of input. When feeding training data from a Python iterator it
        is common to add `StopIteration` to this list.
    """
    if clean_stop_exception_types is None:
      clean_stop_exception_types = (errors.OutOfRangeError,)
    self._clean_stop_exception_types = tuple(clean_stop_exception_types)
    # Protects all attributes.
    self._lock = threading.Lock()
    # Event set when threads must stop.
    self._stop_event = threading.Event()
    # Python exc_info to report.
    # If not None, it should hold the returned value of sys.exc_info(), which is
    # a tuple containing exception (type, value, traceback).
    self._exc_info_to_raise = None
    # True if we have called join() already.
    self._joined = False
    # Set of threads registered for joining when join() is called.  These
    # threads will be joined in addition to the threads passed to the join()
    # call.  It's ok if threads are both registered and passed to the join()
    # call.
    self._registered_threads = set()
  def _filter_exception(self, ex):
    """Check if the exception indicated in 'ex' should be ignored.
    This method examines `ex` to check if it is an exception that should be
    reported to the users.  If yes, it returns `ex` as is, otherwise it returns
    None.
    The code returns None for exception types listed in
    `_clean_stop_exception_types`.
    Args:
      ex: None, an `Exception`, or a Python `exc_info` tuple as returned by
        `sys.exc_info()`.
    Returns:
      ex or None.
    """
    # A tuple argument is a sys.exc_info() triple; element 1 is the exception.
    if isinstance(ex, tuple):
      ex2 = ex[1]
    else:
      ex2 = ex
    if isinstance(ex2, self._clean_stop_exception_types):
      # Ignore the exception.
      ex = None
    return ex
  def request_stop(self, ex=None):
    """Request that the threads stop.
    After this is called, calls to `should_stop()` will return `True`.
    Note: If an exception is being passed in, in must be in the context of
    handling the exception (i.e. `try: ... except Exception as ex: ...`) and not
    a newly created one.
    Args:
      ex: Optional `Exception`, or Python `exc_info` tuple as returned by
        `sys.exc_info()`.  If this is the first call to `request_stop()` the
        corresponding exception is recorded and re-raised from `join()`.
    """
    with self._lock:
      ex = self._filter_exception(ex)
      # If we have already joined the coordinator the exception will not have a
      # chance to be reported, so just raise it normally.  This can happen if
      # you continue to use a session have having stopped and joined the
      # coordinator threads.
      if self._joined:
        if isinstance(ex, tuple):
          six.reraise(*ex)
        elif ex is not None:
          # NOTE(touts): This is bogus if request_stop() is not called
          # from the exception handler that raised ex.
          six.reraise(*sys.exc_info())
      if not self._stop_event.is_set():
        if ex and self._exc_info_to_raise is None:
          if isinstance(ex, tuple):
            logging.info("Error reported to Coordinator: %s, %s",
                         type(ex[1]),
                         compat.as_str_any(ex[1]))
            self._exc_info_to_raise = ex
          else:
            logging.info("Error reported to Coordinator: %s, %s",
                         type(ex),
                         compat.as_str_any(ex))
            self._exc_info_to_raise = sys.exc_info()
          # self._exc_info_to_raise should contain a tuple containing exception
          # (type, value, traceback)
          if (len(self._exc_info_to_raise) != 3 or
              not self._exc_info_to_raise[0] or
              not self._exc_info_to_raise[1]):
            # Raise, catch and record the exception here so that error happens
            # where expected.
            try:
              raise ValueError(
                  "ex must be a tuple or sys.exc_info must return the current "
                  "exception: %s"
                  % self._exc_info_to_raise)
            except ValueError:
              # Record this error so it kills the coordinator properly.
              # NOTE(touts): As above, this is bogus if request_stop() is not
              # called from the exception handler that raised ex.
              self._exc_info_to_raise = sys.exc_info()
        self._stop_event.set()
  def clear_stop(self):
    """Clears the stop flag.
    After this is called, calls to `should_stop()` will return `False`.
    """
    with self._lock:
      self._joined = False
      self._exc_info_to_raise = None
      if self._stop_event.is_set():
        self._stop_event.clear()
  def should_stop(self):
    """Check if stop was requested.
    Returns:
      True if a stop was requested.
    """
    return self._stop_event.is_set()
  @contextlib.contextmanager
  def stop_on_exception(self):
    """Context manager to request stop when an Exception is raised.
    Code that uses a coordinator must catch exceptions and pass
    them to the `request_stop()` method to stop the other threads
    managed by the coordinator.
    This context handler simplifies the exception handling.
    Use it as follows:
    ```python
    with coord.stop_on_exception():
      # Any exception raised in the body of the with
      # clause is reported to the coordinator before terminating
      # the execution of the body.
      ...body...
    ```
    This is completely equivalent to the slightly longer code:
    ```python
    try:
      ...body...
    exception Exception as ex:
      coord.request_stop(ex)
    ```
    Yields:
      nothing.
    """
    # pylint: disable=broad-except
    try:
      yield
    except Exception as ex:
      self.request_stop(ex)
    # pylint: enable=broad-except
  def wait_for_stop(self, timeout=None):
    """Wait till the Coordinator is told to stop.
    Args:
      timeout: Float.  Sleep for up to that many seconds waiting for
        should_stop() to become True.
    Returns:
      True if the Coordinator is told stop, False if the timeout expired.
    """
    return self._stop_event.wait(timeout)
  def register_thread(self, thread):
    """Register a thread to join.
    Args:
      thread: A Python thread to join.
    """
    with self._lock:
      self._registered_threads.add(thread)
  def join(self, threads=None, stop_grace_period_secs=120):
    """Wait for threads to terminate.
    This call blocks until a set of threads have terminated.  The set of thread
    is the union of the threads passed in the `threads` argument and the list
    of threads that registered with the coordinator by calling
    `Coordinator.register_thread()`.
    After the threads stop, if an `exc_info` was passed to `request_stop`, that
    exception is re-raised.
    Grace period handling: When `request_stop()` is called, threads are given
    'stop_grace_period_secs' seconds to terminate.  If any of them is still
    alive after that period expires, a `RuntimeError` is raised.  Note that if
    an `exc_info` was passed to `request_stop()` then it is raised instead of
    that `RuntimeError`.
    Args:
      threads: List of `threading.Threads`. The started threads to join in
        addition to the registered threads.
      stop_grace_period_secs: Number of seconds given to threads to stop after
        `request_stop()` has been called.
    Raises:
      RuntimeError: If any thread is still alive after `request_stop()`
        is called and the grace period expires.
    """
    # Threads registered after this call will not be joined.
    with self._lock:
      if threads is None:
        threads = self._registered_threads
      else:
        threads = self._registered_threads.union(set(threads))
      # Copy the set into a list to avoid race conditions where a new thread
      # is added while we are waiting.
      threads = list(threads)
    # Wait for all threads to stop or for request_stop() to be called.
    while any(t.is_alive() for t in threads) and not self.wait_for_stop(1.0):
      pass
    # If any thread is still alive, wait for the grace period to expire.
    # By the time this check is executed, threads may still be shutting down,
    # so we add a sleep of increasing duration to give them a chance to shut
    # down without loosing too many cycles.
    # The sleep duration is limited to the remaining grace duration.
    stop_wait_secs = 0.001
    while any(t.is_alive() for t in threads) and stop_grace_period_secs >= 0.0:
      time.sleep(stop_wait_secs)
      stop_grace_period_secs -= stop_wait_secs
      # Exponential backoff between liveness checks.
      stop_wait_secs = 2 * stop_wait_secs
      # Keep the waiting period within sane bounds.
      # The minimum value is to avoid decreasing stop_wait_secs to a value
      # that could cause stop_grace_period_secs to remain unchanged.
      stop_wait_secs = max(min(stop_wait_secs, stop_grace_period_secs), 0.001)
    # List the threads still alive after the grace period.
    stragglers = [t.name for t in threads if t.is_alive()]
    # Terminate with an exception if appropriate.
    with self._lock:
      self._joined = True
      self._registered_threads = set()
      if self._exc_info_to_raise:
        six.reraise(*self._exc_info_to_raise)
      elif stragglers:
        raise RuntimeError(
            "Coordinator stopped with threads still running: %s" %
            " ".join(stragglers))
  @property
  def joined(self):
    """True if `join()` has been called (cleared again by `clear_stop()`)."""
    return self._joined
# Threads for the standard services.
class LooperThread(threading.Thread):
  """A thread that runs code repeatedly, optionally on a timer.
  This thread class is intended to be used with a `Coordinator`.  It repeatedly
  runs code specified either as `target` and `args` or by the `run_loop()`
  method.
  Before each run the thread checks if the coordinator has requested stop.  In
  that case the looper thread terminates immediately.
  If the code being run raises an exception, that exception is reported to the
  coordinator and the thread terminates.  The coordinator will then request all
  the other threads it coordinates to stop.
  You typically pass looper threads to the supervisor `Join()` method.
  """
  def __init__(self, coord, timer_interval_secs, target=None, args=None,
               kwargs=None):
    """Create a LooperThread.
    Args:
      coord: A Coordinator.
      timer_interval_secs: Time boundaries at which to call Run(), or None
        if it should be called back to back.
      target: Optional callable object that will be executed in the thread.
      args: Optional arguments to pass to `target` when calling it.
      kwargs: Optional keyword arguments to pass to `target` when calling it.
    Raises:
      ValueError: If one of the arguments is invalid.
    """
    if not isinstance(coord, Coordinator):
      raise ValueError("'coord' argument must be a Coordinator: %s" % coord)
    super(LooperThread, self).__init__()
    self.daemon = True
    self._coord = coord
    self._timer_interval_secs = timer_interval_secs
    self._target = target
    if self._target:
      self._args = args or ()
      self._kwargs = kwargs or {}
    elif args or kwargs:
      raise ValueError("'args' and 'kwargs' argument require that you also "
                       "pass 'target'")
    # Register so coord.join() with no thread list still waits for us.
    self._coord.register_thread(self)
  @staticmethod
  def loop(coord, timer_interval_secs, target, args=None, kwargs=None):
    """Start a LooperThread that calls a function periodically.
    If `timer_interval_secs` is None the thread calls `target(args)`
    repeatedly.  Otherwise `target(args)` is called every `timer_interval_secs`
    seconds.  The thread terminates when a stop of the coordinator is
    requested.
    Args:
      coord: A Coordinator.
      timer_interval_secs: Number. Time boundaries at which to call `target`.
      target: A callable object.
      args: Optional arguments to pass to `target` when calling it.
      kwargs: Optional keyword arguments to pass to `target` when calling it.
    Returns:
      The started thread.
    """
    looper = LooperThread(coord, timer_interval_secs, target=target, args=args,
                          kwargs=kwargs)
    looper.start()
    return looper
  def run(self):
    """Thread entry point: loop until the coordinator requests a stop."""
    with self._coord.stop_on_exception():
      self.start_loop()
      if self._timer_interval_secs is None:
        # Call back-to-back.
        while not self._coord.should_stop():
          self.run_loop()
      else:
        # Next time at which to call run_loop(), starts as 'now'.
        next_timer_time = time.time()
        while not self._coord.wait_for_stop(next_timer_time - time.time()):
          next_timer_time += self._timer_interval_secs
          self.run_loop()
      self.stop_loop()
  def start_loop(self):
    """Called when the thread starts."""
    pass
  def stop_loop(self):
    """Called when the thread stops."""
    pass
  def run_loop(self):
    """Called at 'timer_interval_secs' boundaries."""
    if self._target:
      self._target(*self._args, **self._kwargs)
"blueconet@gmail.com"
] | blueconet@gmail.com |
8727efc17619467964719851112b2714b7a40e24 | 3bdad3e626daaf079d316a8ce56b79af095327d4 | /api/migrations/0002_maincycle_user.py | 575c15fda6f25e37b1e083211a8d6d71084f8e60 | [] | no_license | kirussshin/djangoClicker | 3ab2ba798c993d6a42bbb352ca2951b5f094bf82 | 1922abd13289529f0f314cd744d137e84228aaae | refs/heads/master | 2023-06-01T19:34:37.280301 | 2021-06-13T18:54:00 | 2021-06-13T18:54:00 | 376,618,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | # Generated by Django 3.2.4 on 2021-06-13 17:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a one-to-one `user` link from the MainCycle model to the
    project's (possibly swapped) auth user model."""

    dependencies = [
        # The user model and the app's initial schema must exist first.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('api', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='maincycle',
            name='user',
            # default=0 back-fills existing rows; presumably those rows are
            # re-pointed at real users afterwards — TODO confirm.
            field=models.OneToOneField(default=0, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"egorka-knopka@mail.ru"
] | egorka-knopka@mail.ru |
49245674d0105a3e9f82730ebdb250b147a85355 | eb685438961de82301a31e0798630ae1844a82f8 | /migrations/versions/4571ea43dd63_.py | 74d7288c4070368728d1028206f97dc6bcdd35f6 | [] | no_license | Peter-White/base_64_test_python | 2fcd28f517f74b7dac6a68622ce1fd8d6d81373e | 302e5aa13260d4fe13b057fec93c3942320fc7ce | refs/heads/master | 2020-07-16T02:43:19.593580 | 2019-09-09T14:36:39 | 2019-09-09T14:36:39 | 205,700,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | """empty message
Revision ID: 4571ea43dd63
Revises:
Create Date: 2019-08-26 15:47:45.221174
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4571ea43dd63'   # this migration's identifier
down_revision = None        # no parent: first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the place_holder_image table (integer PK + binary image blob)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'place_holder_image',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('image', sa.LargeBinary(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the place_holder_image table, reversing upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('place_holder_image')
    # ### end Alembic commands ###
| [
"pwhitedeveloper@gmail.com"
] | pwhitedeveloper@gmail.com |
b7c734fbbfa3614e2f49ef72774e5dfc16bc2550 | 6c12904dde5ee546cb965c6c0af6901c7f89bea7 | /volume.py | 97951333780cd6388510ea93a05350fd9178a8f1 | [] | no_license | ad52825196/simple-file-system | 59eff6e1e628a5b49460b18bc339625b776bb89d | 58256b29ef6ebef6a154c831c619a340644d6b57 | refs/heads/master | 2020-04-06T03:35:16.877249 | 2017-04-25T00:11:05 | 2017-04-25T00:11:05 | 68,357,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,261 | py | import drive
import directoryentry
class Volume:
    """A tiny file system stored on a Drive.

    On-disk layout: block 0 starts with a character bitmap ('-' free,
    '+' used, one char per block) followed by the root directory's
    fixed-size entries; every other block holds either file data or
    additional directory entries.
    """

    BITMAP_FREE_BLOCK = '-'   # bitmap marker: block is free
    BITMAP_USED_BLOCK = '+'   # bitmap marker: block is in use

    def __init__(self, name):
        # The volume name doubles as the backing drive's name.
        self.name = name
        self.drive = drive.Drive(name)

    def format(self):
        """Erase the drive and write a fresh bitmap plus an empty root directory."""
        self.drive.format()
        block = drive.Drive.EMPTY_BLK
        # Mark every block free in the bitmap at the start of block 0.
        block = Volume.modify_block(block, 0, Volume.BITMAP_FREE_BLOCK * drive.Drive.DRIVE_SIZE)
        entry = str(directoryentry.DirectoryEntry())
        cursor = drive.Drive.DRIVE_SIZE
        flag = True
        # Fill the remainder of block 0 with blank directory entries; the
        # loop relies on modify_block() raising once the cursor runs past
        # the end of the block.
        while flag:
            try:
                block = Volume.modify_block(block, cursor, entry)
                cursor += directoryentry.DirectoryEntry.ENTRY_LENGTH
            except:
                flag = False
        self.write_block(0, block)

    def reconnect(self):
        """Reattach to the backing drive."""
        self.drive.reconnect()

    def disconnect(self):
        """Detach from the backing drive."""
        self.drive.disconnect()

    def ls(self, full_pathname):
        """Return a list of DirectoryEntry objects in the given directory."""
        entry, block_number_list = self.locate(full_pathname, directoryentry.DirectoryEntry.DIRECTORY, show = True)
        if entry is not None:
            # Not the root: list the blocks owned by the located directory.
            block_number_list = entry.get_valid_blocks()
        return self.get_block_number_list_directory_entry(block_number_list)

    def mkfile(self, full_pathname, file_type = directoryentry.DirectoryEntry.FILE):
        """Create a new file (or directory) entry under its parent directory.

        Raises IOError when the root directory has no free entry slot left.
        """
        parent_entry, block_number_list, file_name = self.locate(full_pathname, file_type, True)
        empty_entry_list = self.get_block_number_list_directory_entry(block_number_list, True)
        if len(empty_entry_list) > 0:
            # Reuse the first free slot in the parent's existing blocks.
            entry = empty_entry_list[0]
        elif parent_entry is not None:
            # not root directory: grow the parent with a fresh directory block.
            block_number, block = self.allocate_new_directory_block()
            parent_entry.add_new_block(block_number)
            self.write_block(block_number, block)
            parent_entry.file_length += len(block)
            self.write_entry(parent_entry)
            entry = directoryentry.DirectoryEntry(block_number = block_number)
        else:
            # The root directory lives only in block 0 and cannot grow.
            raise IOError("no more space in root directory")
        entry.file_type = file_type
        entry.file_name = file_name
        self.write_entry(entry)

    def mkdir(self, full_pathname):
        """Create a new (empty) directory."""
        self.mkfile(full_pathname, directoryentry.DirectoryEntry.DIRECTORY)

    def append(self, full_pathname, data):
        """Append `data` to an existing file and persist the updated entry."""
        content, entry = self.get_file_content(full_pathname)
        content += data
        entry = self.write_file_content(entry, content)
        self.write_entry(entry)

    def get_file_content(self, full_pathname):
        """Return the file content along with the directory entry of this file."""
        entry, block_number_list = self.locate(full_pathname, directoryentry.DirectoryEntry.FILE)
        return self.get_entry_content(entry), entry

    def delfile(self, full_pathname, file_type = directoryentry.DirectoryEntry.FILE):
        """Delete a file (or an empty directory): free its blocks and blank its entry."""
        entry, block_number_list = self.locate(full_pathname, file_type)
        block_number_list = entry.get_valid_blocks()
        if file_type == directoryentry.DirectoryEntry.DIRECTORY:
            # A directory may only be removed once it holds no entries.
            entry_list = self.get_block_number_list_directory_entry(block_number_list)
            if len(entry_list) > 0:
                raise IOError("directory is not empty")
        for block_number in block_number_list:
            self.write_block(block_number, release = True)
        # Overwrite the slot with a blank entry at the same on-disk position.
        entry = directoryentry.DirectoryEntry(block_number = entry.block_number, start = entry.start)
        self.write_entry(entry)

    def deldir(self, full_pathname):
        """Delete an empty directory."""
        self.delfile(full_pathname, directoryentry.DirectoryEntry.DIRECTORY)

    # Effectively a static helper (no `self`); always called as
    # Volume.modify_block(...).
    def modify_block(block, start, data):
        """Return `block` with `data` spliced in at offset `start`."""
        end = start + len(data)
        if end > len(block):
            raise ValueError("invalid internal data")
        return block[:start] + data + block[end:]

    def write_block(self, n, data = '', release = False):
        """Write (or free) block `n`, padding with spaces, and update the bitmap."""
        if release:
            data = drive.Drive.EMPTY_BLK
        data += ' ' * (drive.Drive.BLK_SIZE - len(data))
        self.drive.write_block(n, data)
        # Keep the bitmap in block 0 in sync with this write.
        block = self.drive.read_block(0)
        if release:
            block = Volume.modify_block(block, n, Volume.BITMAP_FREE_BLOCK)
        else:
            block = Volume.modify_block(block, n, Volume.BITMAP_USED_BLOCK)
        self.drive.write_block(0, block)

    # Effectively a static helper (no `self`).
    def get_path_list(full_pathname):
        """Split an absolute '/a/b/c' path into its components; '/' yields []."""
        path_list = full_pathname.split('/')
        if path_list[0] != '' or len(path_list) < 2:
            raise ValueError("invalid pathname")
        if len(path_list) == 2 and path_list[-1] == '':
            return []
        else:
            return path_list[1:]

    def get_block_directory_entry(self, n, empty = False):
        """Return a list of DirectoryEntry objects in block n.

        With empty=True return the unused (blank-name) slots instead.
        """
        block = self.drive.read_block(n)
        cursor = 0
        if n == 0:
            # skip bitmap
            cursor += drive.Drive.DRIVE_SIZE
        entry_list = []
        while cursor < drive.Drive.BLK_SIZE:
            entry = directoryentry.DirectoryEntry(block[cursor:cursor + directoryentry.DirectoryEntry.ENTRY_LENGTH], n, cursor)
            cursor += directoryentry.DirectoryEntry.ENTRY_LENGTH
            if (not empty and len(entry.file_name) > 0) or (empty and len(entry.file_name) == 0):
                entry_list.append(entry)
        return entry_list

    def get_block_number_list_directory_entry(self, block_number_list, empty = False):
        """Return a list of DirectoryEntry objects in all blocks given in the list."""
        entry_list = []
        for block_number in block_number_list:
            entry_list += self.get_block_directory_entry(block_number, empty)
        return entry_list

    def locate(self, full_pathname, file_type = directoryentry.DirectoryEntry.FILE, make = False, show = False):
        """Return the DirectoryEntry object of the final file or directory if make is False, otherwise the DirectoryEntry object of the parent directory. Also return a block number list containing all the blocks owned by the parent directory. If this is the root directory, the returning DirectoryEntry object will be None and the block number list will only contain block 0."""
        path_list = Volume.get_path_list(full_pathname)
        entry = None
        block_number_list = [0]
        if len(path_list) == 0:
            # root directory
            if show:
                return entry, block_number_list
            else:
                raise ValueError("no file name specified")
        directory_list = path_list[:-1]
        file_name = path_list[-1]
        if len(file_name) == 0:
            raise ValueError("no file name specified")
        if make and len(file_name) > directoryentry.DirectoryEntry.MAX_FILE_NAME_LENGTH:
            raise ValueError("file name too long")
        if ' ' in file_name:
            raise ValueError("cannot have spaces in file name")
        parent_entry = None
        # Walk each intermediate directory component from the root down.
        for directory in directory_list:
            entry_list = self.get_block_number_list_directory_entry(block_number_list)
            # find the directory
            parent_entry = Volume.find_entry_in_entry_list(directoryentry.DirectoryEntry.DIRECTORY, directory, entry_list)
            if parent_entry is None:
                raise ValueError("directory '{}' dose not exist".format(directory))
            block_number_list = parent_entry.get_valid_blocks()
        entry_list = self.get_block_number_list_directory_entry(block_number_list)
        entry = Volume.find_entry_in_entry_list(file_type, file_name, entry_list)
        if make and entry is not None:
            raise ValueError("'{}' already exists".format(file_name))
        elif not make and entry is None:
            raise ValueError("'{}' does not exist".format(file_name))
        if make:
            return parent_entry, block_number_list, file_name
        return entry, block_number_list

    def allocate_new_directory_block(self):
        """Find a free block and generate a block filled with directory entries but not write to the disk. Return the free block number and the content of the block."""
        block_number = self.find_free_block()
        block = drive.Drive.EMPTY_BLK
        entry = str(directoryentry.DirectoryEntry())
        cursor = 0
        flag = True
        # Same sentinel-exception pattern as format(): fill until overflow.
        while flag:
            try:
                block = Volume.modify_block(block, cursor, entry)
                cursor += directoryentry.DirectoryEntry.ENTRY_LENGTH
            except:
                flag = False
        return block_number, block

    def find_free_block(self):
        """Find a free block in the volume."""
        block = self.drive.read_block(0)
        for i in range(drive.Drive.DRIVE_SIZE):
            if block[i] == Volume.BITMAP_FREE_BLOCK:
                return i
        raise IOError("no more space in volume '{}'".format(self.name))

    def write_entry(self, entry):
        """Persist a DirectoryEntry at its recorded block/offset position."""
        block = self.drive.read_block(entry.block_number)
        block = Volume.modify_block(block, entry.start, str(entry))
        self.write_block(entry.block_number, block)

    # Effectively a static helper (no `self`).
    def find_entry_in_entry_list(file_type, file_name, entry_list):
        """Return the found DirectoryEntry object in the entry_list or None if does not exist."""
        for entry in entry_list:
            if entry.file_type == file_type and entry.file_name == file_name:
                return entry

    def get_entry_content(self, entry):
        """Concatenate the entry's data blocks, truncated to its file_length."""
        content = ''
        block_number_list = entry.get_valid_blocks()
        for block_number in block_number_list:
            content += self.drive.read_block(block_number)
        return content[:entry.file_length]

    def write_file_content(self, entry, content):
        """Write `content` over the entry's blocks, allocating more as needed.

        Reuses the entry's existing blocks first; returns the updated entry
        (file_length refreshed).  NOTE(review): blocks left over when the new
        content is shorter than the old are not released here.
        """
        entry.file_length = 0
        block_number_list = entry.get_valid_blocks()
        while len(content) > 0:
            if len(block_number_list) > 0:
                block_number = block_number_list.pop(0)
            else:
                block_number = self.find_free_block()
                entry.add_new_block(block_number)
            self.write_block(block_number, content[:drive.Drive.BLK_SIZE])
            entry.file_length += min(drive.Drive.BLK_SIZE, len(content))
            content = content[drive.Drive.BLK_SIZE:]
        return entry
| [
"me@zhen-chen.com"
] | me@zhen-chen.com |
db929563c62dbc7ae58ceeddfa21da2f076be5a0 | 15e0a3148c311ded5bfc24712edbdc2c712fde17 | /DigitsCombination.py | e12e42abde92fd9fb7bfbc5971266ef6af9f19ca | [] | no_license | BrindaSahoo2020/Python-Programs | f70c9887f07c9c32c249d569c5a53c65c8aff03e | 70e0af223f2cf36b7b01c0f2e7c599efb2f6473c | refs/heads/master | 2022-12-30T07:00:44.320035 | 2020-10-18T03:59:18 | 2020-10-18T03:59:18 | 256,928,406 | 0 | 1 | null | 2020-10-11T12:57:50 | 2020-04-19T06:15:07 | Python | UTF-8 | Python | false | false | 540 | py | #Python program to accept three digits and print all possible combinations from the digits
#Sample Input
'''
Enter first number:4
Enter second number:5
Enter third number:6
'''
#Sample Output
'''
4 5 6
4 6 5
5 4 6
5 6 4
6 4 5
6 5 4
'''
a = int(input("Enter first number:"))
b = int(input("Enter second number:"))
c = int(input("Enter third number:"))
d = []
d.append(a)
d.append(b)
d.append(c)
for i in range(0,3):
for j in range(0,3):
for k in range(0,3):
if(i!=j&j!=k&k!=i):
print(d[i],d[j],d[k]) | [
"brindasahoo.it@gmail.com"
] | brindasahoo.it@gmail.com |
db21ad96464e1a48621b1e1b2bac81478f8c4e9c | 34089a1005c9cc36c24a2b385a876b9500b0d2dd | /oscillate.py | 8d1368fb295c2f6253f62b4ad07c4e5a5daf0afc | [] | no_license | ericnegron/pythonScripts | d955802e9dc5aa6124507b95812acfdc433cfc98 | 9d877724f4063f472433456755eecc415101f0ec | refs/heads/master | 2021-01-17T15:03:48.634505 | 2014-03-10T23:22:27 | 2014-03-10T23:22:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,946 | py | import maya.cmds as mc
import math
# This script allows the user to create a polygon primitive and then specify how the object will oscillate.
# Creates the custom window class called EN_BaseUIWindow.
class EN_BaseUIWindow(object):
    """Base class for a Maya window with a menu bar, a scrollable options
    pane and a standard Action/Create/Close button row.

    Subclasses override displayOptions() to add widgets and the *BtnCmd
    hooks to implement behaviour.
    """

    @classmethod
    def showUI(cls):
        # Factory: instantiate the (sub)class, draw it, and return the
        # instance so the caller keeps a handle to the window object.
        win=cls()
        win.create()
        return win

    # Initializes the handle, title, and size attributes for the window class.
    def __init__(self):
        self.window = "en_baseuiwindow"        # Maya UI handle for the window
        self.title = "Base GUI Window"
        self.size = (600, 400)                 # (width, height) in pixels
        self.supportsToolAction = False
        self.actionName = "Create and Close"   # label of the left-most button

    # Function to draw the window.
    def create(self):
        # If this window has already been created, delete it first so it is
        # rebuilt from scratch.
        if mc.window(self.window, exists=True):
            mc.deleteUI(self.window, window=True)
        # Creates the window using the already initialized attributes.
        self.window = mc.window(self.window, title=self.title, wh=self.size, menuBar = True)
        # Establishes the main layout of the GUI.
        self.mainForm = mc.formLayout(numberOfDivisions=100)
        # Build the menu bar and the common button row.
        self.commonMenu()
        self.commonButtons()
        # Creates a central pane in the display.
        self.optionsBorder = mc.tabLayout(scrollable=True, tabsVisible = False, height = 1)
        # Nests the pane within the main form layout.
        mc.formLayout(self.mainForm, e=True, attachForm = (
            # Pin the pane's top/left/right edges to the UI with 0/2/2 px padding.
            [self.optionsBorder, 'top', 0],
            [self.optionsBorder, 'left', 2],
            [self.optionsBorder, 'right', 2]),
            # Pin the bottom edge of the pane to the top edge of the buttons.
            attachControl = ([self.optionsBorder, 'bottom', 5, self.createBtn]))
        # Allows the panel to scale with the main UI.
        self.optionsForm = mc.formLayout(numberOfDivisions=100)
        # Let the subclass populate the pane, then display the window.
        self.displayOptions()
        mc.showWindow()

    # Adds menu items to the window.
    def commonMenu(self):
        # "Edit" drop-down with save/reset settings entries.
        self.editMenu = mc.menu(label="Edit")
        self.editMenuSave = mc.menuItem(label = "Save Settings")
        self.editMenuReset = mc.menuItem(label = "Reset Settings")
        # "Help" drop-down with a single context-help entry.
        self.helpMenu = mc.menu(label = "Help")
        self.helpMenuItem = mc.menuItem(label = "Help on %s" %self.title)

    # Function for the creation of the command buttons.
    def commonButtons(self):
        # Button size: a third of the window width minus 18 px padding,
        # 26 px tall.
        self.commonBtnSize = ((self.size[0]-18)/3, 26)
        # The three common buttons: action ("Create and Close"), "Create"
        # and "Close", each wired to its command hook.
        self.actionBtn = mc.button(label = self.actionName, height = self.commonBtnSize[1], command = self.actionBtnCmd)
        self.createBtn = mc.button(label = "Create", height = self.commonBtnSize[1], command = self.createBtnCmd)
        self.closeBtn = mc.button(label = "Close", height = self.commonBtnSize[1], command = self.closeBtnCmd)
        # Dictates how the buttons scale when the user scales the UI.
        mc.formLayout(self.mainForm, e=True, attachForm=(
            # Pin outer edges of the row to the window with 5 px padding.
            [self.actionBtn, 'left', 5],
            [self.actionBtn, 'bottom', 5],
            [self.createBtn, 'bottom', 5],
            [self.closeBtn, 'bottom', 5],
            [self.closeBtn, 'right', 5]),
            # Position outer buttons at 33%/67% of the form's divisions.
            attachPosition = ([self.actionBtn, 'right', 1, 33], [self.closeBtn, 'left', 0, 67]),
            # The middle button stretches between the outer two.
            attachControl = ([self.createBtn, 'left', 4, self.actionBtn], [self.createBtn, 'right', 4, self.closeBtn]),
            # Top edges are left unattached so only width scales.
            attachNone = ([self.actionBtn, 'top'], [self.createBtn, 'top'], [self.closeBtn, 'top']))

    # Placeholder commands for the Edit-menu items; subclasses may override.
    def editMenuSaveCmd(self, *args):
        pass

    def editMenuResetCmd(self, *args):
        pass

    # "Create and Close": run the create action, then dismiss the window.
    def actionBtnCmd(self, *args):
        self.createBtnCmd()
        self.closeBtnCmd()

    # "Create": placeholder; subclasses override to build something.
    def createBtnCmd(self, *args):
        pass

    # "Close": delete the window from the Maya UI.
    def closeBtnCmd(self, *args):
        mc.deleteUI(self.window, window=True)

    # Placeholder for subclass-provided option widgets.
    def displayOptions(self):
        pass
# This portion of the script allows the user to create a geometric primitive and then specify how it wants it to move.
# It follows the module example expression but adds the ability to choose your object. It also changes the sin function to a cos function.
# Establishes the new window class based on the base GUI window class.
class EN_ModuleThirteenWindow(EN_BaseUIWindow):
    """Tool window: create a polygon primitive and attach an expression
    that oscillates one of its attributes with a sin or cos wave.

    Extends EN_BaseUIWindow with primitive/attribute/range/period inputs
    and overrides createBtnCmd() to build the object and expression.
    """

    # Initializes the new window values.
    def __init__(self):
        EN_BaseUIWindow.__init__(self)
        # Overrides the base window class title, size and action label.
        self.title = "Module Thirteen Window"
        self.size = (300, 350)
        self.actionName = "Create and Close"

    # Creates the layout within the base GUI.
    def displayOptions(self):
        mc.columnLayout()
        # Primitive chooser (radio group, defaults to Cube).
        self.labelZero=mc.text(label="Select the Object to Create")
        self.objType=mc.radioButtonGrp(labelArray4=['Cube', 'Cone', 'Cylinder', 'Sphere'], numberOfRadioButtons=4, select=1)
        # Name of the attribute the expression will drive (e.g. translateY).
        self.labelOne=mc.text(label="Name of Attribute to Effect")
        self.attribute = mc.textField(width=150)
        # Oscillation range: maximum and minimum attribute values.
        self.labelTwo=mc.text(label="Maximum Value")
        self.max = mc.floatField(minValue = 0)
        self.labelThree=mc.text(label="Minimum Value")
        self.min = mc.floatField(minValue = 0)
        # Seconds per full oscillation cycle (must be > 0).
        self.labelFour=mc.text(label="Number of seconds per cycle")
        self.time = mc.floatField(minValue = 0.001, value = 1)
        # Waveform chooser: sin or cos.
        self.labelFive=mc.text(label="Type of Oscillation")
        self.oscillate=mc.radioButtonGrp(labelArray2=['sin', 'cos'], numberOfRadioButtons=2, select=1)
        # Collapsible "Notes" section with usage instructions.
        self.xformGrp = mc.frameLayout(label="Notes", collapsable=True)
        self.notes=mc.scrollField(wordWrap=True,text="To use this tool:\n" +
            "First select the type of object you wish to create.\n" +
            "Next, enter the attribute you wish to be effected.\n" +
            "Third, enter the values for the maximum and minimum values you wish to effect as well as the number of seconds per oscillation.\n" +
            "Finally, select whether you want the object to oscillate using a sin or cos function.",
            edit = False, ed= False, width = 400, height = 200)

    # This is the function that the create button will execute when clicked.
    def createBtnCmd(self, *args):
        # Map radio-button index -> polygon creation command.
        self.objIndAsCmd = {1:mc.polyCube, 2:mc.polyCone, 3:mc.polyCylinder, 4:mc.polySphere}
        objIndex = mc.radioButtonGrp(self.objType, query = True, select = True)
        # Create the chosen primitive (it becomes the current selection).
        newObject = self.objIndAsCmd[objIndex]()
        # Gather the expression inputs from the UI.
        sel = mc.ls(sl=True)                      # the just-created object
        att = mc.textField(self.attribute, query = True, text = True)
        minimum = mc.floatField(self.min, query = True, value = True)
        maximum = mc.floatField(self.max, query = True, value = True)
        period = mc.floatField(self.time, query = True, value = True)
        oscType = mc.radioButtonGrp(self.oscillate, query = True, select = True)
        # Angular speed: 2*pi (approximated as 6.28) per period.
        speed = 6.28/period
        # Amplitude ('ran' because `random` is a stdlib name) and midpoint.
        ran = (maximum - minimum)/2.0
        start = minimum + ran
        # Expression form: objectName.attributeName = wave(time*speed) * range + start
        expressionTextCos = (sel[0] + "." + str(att)
            + " = cos(time * " + str(speed)
            + " ) * " + str(ran) + " + "
            + str(start) + ";")
        expressionTextSin = (sel[0] + "." + str(att)
            + " = sin(time * " + str(speed)
            + " ) * " + str(ran) + " + "
            + str(start) + ";")
        # Attach whichever expression the user selected.
        if oscType == 1:
            mc.expression(string=expressionTextSin)
            print "ExpressionSin has sucessfully run."
        elif oscType == 2:
            mc.expression(string=expressionTextCos)
            print "ExpressionCos has successfully run."
        else:
            print "Expression didn't properly execute."
# Calls the GUI.
# Launch the tool window when this script is executed inside Maya.
EN_ModuleThirteenWindow.showUI()
| [
"esnegron@icloud.com"
] | esnegron@icloud.com |
dad02ea4f0608d5be5fe199a38b3181d93ce7718 | dc91e5c22ed3b9128c649392c3dfb04ec74a3976 | /Meter-Distributed/Meter_cfba.py | 27f4d7a28382ad20b632b72672dbbb06fc4e1af1 | [] | no_license | Agrawalayushi/Closeness-Factor-Based-Algorithm-for-Incremental-Clustering-of-Images- | 5744ac2774f0e0d54ba2ffbf6998bdcc80554f79 | 544bda25d861a8ee656afd60e26bfbbde07016fd | refs/heads/main | 2023-04-13T12:38:53.455953 | 2021-04-27T17:04:03 | 2021-04-27T17:04:03 | 361,854,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,509 | py | import pandas as pd
import math as mp
import time
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
def visualize(df_basic, df_incremental, df_merge):
    """Show cluster-size bar charts for the basic, incremental and merged sets.

    Each DataFrame must have a 'CNumber' column; df_merge additionally needs
    'Cluster_Type'.  Displays three matplotlib windows; returns nothing.
    """
    # Cluster sizes of the basic run.
    ax = df_basic.groupby(['CNumber'])['CNumber'].count().plot.bar(title = "Basic...")
    ax.set_xlabel('Clusters')
    ax.set_ylabel('Frequency')
    plt.show()
    # Cluster sizes of the incremental run.
    ax = df_incremental.groupby(['CNumber'])['CNumber'].count().plot.bar(title = "Incremental")
    ax.set_xlabel('Clusters')
    ax.set_ylabel('Frequency')
    plt.show()
    # Stacked bars: per cluster, how many records came from each run.
    ax = df_merge.groupby(['CNumber','Cluster_Type'])['Cluster_Type'].count().unstack(0).plot.bar(stacked=True, figsize=(8, 6))
    ax.legend(loc = 'center right',bbox_to_anchor = (1.4,0.5),ncol = 1)
    plt.title('iteration 1')
    plt.xlabel('clusters')
    plt.ylabel('No of Records')
    plt.show()
# merging basic and incremental dataset
# merging basic and incremental dataset
def mergefile_graph(df_basic, df_incremental):
    """Tag and merge the basic and incremental cluster results.

    Adds a 'Cluster_Type' column to each frame (mutating the arguments),
    concatenates them, sorts by 'CNumber', writes the result to
    'record.csv' and returns the merged DataFrame.
    """
    df_basic['Cluster_Type'] = 'Basic_cluster'
    df_incremental['Cluster_Type'] = 'Incremental_1'
    # DataFrame.append() was removed in pandas 2.0; concat is the
    # supported equivalent.
    merged = pd.concat([df_basic, df_incremental])
    merged = merged.sort_values(by=['CNumber'])
    merged.to_csv('record.csv', index=False)
    print("df_basic length", len(merged))
    return merged
#merging training and test dataset
# merging training and test dataset
def mergefile_representative(dftrain, dftest):
    """Merge the training and test frames, sort by 'CNumber' and write
    the result to 'record.csv'.  The input frames are not modified;
    returns nothing (the CSV on disk is the output).
    """
    # DataFrame.append() was removed in pandas 2.0; concat is the
    # supported equivalent.
    combined = pd.concat([dftrain, dftest])
    combined = combined.sort_values(by=['CNumber'])
    combined.to_csv('record.csv', index=False)
#basic clustering code using cfba
def basic_cluster_lone(df,df1):
df['row_total'] = df.sum(axis = 1)
print("after row total",df.head())
count = 1
closeness_val= []
for i in range(len(df)):
df.loc[i,'Flag']=False
c1 = []
for i in range(len(df)):
if(df.Flag[i]==False):
countercheck = []
df1.loc[i,'CNumber'] = count
df1.loc[i,'Closeness_Value'] = 0
df.loc[i,'Flag']=True
df.loc[i,'CNumber'] = count
for j in range(i+1,len(df)):
if(df.Flag[j]==False):
c1 = df.row_total[i]/(df.row_total[i]+df.row_total[j])
d1 = df.T1[i]+df.T1[j]
d2=c1*d1-df.T1[i]
d3 = mp.sqrt(d1*c1*(1-c1))
prob1 = d2/d3
c_square = mp.pow(prob1,2)
weight = mp.sqrt(d1)
c = c_square * weight
#second feature
col2 = df.V1[i]+df.V1[j]
col21 = (c1*col2-df.V1[i])/mp.sqrt(col2*c1*(1-c1))
e2 = mp.pow(col21,2)
wei2 = mp.sqrt(col2)
c2 = e2 * wei2
#third feature
col4 = df.W1[i]+df.W1[j]
col41 = (c1*col4-df.W1[i])/mp.sqrt(col4*c1*(1-c1))
e4 = mp.pow(col41,2)
wei4 = mp.sqrt(col4)
c4 = e4 * wei4
close1 = c+c2+c4
close2 = weight+wei2+wei4
close = close1/close2
counter = 1
if close<=1:
df1.loc[j,'CNumber'] = count
df1.loc[j,'Closeness_Value']=close
df.loc[j,'Flag']=True
df.loc[j,'CNumber']=count
if(close < 0.00056200894733631):
df1.loc[j,'CNumber']=counter
df.loc[j,'CNumber']=counter
elif(0.0014036781659371 < close < 0.0169289160263237):
df1.loc[j,'CNumber']=counter+1
df.loc[j,'CNumber']=counter+1
elif(0.0169289160263237 < close < 0.0450943423407067):
df1.loc[j,'CNumber']=counter+2
df.loc[j,'CNumber']=counter+2
elif(0.0450943423407067 < close < 0.128604750357539):
df1.loc[j,'CNumber']=counter+3
df.loc[j,'CNumber']=counter+3
elif(0.128604750357539 < close < 0.248836893559896):
df1.loc[j,'CNumber']=counter+4
df.loc[j,'CNumber']=counter+4
elif(0.248836893559896 < close < 0.486936879396661):
df1.loc[j,'CNumber']=counter+5
df.loc[j,'CNumber']=counter+5
elif(0.486936879396661 < close < 0.619630852965444):
df1.loc[j,'CNumber']=counter+6
df.loc[j,'CNumber']=counter+6
else:
df1.loc[j,'CNumber']=counter+7
df1.to_csv('record.csv')
df1 = df1.sort_index()
df1 = df1.sort_values(by = 'CNumber')
df1.to_csv('record.csv')
#add name of csv
df =df.drop(['Flag','row_total'],axis=1)
return df1,df
# incremental clustering code using cfba
# incremental clustering code using cfba
def incremental_cluster(dftest, df2):
    """Assign each row of `dftest` to an existing cluster from 'record.csv'
    using the same closeness-factor formula as basic_cluster_lone().

    Rows closer than 1 to a representative inherit its CNumber; rows
    matching no representative are flagged as outliers and given a new
    cluster number.  Returns (df2, dftest).
    """
    # Representatives produced by the basic pass.
    df = pd.read_csv('record.csv')
    print("test data", df.head())
    df_rep = df.iloc[:, 1:]           # drop the CSV index column
    df_rep['row_total'] = df_rep.sum(axis = 1)
    print(df_rep.head())
    whole = []          # indices matched to some representative
    outlier = []        # indices that matched nothing so far
    fclose = []         # NOTE(review): appended to but never read
    outlierclose = []   # NOTE(review): appended to but never read
    dftest['row_total'] = dftest.sum(axis = 1)
    for i in range(len(dftest)):
        dftest.loc[i, 'Flag'] = False
    c1 = []
    for i in range(len(df_rep)):
        whole.append(i)
        for j in range(len(dftest)):
            if(dftest.Flag[j] == False):
                # Closeness factor between representative i and test row j,
                # built per-feature exactly as in basic_cluster_lone().
                c1 = df_rep.row_total[i] / (df_rep.row_total[i] + dftest.row_total[j])
                d1 = df_rep.T1[i] + dftest.T1[j]
                d2 = c1 * d1 - df_rep.T1[i]
                d3 = mp.sqrt(d1 * c1 * (1 - c1))
                prob1 = d2 / d3
                c_square = mp.pow(prob1, 2)
                weight = mp.sqrt(d1)
                c = c_square * weight
                # feature V1
                col2 = df_rep.V1[i] + dftest.V1[j]
                col21 = (c1 * col2 - df_rep.V1[i]) / mp.sqrt(col2 * c1 * (1 - c1))
                e2 = mp.pow(col21, 2)
                wei2 = mp.sqrt(col2)
                c2 = e2 * wei2
                # feature W1
                col4 = df_rep.W1[i] + dftest.W1[j]
                col41 = (c1 * col4 - df_rep.W1[i]) / mp.sqrt(col4 * c1 * (1 - c1))
                e4 = mp.pow(col41, 2)
                wei4 = mp.sqrt(col4)
                c4 = e4 * wei4
                close1 = c + c2 + c4
                close2 = weight + wei2 + wei4
                close = close1 / close2
                if close <= 1:
                    # Inherit the representative's cluster number.
                    whole.append(j)
                    df2.loc[j, 'CNumber'] = df.CNumber[i]
                    df2.loc[j, 'Closeness Value'] = close
                    dftest.loc[j, 'Flag'] = True
                    dftest.loc[j, 'CNumber'] = df.CNumber[i]
                    #add name of csv of incremental
                    df2.to_csv('record.csv')
                else:
                    outlier.append(j)
                    outlierclose.append(close)
                    fclose.append(0)
        # Rows that matched nothing become a fresh cluster (i + 2).
        resultant_list = list(set(outlier) - set(whole))
        # NOTE(review): `len(...) != None` is always True — presumably
        # `!= 0` was intended; harmless, as assigning with an empty list
        # is a no-op.
        if(len(resultant_list) != None):
            dftest.loc[resultant_list, 'CNumber'] = i + 2
            dftest.loc[resultant_list, 'Flag'] = True
            df2.loc[resultant_list, 'CNumber'] = i + 2
    df2 = df2.fillna(-1)
    df2 = df2.sort_index()
    df2 = df2.sort_values(by = 'CNumber')
    #add name of csv
    df2.to_csv('record.csv')
    dftest = dftest.drop(['Flag', 'row_total'], axis=1)
    return df2, dftest
def scale(pandas_df):
    """Min-max scale features T1/V1/W1 to [0, 10] and split 80/20.

    Returns (scaled_train, scaled_test, inverse_train, inverse_test) as
    DataFrames, where the inverse frames are the splits mapped back to
    the original value range.
    """
    feature_cols = ['T1', 'V1', 'W1']
    raw = pandas_df[feature_cols]
    scaler = MinMaxScaler(feature_range=(0, 10))
    scaled = scaler.fit_transform(raw)
    print("normalised dataset with MinMaxScaler", scaled)
    # Random 80/20 split of the scaled rows.
    train_arr, test_arr = train_test_split(scaled, test_size=0.2)
    train1 = pd.DataFrame(train_arr, columns=feature_cols)
    test1 = pd.DataFrame(test_arr, columns=feature_cols)
    print("length of training and testing data", len(train1), len(test1))
    # Map both splits back to the original scale.
    df1 = pd.DataFrame(scaler.inverse_transform(train_arr), columns=feature_cols)
    df2 = pd.DataFrame(scaler.inverse_transform(test_arr), columns=feature_cols)
    print("length of Inversed data", len(df1), len(df2))
    return train1, test1, df1, df2
| [
"noreply@github.com"
] | noreply@github.com |
8a58387defcb67cfd652d2f25881520516b9f458 | f741f7f070d150cffbb63f13666fec5dceb4c7c4 | /3.massives/5.py | 2c448a2722cdcbb8ded28131ad1b8cfff5321d19 | [] | no_license | mahhets/algorithms-and-data-structures | 7e2359c15a1cfd46200f9f2008a3a4050e2a5f40 | d1d198020e9d1f7f5085188678828520e4da36c8 | refs/heads/main | 2023-05-29T02:38:05.716730 | 2021-06-10T20:26:34 | 2021-06-10T20:26:34 | 367,147,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | # Ассоциативный массив, словарь (ключ - значение других столбцов, как в SQL)
"""
Пользователь вводит кол-во предприятий, названия, плановую и фактическую прибыль каждого предприятия
Вычислить процент выполнения плана и вывести данные с предварительной фильтрацией
"""
k = int(input('Введите кол-во предприятий: '))
enterprises = {}
for i in range(1, k+1):
name = input('Название предприятия: ')
enterprises[name] = [float(input('Введите плановую прибыль:')),
float(input('Введите фактическую прибыль: '))]
enterprises[name].append(enterprises[name][1]/enterprises[name][0])
for i,item in enterprises.items():
if item[1] > 0:
print(f'Предприятие {i} заработало {item[1]} что составило {item[2] * 100:.2f}%')
| [
"the_mahh@mail.ru"
] | the_mahh@mail.ru |
42e64f9eabca1da6ef4c05025e9e1c63b6a4cea6 | 1b037639fad280142ee84d10412c4bc1d729148c | /act_complementaryMedicine/db_method/insert.py | fb71ce5dc7599f5a985194c1f37fbb51b23a7644 | [] | no_license | Tohsaka-Rin/act-cm | fb36f5a16638f52646c2834a0d73c1fb1fab1f1d | c99dae527510fc0352fac29df36bd3090d361b89 | refs/heads/master | 2021-01-23T01:55:42.703190 | 2017-03-22T04:27:28 | 2017-03-22T04:27:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,500 | py | # -*- coding:UTF-8 -*-
from act_db.models import DoctorInfo,GroupInfo,PatientGroup,PatientInfo,RelationInfo,OutPatientServiceInfo,EmergCallInfo,InHospitalInfo,Clinic,ESS,MBQ,SGRO,AttachInfo,AccessoryExamination
import time
import datetime
# 添加新用户
# 参数是一个字典,包含医生的所有信息
# 成功返回True,失败返回False
# 添加新用户
# 参数是一个字典,包含医生的所有信息
# 成功返回True,失败返回False
def addDoctorInfo(data):
    """Create a DoctorInfo row from a dict of doctor fields.

    `data['birthday']` must be an ISO 'YYYY-MM-DD' string; registerDate
    is generated automatically by the model.  Returns True on success,
    False on any failure (including a malformed/missing birthday, which
    the original version let escape as an exception).
    """
    try:
        # Parse the birthday string into a datetime.date for the DateField.
        d = datetime.datetime.strptime(data['birthday'], "%Y-%m-%d").date()
        newObj = DoctorInfo(name = data['name'], sex = data['sex'], birthday = d, userName = data['userName'],
                            password = data['password'], cellphone = data['cellphone'], weChat = data['weChat'],
                            mail = data['mail'], title = data['title'], hospital = data['hospital'],
                            department = data['department'], userGroup = data['userGroup'])
        newObj.save()
        return True
    except Exception:
        # A bare `except:` would also swallow SystemExit/KeyboardInterrupt.
        return False
# Create a new experiment group owned by doctor D_id.
def addExpGroup(D_id, name, info):
    """Insert a GroupInfo row; returns True on success, False otherwise."""
    try:
        GroupInfo(D_id=D_id, name=name, information=info).save()
        return True
    except Exception:  # narrowed from bare except: don't trap SystemExit
        return False
# Link patient P_id to experiment group G_id.
# NOTE(review): ids are not validated here; bad ids only fail if the
# database schema enforces them.
def addPatientToExpGroup(G_id, P_id):
    """Insert a PatientGroup link row; returns True on success."""
    try:
        PatientGroup(G_id=G_id, P_id=P_id).save()
        return True
    except Exception:  # narrowed from bare except
        return False
# Register a new patient and link them to the group in data['G_id'].
def addPatientInfo(data):
    """Insert a PatientInfo row plus its PatientGroup link.

    Returns True on success, False on any failure.  NOTE(review): the
    two saves are not wrapped in a transaction, so a failed link insert
    can leave an orphaned PatientInfo row.
    """
    fields = ['P_id', 'sign', 'name', 'sex', 'age', 'nation', 'height',
              'weight', 'education', 'career', 'marriage', 'photo',
              'homeAddr', 'birthAddr', 'activityAddr1', 'activityAddr2',
              'actionAddr', 'diastolicPressure', 'systolicPressure',
              'neckCircu', 'payment', 'telephone', 'cellphone',
              'partnerPhone']
    try:
        birthday = datetime.datetime.strptime(data['birthday'], "%Y-%m-%d").date()
        PatientInfo(birthday=birthday, **{f: data[f] for f in fields}).save()
        PatientGroup(G_id=data['G_id'], P_id=data['P_id']).save()
        return True
    except Exception:  # narrowed from bare except
        return False
# Register a new family member / contact for patient data['P_id'].
def addRelationInfo(data):
    """Insert a RelationInfo row; returns True on success."""
    fields = ['P_id', 'name', 'sex', 'telephone', 'cellphone', 'weChat',
              'mail', 'homeAddr']
    try:
        RelationInfo(**{f: data[f] for f in fields}).save()
        return True
    except Exception:  # narrowed from bare except
        return False
# Record an out-patient (clinic) visit.
def addOutPatientServiceInfo(data):
    """Insert an OutPatientServiceInfo row; data['date'] is 'YYYY-MM-DD'.

    Returns True on success, False otherwise.  Unlike the old version,
    `data` is no longer mutated in place.
    """
    fields = ['P_id', 'place', 'isStabel', 'symptom', 'physicalExam',
              'breathErr', 'acuteExac', 'disease', 'use_abt', 'useJmzs',
              'hospital', 'airRelate', 'treatMethod', 'medicine']
    try:
        visit_date = datetime.datetime.strptime(data['date'], "%Y-%m-%d").date()
        OutPatientServiceInfo(date=visit_date,
                              **{f: data[f] for f in fields}).save()
        return True
    except Exception:  # narrowed from bare except
        return False
# Record an emergency-call / ER visit.
def addEmergCallInfo(data):
    """Insert an EmergCallInfo row; data['date'] is 'YYYY-MM-DD'.

    Returns True on success, False otherwise.  `ecDate` is stored as
    given (it is not parsed here, matching the previous behavior).
    """
    fields = ['P_id', 'place', 'symptom', 'acuteExac', 'disease',
              'byxCheck', 'byxResult', 'ycWcTreat', 'useAbt', 'abtType',
              'useJmzs', 'ecMethod', 'ecDate', 'hospital', 'treatMethod',
              'airRelate']
    try:
        call_date = datetime.datetime.strptime(data['date'], "%Y-%m-%d").date()
        EmergCallInfo(date=call_date, **{f: data[f] for f in fields}).save()
        return True
    except Exception:  # narrowed from bare except
        return False
# Record an in-hospital (admission) episode.
def addInHospitalInfo(data):
    """Insert an InHospitalInfo row; data['date'] is 'YYYY-MM-DD'.

    Returns True on success, False otherwise.
    """
    fields = ['P_id', 'place', 'commonIcu', 'symptom', 'acuteExac',
              'disease', 'byxCheck', 'byxResult', 'ycWcTreat', 'useAbt',
              'abtType', 'useJmzs', 'hospitalDays', 'airRelate',
              'treatMethod', 'reason', 'docAdvice']
    try:
        admit_date = datetime.datetime.strptime(data['date'], "%Y-%m-%d").date()
        InHospitalInfo(date=admit_date, **{f: data[f] for f in fields}).save()
        return True
    except Exception:  # narrowed from bare except
        return False
# Record structured clinical info (risk factors, treatments, complications).
def addClinicInfo(data):
    """Insert a Clinic row from `data`; returns True on success.

    The model's many numbered columns (smoke1-10, powder1-3, drink1-4,
    lung1-7, cure1-26, comp1-6, ...) are filled by generating the field
    names rather than spelling out every keyword argument by hand.
    """
    fields = (['P_id', 'type', 'S_id', 'dangerType']
              + ['smoke%d' % i for i in range(1, 11)]
              + ['powder%d' % i for i in range(1, 4)]
              + ['biology1', 'biology2', 'hAir1', 'hAir2', 'gm1', 'gm2']
              + ['drink%d' % i for i in range(1, 5)]
              + ['lung%d' % i for i in range(1, 8)]
              + ['cure%d' % i for i in range(1, 27)]
              + ['comp%d' % i for i in range(1, 7)])
    try:
        Clinic(**{f: data[f] for f in fields}).save()
        return True
    except Exception:  # narrowed from bare except
        return False
# Record a questionnaire; `type` selects the model (0=ESS, 1=MBQ, 2=SGRO).
def addQuestionnaireInfo(type, data):
    """Insert one questionnaire row.

    Returns True on success, False for unknown `type` values or any
    save failure.  (`type` shadows the builtin, but the parameter name
    is kept for caller compatibility.)
    """
    common = ['P_id', 'type', 'S_id']
    q_fields = ['q%d' % i for i in range(4, 11)] + ['BMI']
    try:
        if type == 0:
            fields = common + ['ess%d' % i for i in range(4, 9)] + ['score']
            model = ESS
        elif type == 1:
            fields = common + q_fields
            model = MBQ
        elif type == 2:
            # MBQ and SGRO share the same column layout.
            fields = common + q_fields
            model = SGRO
        else:
            return False
        model(**{f: data[f] for f in fields}).save()
        return True
    except Exception:  # narrowed from bare except
        return False
# Record an attachment's metadata.
def addAttachInfo(data):
    """Insert an AttachInfo row; returns True on success.

    TODO: the img/context payload fields are still not persisted here
    (carried over from the original implementation).
    """
    fields = ['P_id', 'type', 'S_id', 'D_id', 'name', 'information', 'dir']
    try:
        AttachInfo(**{f: data[f] for f in fields}).save()
        return True
    except Exception:  # narrowed from bare except
        return False
# Record an accessory (auxiliary) examination.
def addAccessoryExamination(data):
    """Insert an AccessoryExamination row; returns True on success."""
    fields = ['S_id', 'type', 'date', 'AE_type', 'name', 'description',
              'D_id']
    try:
        AccessoryExamination(**{f: data[f] for f in fields}).save()
        return True
    except Exception:  # narrowed from bare except
        return False
"1021369745@qq.com"
] | 1021369745@qq.com |
eba364f9af767f3702b519b7192b96c2b9890d8d | cc08f8eb47ef92839ba1cc0d04a7f6be6c06bd45 | /Personal/Developent/advance-django-blog-master/venv/bin/coverage | 49a1df365828a4beab01a74ad814ac7cc6b66a9d | [
"Apache-2.0"
] | permissive | ProsenjitKumar/PycharmProjects | d90d0e7c2f4adc84e861c12a3fcb9174f15cde17 | 285692394581441ce7b706afa3b7af9e995f1c55 | refs/heads/master | 2022-12-13T01:09:55.408985 | 2019-05-08T02:21:47 | 2019-05-08T02:21:47 | 181,052,978 | 1 | 1 | null | 2022-12-08T02:31:17 | 2019-04-12T17:21:59 | null | UTF-8 | Python | false | false | 281 | #!/root/PycharmProjects/Developent/advance-django-blog-master/venv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from coverage.cmdline import main
if __name__ == '__main__':
    # Setuptools-generated console entry point for coverage.py.
    # Strip a "-script.py(w)"/".exe" suffix from argv[0] so coverage's
    # usage/help output shows the bare command name, then delegate to
    # the coverage CLI and propagate its exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"prosenjitearnkuar@gmail.com"
] | prosenjitearnkuar@gmail.com | |
1e1ab5399a265c7f7c0489fb511803501de552fd | 3f556df088c1f074a8c5d4eaf2b45dcbcd8c91a2 | /thrift/lib/py/server/TServer.py | aca37957e65a349cfce8d045348688d4b8870398 | [
"Apache-2.0"
] | permissive | rezacute/fbthrift | cc622c408dde9cf273cb1cfef4179678d7673ac6 | 7ea5e86a95eea80805bf99eb88371dbcb32f96e1 | refs/heads/master | 2020-04-28T07:54:07.094567 | 2014-11-19T21:18:53 | 2014-11-19T21:18:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,000 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import sys
import os
import traceback
import threading
if sys.version_info[0] >= 3:
import queue
Queue = queue
else:
import Queue
import warnings
from thrift.Thrift import TProcessor, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol.THeaderProtocol import THeaderProtocolFactory
class TConnectionContext:
    """Abstract base class describing one client connection."""
    def getPeerName(self):
        """Gets the address of the client.

        Returns:
            The equivalent value of socket.getpeername() on the client socket
        """
        raise NotImplementedError
class TRpcConnectionContext(TConnectionContext):
    """Per-connection context handed to processors for thrift RPC calls."""

    def __init__(self, client_socket, iprot=None, oprot=None):
        """Remember the client socket and optionally its protocols.

        Arguments:
          client_socket: the TSocket connected to the client
        """
        self._client_socket = client_socket
        self.setProtocols(iprot, oprot)

    def setProtocols(self, iprot, oprot):
        """Attach the input/output protocol objects for this connection."""
        self.iprot = iprot
        self.oprot = oprot

    def getPeerName(self):
        """Same value as socket.getpeername() for the client's TSocket."""
        return self._client_socket.getPeerName()
class TServerEventHandler:
    """Event handler base class.

    Override selected methods on this class to implement custom event handling.
    Every method is an intentional no-op so subclasses only override what
    they need.
    """
    def preServe(self, address):
        """Called before the server begins.

        Arguments:
          address: the address that the server is listening on
        """
        pass
    def newConnection(self, context):
        """Called when a client has connected and is about to begin processing.

        Arguments:
          context: instance of TRpcConnectionContext
        """
        pass
    def clientBegin(self, iprot, oprot):
        """Deprecated: Called when a new connection is made to the server.

        For all servers other than TNonblockingServer, this function is called
        whenever newConnection is called and vice versa. This is the old-style
        for event handling and is not supported for TNonblockingServer. New
        code should always use the newConnection method.
        """
        pass
    def connectionDestroyed(self, context):
        """Called when a client has finished request-handling.

        Arguments:
          context: instance of TRpcConnectionContext
        """
        pass
class TServer:
    """Base interface for a server, which must have a serve method.

    Accepted constructor signatures (all servers):

    1) (processor, serverTransport)
    2) (processor, serverTransport, transportFactory, protocolFactory)
    3) (processor, serverTransport,
        inputTransportFactory, outputTransportFactory,
        inputProtocolFactory, outputProtocolFactory)

    Optionally, the handler can be passed instead of the processor,
    and a processor will be created automatically:

    4) (handler, serverTransport)
    5) (handler, serverTransport, transportFactory, protocolFactory)
    6) (handler, serverTransport,
        inputTransportFactory, outputTransportFactory,
        inputProtocolFactory, outputProtocolFactory)

    The attribute serverEventHandler receives callbacks for various
    events in the server lifecycle; set it to an instance of
    TServerEventHandler.
    """
    def __init__(self, *args):
        # Dispatch on argument count; see the class docstring for the
        # supported signatures.
        # NOTE(review): an unsupported argument count is silently ignored,
        # leaving the instance half-constructed (AttributeError later) --
        # confirm whether raising here would be safer.
        if (len(args) == 2):
            self.__initArgs__(args[0], args[1],
                              TTransport.TTransportFactoryBase(),
                              TTransport.TTransportFactoryBase(),
                              TBinaryProtocol.TBinaryProtocolFactory(),
                              TBinaryProtocol.TBinaryProtocolFactory())
        elif (len(args) == 4):
            # One shared factory for both input and output directions.
            self.__initArgs__(args[0], args[1], args[2], args[2], args[3],
                              args[3])
        elif (len(args) == 6):
            self.__initArgs__(args[0], args[1], args[2], args[3], args[4],
                              args[5])
    def __initArgs__(self, processor, serverTransport,
                     inputTransportFactory, outputTransportFactory,
                     inputProtocolFactory, outputProtocolFactory):
        # Common initializer shared by all constructor signatures.
        self.processor = self._getProcessor(processor)
        self.serverTransport = serverTransport
        self.inputTransportFactory = inputTransportFactory
        self.outputTransportFactory = outputTransportFactory
        self.inputProtocolFactory = inputProtocolFactory
        self.outputProtocolFactory = outputProtocolFactory
        self.serverEventHandler = TServerEventHandler()
    def _getProcessor(self, processor):
        """ Check if a processor is really a processor, or if it is a handler
            auto create a processor for it """
        if isinstance(processor, TProcessor):
            return processor
        elif hasattr(processor, "_processor_type"):
            # A handler was passed; wrap it in its declared processor type.
            handler = processor
            return handler._processor_type(handler)
        else:
            raise TApplicationException(
                message="Could not detect processor type")
    def setServerEventHandler(self, handler):
        self.serverEventHandler = handler
    def _clientBegin(self, context, iprot, oprot):
        # Fire both the new-style (newConnection) and legacy (clientBegin)
        # callbacks for a freshly accepted connection.
        self.serverEventHandler.newConnection(context)
        self.serverEventHandler.clientBegin(iprot, oprot)
    def handle(self, client):
        """Serve one client connection until it disconnects."""
        itrans = self.inputTransportFactory.getTransport(client)
        otrans = self.outputTransportFactory.getTransport(client)
        iprot = self.inputProtocolFactory.getProtocol(itrans)
        # Header protocol is bidirectional: reuse the input protocol for
        # output so header state stays shared.
        if isinstance(self.inputProtocolFactory, THeaderProtocolFactory):
            oprot = iprot
        else:
            oprot = self.outputProtocolFactory.getProtocol(otrans)
        context = TRpcConnectionContext(client, iprot, oprot)
        self._clientBegin(context, iprot, oprot)
        try:
            while True:
                self.processor.process(iprot, oprot, context)
        except TTransport.TTransportException as tx:
            # Normal client-disconnect path.
            pass
        except Exception as x:
            logging.exception(x)
        self.serverEventHandler.connectionDestroyed(context)
        itrans.close()
        otrans.close()
    def serve(self):
        pass
class TSimpleServer(TServer):
    """Simple single-threaded server that just pumps around one transport."""
    def __init__(self, *args):
        warnings.warn("TSimpleServer is deprecated. Please use one of "
                      "Nonblocking, Twisted, or Gevent server instead.",
                      DeprecationWarning)
        TServer.__init__(self, *args)
    def serve(self):
        # Accept and fully serve one client at a time: a slow client
        # blocks all others.
        self.serverTransport.listen()
        for name in self.serverTransport.getSocketNames():
            self.serverEventHandler.preServe(name)
        while True:
            client = self.serverTransport.accept()
            self.handle(client)
class TThreadedServer(TServer):
    """Threaded server that spawns a new thread per each connection."""
    def __init__(self, *args, **kwargs):
        TServer.__init__(self, *args)
        # daemon=True lets the process exit without joining handler threads.
        self.daemon = kwargs.get("daemon", False)
    def serve(self):
        self.serverTransport.listen()
        for name in self.serverTransport.getSocketNames():
            self.serverEventHandler.preServe(name)
        while True:
            try:
                client = self.serverTransport.accept()
                t = threading.Thread(target=self.handle, args=(client,))
                t.daemon = self.daemon
                t.start()
            except KeyboardInterrupt:
                # Let Ctrl-C stop the accept loop.
                raise
            except Exception as x:
                logging.exception(x)
class TThreadPoolServer(TServer):
    """Server with a fixed size pool of threads which service requests."""
    def __init__(self, *args, **kwargs):
        warnings.warn("TThreadPoolServer is deprecated. Please use one of "
                      "Nonblocking, Twisted, or Gevent server instead.",
                      DeprecationWarning)
        TServer.__init__(self, *args)
        queue_size = kwargs.get("queueSize", 0)  # 0 means unbounded
        self.clients = Queue.Queue(queue_size)
        self.threads = 10
        self.daemon = kwargs.get("daemon", False)
        self.timeout = kwargs.get("timeout", None)
    def setNumThreads(self, num):
        """Set the number of worker threads that should be created"""
        # Only effective if called before serve() spawns the workers.
        self.threads = num
    def serveThread(self):
        """
        Loop around getting clients from the shared queue and process them.
        """
        while True:
            try:
                client = self.clients.get()
                if self.timeout:
                    client.setTimeout(self.timeout)
                self.handle(client)
            except Exception as x:
                logging.exception(x)
    def serve(self):
        """
        Start a fixed number of worker threads and put client into a queue
        """
        for i in range(self.threads):
            try:
                t = threading.Thread(target=self.serveThread)
                t.daemon = self.daemon
                t.start()
            except Exception as x:
                logging.exception(x)
        # Pump the socket for clients
        self.serverTransport.listen()
        for name in self.serverTransport.getSocketNames():
            self.serverEventHandler.preServe(name)
        while True:
            client = None
            try:
                client = self.serverTransport.accept()
                self.clients.put(client)
            except Exception as x:
                logging.exception(x)
            # NOTE(review): this cleanup runs on *every* iteration, not only
            # when accept/put failed.  With pass-through transport factories
            # it wraps and closes the client that was just queued -- confirm
            # against upstream Thrift, where this block sits inside the
            # except handler.
            if client:
                itrans = self.inputTransportFactory.getTransport(client)
                otrans = self.outputTransportFactory.getTransport(client)
                itrans.close()
                otrans.close()
class TForkingServer(TServer):
    """A Thrift server that forks a new process for each request.

    This is more scalable than the threaded server as it does not cause
    GIL contention.

    Note that this has different semantics from the threading server.
    Specifically, updates to shared variables will no longer be shared.
    It will also not work on windows.

    This code is heavily inspired by SocketServer.ForkingMixIn in the
    Python stdlib.
    """
    def __init__(self, *args):
        TServer.__init__(self, *args)
        self.children = []  # pids of live (not yet reaped) child processes
    def serve(self):
        def tryClose(file):
            # Best-effort close; a half-dead socket must not kill the
            # accept loop.
            try:
                file.close()
            except IOError as e:
                logging.warning(e, exc_info=True)
        self.serverTransport.listen()
        for name in self.serverTransport.getSocketNames():
            self.serverEventHandler.preServe(name)
        while True:
            client = self.serverTransport.accept()
            try:
                itrans = self.inputTransportFactory.getTransport(client)
                otrans = self.outputTransportFactory.getTransport(client)
                iprot = self.inputProtocolFactory.getProtocol(itrans)
                # Header protocol is bidirectional: reuse the input
                # protocol for output.
                if isinstance(self.inputProtocolFactory,
                              THeaderProtocolFactory):
                    oprot = iprot
                else:
                    oprot = self.outputProtocolFactory.getProtocol(otrans)
                context = TRpcConnectionContext(client, iprot, oprot)
                self._clientBegin(context, iprot, oprot)
                pid = os.fork()
                if pid:  # parent
                    # add before collect, otherwise you race w/ waitpid
                    self.children.append(pid)
                    self._collectChildren()
                    # Parent must close socket or the connection may not get
                    # closed promptly
                    tryClose(itrans)
                    tryClose(otrans)
                else:
                    # Child: serve this one client until disconnect, then
                    # exit without returning to the accept loop.
                    ecode = 0
                    try:
                        try:
                            while True:
                                self.processor.process(iprot, oprot, context)
                        except TTransport.TTransportException as tx:
                            pass
                        except Exception as e:
                            logging.exception(e)
                            ecode = 1
                    finally:
                        self.serverEventHandler.connectionDestroyed(context)
                        tryClose(itrans)
                        tryClose(otrans)
                    # os._exit skips atexit/stdio cleanup shared with parent.
                    os._exit(ecode)
            except TTransport.TTransportException as tx:
                pass
            except Exception as x:
                logging.exception(x)
    def _collectChildren(self):
        # Non-blocking reap of any finished children to avoid zombies.
        while self.children:
            try:
                pid, status = os.waitpid(0, os.WNOHANG)
            except os.error:
                pid = None
            if pid:
                self.children.remove(pid)
            else:
                break
| [
"davejwatson@fb.com"
] | davejwatson@fb.com |
323e6dbcabc39627ea900df0e1e0063631c79908 | 9222f00f40ae22ec71a61cd6627b11ec89958c5b | /v0_2017/lib/common.py | 386dee3834e99a3849fe778b1623f1ff1c74da46 | [] | no_license | ebensh/pinball_cv | 635d051b296dcdfb6c7c001709ef30a710f96549 | e2e67fd3f7e33f9583a46f2a63130c1cec791315 | refs/heads/master | 2021-07-15T16:35:07.878569 | 2018-12-27T05:03:57 | 2018-12-27T05:03:57 | 106,141,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,640 | py | from collections import namedtuple
import inspect
import json
import sys

import cv2
import matplotlib.pyplot as plt
import numpy as np
def eprint(*args, **kwargs):
    """Print to stderr; same signature as the builtin print()."""
    print(*args, file=sys.stderr, **kwargs)
def numpy_print_cols(cols=160):
    """Widen numpy's printed line width to `cols` characters.

    NOTE(review): this pokes numpy's private arrayprint module; prefer
    np.set_printoptions(linewidth=...) -- confirm the installed numpy
    version still honors this attribute.
    """
    np.core.arrayprint._line_width = cols
# http://ipython-books.github.io/featured-01/
def get_data_base(arr):
    """Walk the .base chain of a NumPy array and return the array that
    actually owns the underlying data buffer."""
    owner = arr
    while isinstance(owner.base, np.ndarray):
        owner = owner.base
    return owner
def arrays_share_data(x, y):
    """True when x and y are views onto the same underlying buffer."""
    return get_data_base(x) is get_data_base(y)
def print_by_channel(img):
    """Print each color channel of `img` (rows x cols x channels) as its
    own 2-D array.

    Uses `range`, not the Python-2-only `xrange` the original had: this
    module already relies on Python-3 print(..., file=...) syntax, so
    `xrange` was a guaranteed NameError.
    """
    rows, cols, channels = img.shape
    for channel in range(channels):
        print(img[:, :, channel])
def display_image(img, title=None, show=True):
    """Show `img` in a cv2 window when `show` is True.

    When no title is given, the caller's file name and line number are
    used, so call sites can be told apart without naming each window.
    """
    if not show:
        return
    if title is None:
        caller = inspect.stack()[1]
        title = "{0}:{1}".format(caller[1], caller[2])
    cv2.imshow(title, img)
# This is *INEFFICIENT* and is only intended for quick experimentation.
# http://blog.hackerearth.com/descriptive-statistics-with-Python-NumPy
# TODO(ebensh): Add a wrapper class around the named tuple.
#NamedStatistics = namedtuple('NamedStatistics', ['minimum', 'maximum', 'ptp', 'mean'])
NamedStatistics = namedtuple('NamedStatistics', ['mean'])
def get_named_statistics(planes):
    """Per-pixel statistics across the first axis of `planes`.

    Only the mean is currently computed (min/max/ptp fields were dropped
    earlier); the float64 mean is truncated back to uint8.
    """
    mean_plane = np.mean(planes, axis=0, dtype=np.float64).astype(np.uint8)
    return NamedStatistics(mean=mean_plane)
def print_statistics(statistics, printer):
    """Add every field of a NamedStatistics tuple to `printer`, using the
    field's name as the caption."""
    for name in statistics._fields:
        printer.add_image(getattr(statistics, name), name)
class FrameBuffer(object):
    """Ring buffer holding a sliding window of video frames, kept in both
    color and grayscale form."""
    def __init__(self, num_frames=1, shape=(640, 480, 3), dtype=np.uint8):
        # Create our frame buffers. We don't store them together because while it
        # would make the rolling easier it would also require the gray version to
        # be stored with three channels.
        self._BUFFER_LENGTH = 2 * num_frames # Left here in case we want to increase.
        self._num_frames = num_frames
        self._idx = 0  # ring index of the logical start of the window
        self._shape = shape
        self._frames = np.zeros((self._BUFFER_LENGTH,) + shape, dtype=dtype)
        self._frames_gray = np.zeros((self._BUFFER_LENGTH,) + shape[0:2], dtype=dtype)
    def append(self, frame):
        """Insert `frame` (and its grayscale version) at the logical end
        of the window and advance the ring index."""
        idx_to_insert = (self._idx + self._num_frames) % self._BUFFER_LENGTH
        self._frames[idx_to_insert] = frame
        self._frames_gray[idx_to_insert] = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        self._idx = (self._idx + 1) % self._BUFFER_LENGTH
    def get_view(self, start, stop, color=True):
        """Read-only snapshot of frames [start, stop) relative to the oldest
        buffered frame; None bounds default to the full window.  Note that
        np.take copies, so this is a snapshot, not a live view."""
        view = None
        if start is None: start = 0
        if stop is None: stop = self._num_frames
        start += self._idx
        stop += self._idx
        if color:
            view = self._frames.take(range(start, stop), axis=0, mode='wrap').view()
        else:
            view = self._frames_gray.take(range(start, stop), axis=0, mode='wrap').view()
        view.setflags(write=False)
        return view
    def get_shape(self, color=True):
        # Per-frame shape; grayscale drops the channel dimension.
        if color: return self._shape
        return self._shape[0:2]
    # Useful for debugging.
    def get_buffers(self):
        return cv2.hconcat(self._frames), cv2.hconcat(self._frames_gray)
class FramePrinter(object):
    """Accumulates captioned images and composes them side by side."""
    def __init__(self):
        self._images = []

    def add_image(self, img, caption):
        """Queue `img` with `caption`; grayscale images are promoted to
        3-channel BGR so they can be blitted into the color canvas."""
        if len(img.shape) < 3 or img.shape[2] != 3:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        self._images.append((img, caption))

    def get_combined_image(self):
        """Return one BGR image with every added image laid out left to
        right, `space` pixels apart, each captioned at its bottom edge."""
        font = cv2.FONT_HERSHEY_SIMPLEX
        space = 10  # pixels between images
        max_rows = 0
        total_cols = 0
        for img, _ in self._images:
            shape = img.shape
            rows, cols = shape[0], shape[1]
            max_rows = max(max_rows, rows)
            total_cols += cols
        total_cols += (len(self._images) - 1) * space
        # BUG FIX: allocate the canvas with the tallest image's height
        # (max_rows), not whatever `rows` was left at by the last loop
        # iteration -- mixed-height inputs previously failed or clipped.
        combined_image = np.zeros((max_rows, total_cols, 3), dtype=np.uint8)
        current_col = 0
        for img, caption in self._images:
            shape = img.shape
            rows, cols = shape[0], shape[1]
            combined_image[0:rows, current_col:current_col+cols] = img
            cv2.putText(combined_image, caption, (current_col, rows), font,
                        1, (255,255,255), 2, cv2.LINE_AA)
            current_col += cols + space
        return combined_image
def get_region_as_mask(rows, cols, region):
    """Rasterize the convex polygon `region` into a filled uint8 mask
    (255 inside, 0 outside)."""
    canvas = np.zeros((rows, cols), dtype=np.uint8)
    cv2.fillConvexPoly(canvas, region, 255)
    return canvas
def get_perspective_transform(rows, cols, region):
    """Perspective matrix mapping `region` (a closed polygon whose last
    vertex repeats the first) onto the full rows x cols rectangle."""
    destination = np.array([
        (0, 0),                # top left
        (cols - 1, 0),         # top right
        (cols - 1, rows - 1),  # bottom right
        (0, rows - 1)],        # bottom left
        dtype=np.float32)
    source = region[:-1].astype(np.float32)
    return cv2.getPerspectiveTransform(source, destination)
def trim_to_uint8(arr):
    """Clamp `arr` into [0, 255] and cast to uint8.

    IMPORTANT: a bare astype(np.uint8) would *wrap* out-of-range values
    (e.g. -1 -> 255); clipping first pins them to the boundaries.
    """
    clamped = np.clip(arr, 0, 255)
    return clamped.astype(np.uint8)
def extrapolate(xy1, xy2):
    """Point one step beyond xy2 along the line from xy1 through xy2."""
    (x1, y1), (x2, y2) = xy1, xy2
    return (2 * x2 - x1, 2 * y2 - y1)
def lerp(xy1, xy2):
    """Midpoint of the two points (fixed t=0.5 interpolation).

    True division is used, so integer inputs come back as floats.
    """
    (x1, y1), (x2, y2) = xy1, xy2
    return ((x1 + x2) / 2, (y1 + y2) / 2)
def dist(xy1, xy2):
    """Squared Euclidean distance between two (x, y) points.

    NOTE(review): despite the name there is no sqrt; presumably callers
    only compare relative distances (where sqrt is redundant) -- verify
    before "fixing" this.
    """
    x1, y1 = xy1
    x2, y2 = xy2
    return (x2 - x1)**2 + (y2 - y1)**2
def in_bounds(rows, cols, xy):
    """True when point (x, y) lies inside a rows x cols image
    (x indexes columns, y indexes rows)."""
    x, y = xy
    return 0 <= x < cols and 0 <= y < rows
# https://matplotlib.org/users/image_tutorial.html
# http://jakevdp.github.io/mpl_tutorial/tutorial_pages/tut2.html
def p_gray(*args, path=None):
    """Plot the given grayscale images side by side; save to `path` if given."""
    fig, axes = plt.subplots(1, len(args), squeeze=False)
    fig.set_size_inches(20, 10)
    for img, ax in zip(args, axes[0]):
        ax.imshow(img, cmap='gray')
    if path:
        plt.savefig(path, bbox_inches='tight')
    plt.show()
def p_bgr(img, path=None):
    """Plot a BGR image via matplotlib (which expects RGB) by reversing
    the channel axis; save to `path` if given."""
    plt.figure(figsize=(20, 10))
    plt.imshow(img[:, :, ::-1])
    if path:
        plt.savefig(path, bbox_inches='tight')
    plt.show()
def p_heat(img, path=None):
    """Plot `img` scaled to [0, 1] as an 'inferno' heatmap; save to
    `path` if given."""
    plt.figure(figsize=(20, 10))
    scaled = 1.0 * img / img.max()
    plt.imshow(scaled, cmap='inferno', interpolation='nearest')
    if path:
        plt.savefig(path, bbox_inches='tight')
    plt.show()
def p_histogram(img, path=None):
    """Plot a 32-bin histogram of `img`'s values; save to `path` if given."""
    plt.figure(figsize=(6, 3))
    plt.hist(img, bins=32)
    if path:
        plt.savefig(path, bbox_inches='tight')
    plt.show()
def load_json_keypoints_as_dict(path):
    """Read {frame_index: [[x, y, size], ...]} from a JSON file.

    JSON keys arrive as strings and coordinates as floats; both are
    converted to ints.  Asserts the frame indices are dense 0..N-1.
    """
    with open(path, 'r') as keypoints_file:
        raw = json.load(keypoints_file)
    frame_to_keypoints = {
        int(frame_str): [[int(round(x)), int(round(y)), int(round(size))]
                         for x, y, size in kps]
        for frame_str, kps in raw.items()
    }
    assert set(frame_to_keypoints.keys()) == set(range(len(frame_to_keypoints)))
    return frame_to_keypoints
def load_json_keypoints_as_list(path):
    """Same data as load_json_keypoints_as_dict, but returned as a list
    ordered by frame index (the dict is dense, just possibly unordered)."""
    by_frame = load_json_keypoints_as_dict(path)
    return [kps for _, kps in sorted(by_frame.items())]
def get_all_frames_from_video(path):
    """Decode every frame of the video at `path` into a single ndarray
    of shape (num_frames, rows, cols, channels)."""
    capture = cv2.VideoCapture(path)
    frames = []
    while capture.isOpened():
        ok, frame = capture.read()
        if not ok:
            break
        frames.append(frame)
    capture.release()
    return np.array(frames)
def keypoints_to_mask(rows, cols, keypoints, fixed_radius=None, thickness=-1):
    """Rasterize keypoints into a uint8 mask (255 = keypoint pixel).

    Size-1 points become single pixels; larger ones are drawn as cv2
    circles (filled by default, thickness=-1).  `fixed_radius`, when
    truthy, overrides every keypoint's own size.
    """
    mask = np.zeros([rows, cols], np.uint8)
    for x, y, size in keypoints:
        radius = fixed_radius if fixed_radius else size
        if radius == 1:
            mask[y, x] = 255
        else:
            cv2.circle(mask, (x, y), radius, color=255, thickness=thickness)
    return mask
def get_all_keypoint_masks(rows, cols, frame_to_keypoints_list, fixed_radius=None, thickness=-1):
    """Build one keypoints_to_mask() mask per frame and stack them."""
    return np.array([keypoints_to_mask(rows, cols, kps, fixed_radius, thickness)
                     for kps in frame_to_keypoints_list])
def hconcat_ndarray(imgs):
    """Lay a (num, rows, cols) stack of images out side by side into a
    single (rows, num * cols) image."""
    num, rows, cols = imgs.shape[:3]
    interleaved = imgs.swapaxes(0, 1)
    return interleaved.reshape([rows, num * cols])
def convert_bgr_planes_to_gray(planes):
    """Convert a (planes, rows, cols, 3) BGR stack to (planes, rows, cols)
    grayscale using one cv2 call by flattening the plane axis first."""
    num, rows, cols, channels = planes.shape
    stacked = planes.reshape((num * rows, cols, channels))
    gray = cv2.cvtColor(stacked, cv2.COLOR_BGR2GRAY)
    return gray.reshape((num, rows, cols))
def add_bgr_and_gray(img_color, img_gray):
    """Saturating add of a grayscale image onto a BGR image."""
    gray_as_bgr = cv2.cvtColor(img_gray, cv2.COLOR_GRAY2BGR)
    return cv2.add(img_color, gray_as_bgr)
| [
"ebensh@gmail.com"
] | ebensh@gmail.com |
58015f5e17eecc12024afa29b2a1038ae3cc9c8a | 843c06cb6838d3c79efc91003b633866b2b9d839 | /Kaggle_SVHN_Final/codes/test_CNN.py | ff27214f48a0be1ada24748d7583c240a5aeb9bf | [] | no_license | semper21/StatisticalML | 901fd36b5497ca73e145a67807b0b4bc694bc9ff | e8d1e80430babf924f75397a2986f0487a207c9d | refs/heads/master | 2020-06-01T13:44:12.965501 | 2019-06-07T20:27:26 | 2019-06-07T20:27:26 | 190,798,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,279 | py | #! /home/vision/anaconda2/bin/python
'''
Created on Apr 12, 2017
@author: ywkim
- ReLU neurons
- stride = 1
- zero padded
- max pooling over 2x2 blocks
- conv1: 32 filters (5x5)
- conv2: 64 filters (5x5)
'''
import csv
from csv import reader
from sys import argv
import numpy as np
import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
# Load CSV file
def load_csv(filename):
    """Load labelled SVHN data: column 0 is the digit label (the value
    '10' encodes digit 0) and the remaining 32*32*3 columns are the
    flattened pixels.  Returns (X, y) with X shaped (N, 32, 32, 3)."""
    samples = []
    labels = []
    with open(filename, 'r') as fh:
        rows = reader(fh)
        next(rows, None)  # skip header
        for row in rows:
            if not row:
                continue
            # SVHN stores digit '0' under label '10'; remap it to 0.
            labels.append(0 if row[0] == '10' else int(row[0]))
            samples.append([float(v) for v in row[1:]])
    X = np.array(samples)
    X_train = X.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
    Y_train = np.array(labels)
    return X_train, Y_train
def load_csv_test(filename):
    """Load unlabelled test data: every CSV row is one flattened
    32x32x3 image.  Returns X shaped (N, 32, 32, 3)."""
    samples = []
    with open(filename, 'r') as fh:
        rows = reader(fh)
        next(rows, None)  # skip header
        for row in rows:
            if not row:
                continue
            samples.append([float(v) for v in row])
    X = np.array(samples)
    X_test = X.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
    #X_test = np.vsplit(X_test, 8)
    #split into 8 groups and return
    return X_test
def convert2grayscale(numSample, X):
    """Convert `numSample` RGB images (N, 32, 32, 3) to grayscale
    (N, 32, 32), one pixel at a time via weightedAverage().

    NOTE(review): weightedAverage is defined elsewhere in this file --
    confirm its channel weights.  This pure-Python per-pixel loop is far
    slower than the cv2 (grayscale) and matrix (rgb2YUV) versions below.
    """
    #convert rgb->grayscale
    X_new = np.zeros((numSample, 32, 32))
    for i in range(numSample):
        image = X[i]
        gray = np.zeros((image.shape[0], image.shape[1]))
        for rownum in range(len(image)):
            for colnum in range(len(image[rownum])):
                gray[rownum][colnum] = weightedAverage(image[rownum][colnum])
        #X[i] = np.array([gray]*3).reshape(32,32,3)
        X_new[i] = gray
    return X_new
def grayscale(numSample, X):
    """cv2-based RGB -> grayscale conversion for the first `numSample`
    images in X; returns an (N, 32, 32) array."""
    X_new = np.zeros((numSample, 32, 32))
    for idx in range(numSample):
        X_new[idx] = cv2.cvtColor(X[idx], cv2.COLOR_RGB2GRAY)
    return X_new
def rgb2YUV(rgb):
    """Project RGB pixels onto YUV (BT.601 weights).

    Works on any array whose last axis holds the 3 color channels.
    """
    conversion = np.array([[0.299, 0.587, 0.114],
                           [-0.14713, -0.28886, 0.436],
                           [0.615, -0.51499, -0.10001]])
    return np.dot(rgb[..., :3], conversion.T)
def equalize(X):
    """Histogram-equalize each grayscale image.

    Input is cast to uint8 (cv2.equalizeHist requirement) and the
    result is returned as float64, matching the original behavior.
    """
    equalized = np.ndarray((X.shape[0], 32, 32), dtype=np.uint8)
    for idx, img in enumerate(X.astype(np.uint8)):
        equalized[idx] = cv2.equalizeHist(img)
    return equalized.astype(np.float64)
def visualize(X):
    """Debug helper: show the first 10 images of X in a 1x10 matplotlib
    strip, also dumping each raw pixel array to stdout."""
    # Visualize some examples from the dataset.
    # We show a few examples of training images from each class.
    plt.figure(figsize=(20,2))
    for i in range(10):
        print X[i]
        plt.subplot(1, 10, i+1)
        plt.imshow(X[i].astype('uint8'))
    #plt.savefig('train_first10.png')
    plt.show()
def normalize(X_train, X_val, X_test):
    """Standardize all three splits with the *training* set's per-pixel
    mean and std (computed over flattened images).

    NOTE(review): the `-=` operates on reshaped arrays that are usually
    views, so the callers' arrays may be modified in place -- confirm.
    The final reshapes keep only three dims, so inputs are expected to
    be grayscale (N, 32, 32) at this point.
    """
    # Preprocessing: reshape the image data into rows
    original_Xtrain = X_train.shape
    original_Xval = X_val.shape
    original_Xtest = X_test.shape
    X_train = np.reshape(X_train, (X_train.shape[0], -1))
    X_val = np.reshape(X_val, (X_val.shape[0], -1))
    X_test = np.reshape(X_test, (X_test.shape[0], -1))
    # As a sanity check, print out the shapes of the data
    print 'Training data shape: ', X_train.shape
    print 'Validation data shape: ', X_val.shape
    print 'Test data shape: ', X_test.shape
    # Preprocessing: subtract the mean image
    # first: compute the image mean based on the training data
    mean_image = np.mean(X_train, axis=0)
    std_image = np.std(X_train, axis=0)
    # second: subtract the mean image from train and test data
    X_train -= mean_image
    X_val -= mean_image
    X_test -= mean_image
    # third: divide by std
    X_train = X_train/std_image
    X_val = X_val/std_image
    X_test = X_test/std_image
    X_train = X_train.reshape(original_Xtrain[0], original_Xtrain[1], original_Xtrain[2])
    X_val = X_val.reshape(original_Xval[0], original_Xval[1], original_Xval[2])
    X_test = X_test.reshape(original_Xtest[0], original_Xtest[1], original_Xtest[2])
    return X_train, X_val, X_test
def normalize2(X_train, X_val, X_test):
    """Standardise each sample individually (per-image mean and std),
    unlike normalize() which uses training-set per-pixel statistics."""
    # Preprocessing: reshape the image data into rows
    original_Xtrain = X_train.shape
    original_Xval = X_val.shape
    original_Xtest = X_test.shape
    X_train = np.reshape(X_train, (X_train.shape[0], -1))
    X_val = np.reshape(X_val, (X_val.shape[0], -1))
    X_test = np.reshape(X_test, (X_test.shape[0], -1))
    # As a sanity check, print out the shapes of the data
    print 'Training data shape: ', X_train.shape
    print 'Validation data shape: ', X_val.shape
    print 'Test data shape: ', X_test.shape
    # Preprocessing: subtract the mean image
    # first: compute the image mean based on the training data
    # (axis=1 with keepdims -> one mean/std per sample, not per pixel)
    mean_imageTrain = np.mean(X_train, axis=1, keepdims = True)
    std_imageTrain = np.std(X_train, axis=1, keepdims = True)
    mean_imageVal = np.mean(X_val, axis=1, keepdims = True)
    std_imageVal = np.std(X_val, axis=1, keepdims = True)
    mean_imageTest = np.mean(X_test, axis=1, keepdims = True)
    std_imageTest = np.std(X_test, axis=1, keepdims = True)
    # second: subtract the mean image from train and test data
    X_train -= mean_imageTrain
    X_val -= mean_imageVal
    X_test -= mean_imageTest
    # third: divide by std
    X_train = X_train/std_imageTrain
    X_val = X_val/std_imageVal
    X_test = X_test/std_imageTest
    X_train = X_train.reshape(original_Xtrain[0], original_Xtrain[1], original_Xtrain[2])
    X_val = X_val.reshape(original_Xval[0], original_Xval[1], original_Xval[2])
    X_test = X_test.reshape(original_Xtest[0], original_Xtest[1], original_Xtest[2])
    return X_train, X_val, X_test
def normalize3(X_train, X_val, X_test):
    """Min-max scale each sample individually to the [0, 1] range."""
    # Preprocessing: reshape the image data into rows
    original_Xtrain = X_train.shape
    original_Xval = X_val.shape
    original_Xtest = X_test.shape
    X_train = np.reshape(X_train, (X_train.shape[0], -1))
    X_val = np.reshape(X_val, (X_val.shape[0], -1))
    X_test = np.reshape(X_test, (X_test.shape[0], -1))
    # As a sanity check, print out the shapes of the data
    print 'Training data shape: ', X_train.shape
    print 'Validation data shape: ', X_val.shape
    print 'Test data shape: ', X_test.shape
    # Preprocessing: subtract the mean image
    # first: compute per-sample min/max (axis=1 with keepdims keeps
    # broadcasting compatible with the flattened rows)
    min_Train = np.min(X_train, axis=1, keepdims = True)
    max_Train = np.max(X_train, axis=1, keepdims = True)
    range_Train = max_Train - min_Train
    min_Val = np.min(X_val, axis=1, keepdims = True)
    max_Val = np.max(X_val, axis=1, keepdims = True)
    range_Val = max_Val - min_Val
    min_Test = np.min(X_test, axis=1, keepdims = True)
    max_Test = np.max(X_test, axis=1, keepdims = True)
    range_Test = max_Test - min_Test
    # second: subtract the mean image from train and test data
    X_train -= min_Train
    X_val -= min_Val
    X_test -= min_Test
    # third: divide by std
    X_train = X_train/range_Train
    X_val = X_val/range_Val
    X_test = X_test/range_Test
    X_train = X_train.reshape(original_Xtrain[0], original_Xtrain[1], original_Xtrain[2])
    X_val = X_val.reshape(original_Xval[0], original_Xval[1], original_Xval[2])
    X_test = X_test.reshape(original_Xtest[0], original_Xtest[1], original_Xtest[2])
    return X_train, X_val, X_test
def subsample(num_training,num_validation,X_train,y_train):
    """Split the data: the first num_training samples become the training
    set, the next num_validation samples become the validation set."""
    val_idx = np.arange(num_training, num_training + num_validation)
    X_val = X_train[val_idx]
    Y_val = y_train[val_idx]
    train_idx = np.arange(num_training)
    return X_train[train_idx], y_train[train_idx], X_val, Y_val
def weightedAverage(pixel):
    """Return the Rec.601 luma of an (R, G, B) pixel.

    Bug fix: the green coefficient was 0.589; the standard weights are
    0.299/0.587/0.114, which sum to exactly 1 so a white pixel maps to
    full intensity.
    """
    return 0.299*pixel[0] + 0.587*pixel[1] + 0.114*pixel[2]
def weight_variable(shape):
    """Create a TF weight Variable drawn from a truncated normal (std 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a TF bias Variable initialised to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """2-D convolution with stride 1 in every dimension and SAME padding."""
    unit_stride = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_stride, padding='SAME')
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 (halves each spatial dimension)."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
def next_batch(arrayX, arrayY, sampSize):
    """Draw a random mini-batch of sampSize paired samples, without
    replacement, from the supplied arrays.

    Bug fix: the index draw previously used len(X_train) -- the *global*
    training array -- instead of the arrayX argument, which breaks (or
    silently mis-samples) whenever the arrays passed in differ from the
    global. Fancy indexing also replaces the manual copy loop; dtypes and
    shapes of the returned arrays are unchanged.
    """
    assert len(arrayX) == len(arrayY)
    p = np.random.choice(len(arrayX), sampSize, replace=False)
    return arrayX[p], arrayY[p]
def dense_to_one_hot(labels_dense, num_classes=10):
    """Convert class labels from scalars to one-hot vectors."""
    count = labels_dense.shape[0]
    one_hot = np.zeros((count, num_classes))
    # Row i gets a 1 in the column named by label i.
    one_hot[np.arange(count), labels_dense.ravel()] = 1
    return one_hot
if __name__ == '__main__':
    # Entry point: load the SVHN CSVs, preprocess (grayscale + per-sample
    # min-max normalisation), train a two-conv-layer CNN with TF 1.x
    # (Python 2 syntax throughout), then write Kaggle-style predictions.
    # NOTE(review): load_csv, load_csv_test and convert2grayscale are not
    # defined in this chunk (only grayscale() is) -- presumably defined or
    # imported elsewhere in the file; verify before running.
    #trainfile = argv[1]
    #testfile = argv[2]
    trainfile = '/home/vision/Documents/Kaggle_SVHN/train.csv'
    testfile= '/home/vision/Documents/Kaggle_SVHN/test.csv'
    X_train, Y_train = load_csv(trainfile)
    X_test = load_csv_test(testfile)
    print '---finished loading data---'
    '''
    X_train = rgb2YUV(X_train)
    X_test = rgb2YUV(X_test)
    '''
    print 'shape before conversion: ', X_train.shape
    X_train = convert2grayscale(73257, X_train)
    print 'shape after conversion: ', X_train.shape
    X_test = convert2grayscale(26032, X_test)
    visualize(X_train)
    #X_train = equalize(X_train)
    #X_test = equalize(X_test)
    X_train, Y_train, X_val, Y_val = subsample(55257, 18000, X_train, Y_train)
    X_train, X_val, X_test = normalize3(X_train, X_val, X_test)
    print X_train[0]
    print '---finished preprocessing---'
    ##########################################
    # Training data shape: (66257, 32, 32) #
    # Validation data shape: (7000, 32, 32) #
    # Test data shape: (26032, 1024) #
    ##########################################
    sess = tf.InteractiveSession()
    #implementing TF
    x = tf.placeholder(tf.float32, shape=[None, 32, 32]) #grayscaled, so (32x32x1)
    y_ = tf.placeholder(tf.float32, shape=[None, 10]) #[None, 10] for one-hot 10-dimensional vectors
    #first layer (conv + max)
    W_conv1 = weight_variable([5, 5, 1, 32]) #change 32 to desired # of filters
    b_conv1 = bias_variable([32]) #change 32 to desired # of filters
    x_image = tf.reshape(x, [-1, 32, 32, 1]) #1 = # of color channels
    #convolve x_image with the weight tensor, add the bias, apply the ReLU function
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    #max_pool_2x2 method will reduce the image size to 16x16.
    h_pool1 = max_pool_2x2(h_conv1)
    #second layer (64 features for each 5x5 patch -> image size will be reduced to 8x8)
    W_conv2 = weight_variable([3, 3, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    '''
    #third layer
    W_conv3 = weight_variable([3, 3, 64, 128])
    b_conv3 = bias_variable([128])
    h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
    h_pool3 = max_pool_2x2(h_conv3)
    #fourth layer
    W_conv4 = weight_variable([3, 3, 128, 256])
    b_conv4 = bias_variable([256])
    h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)
    h_pool4 = max_pool_2x2(h_conv4)
    '''
    #fully-connected layer with 4096 neurons
    W_fc1 = weight_variable([8 * 8 * 64, 1024]) #might be better with 500
    b_fc1 = bias_variable([1024])
    #reshape the tensor from the pooling layer into a batch of vectors
    h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 8 * 64])
    #multiply by a weight matrix, add a bias, and apply a ReLU
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    '''
    #fully-connected layer with 4096 neurons
    W_fc2 = weight_variable([4096, 4096])
    b_fc2 = bias_variable([4096])
    #multiply by a weight matrix, add a bias, and apply a ReLU
    h_fc2 = tf.nn.relu(tf.matmul(h_fc1, W_fc2) + b_fc2)
    #fully-connected layer with 1000 neurons
    W_fc3 = weight_variable([4096, 1000])
    b_fc3 = bias_variable([1000])
    #multiply by a weight matrix, add a bias, and apply a ReLU
    h_fc3 = tf.nn.relu(tf.matmul(h_fc2, W_fc3) + b_fc3)
    '''
    #dropout
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    #softmax (readout layer)
    W_fc4 = weight_variable([1024, 10]) #why 10?
    b_fc4 = bias_variable([10]) #why 10?
    y_conv = tf.matmul(h_fc1_drop, W_fc4) + b_fc4
    #Train and Evaluate the Model
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    predict = tf.argmax(y_conv, 1)
    sess.run(tf.global_variables_initializer())
    Y_train = dense_to_one_hot(Y_train, 10)
    Y_val = dense_to_one_hot(Y_val, 10)
    # Training loop: 20000 steps, batch size 100, accuracy report every 100.
    for i in range(20000):
        batchX, batchY = next_batch(X_train, Y_train, 100)
        #batchY = tf.one_hot(batchY, 10)
        if i%100 == 0:
            train_accuracy = accuracy.eval(feed_dict={
                x:batchX, y_: batchY, keep_prob: 1.0})
            print("step %d, training accuracy %g"%(i, train_accuracy))
        train_step.run(feed_dict={x: batchX, y_: batchY, keep_prob: 1.0})
    print("Validation accuracy %g"%accuracy.eval(feed_dict={
        x: X_val, y_: Y_val, keep_prob: 1.0}))
    saver = tf.train.Saver()
    saver.save(sess, 'CNN_twoLayer')
    #predict
    y_pred = []
    batchsize=100
    for i in range(0, len(X_test), batchsize):
        X_batch = X_test[i:i+batchsize]
        pred = predict.eval(feed_dict={x: X_batch, keep_prob: 1.0})
        y_pred += list(pred)
    #print y_pred
    outputFile = 'pred_CNN3Layer_5x32n3x64n1024nfc_withEqualizer_withDiffNorm'
    with open(outputFile, 'w') as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerow(['ImageId','label'])
        for l in range(len(y_pred)):
            ImageID = l
            # Class 0 is written out as label 10 -- presumably the SVHN
            # labelling convention for digit zero; verify against the
            # competition's submission format.
            if y_pred[l] == 0:
                label = 10
            else:
                label = y_pred[l]
            info = ([ImageID, label])
            writer.writerow(info)
| [
"ywkim@bcm.edu"
] | ywkim@bcm.edu |
6a2ed3c32e208f9299ed262c00312288f929adf8 | 4811d601e11528b1bef7d20429c20447288b608e | /coolscrapy/pipelines.py | 02f9ca965030d086564e399bf5a629b69742d689 | [] | no_license | dover-xu/coolscrapy | 8c97d790e90aceeb47fecaaa15767f5a8b20827a | ace564be07177992629ee58331ea26e079ac4067 | refs/heads/master | 2020-12-30T14:12:40.981166 | 2017-05-21T08:14:19 | 2017-05-21T08:14:19 | 91,288,288 | 0 | 0 | null | 2017-05-21T08:14:20 | 2017-05-15T02:43:36 | null | UTF-8 | Python | false | false | 2,395 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import datetime
import json
import urllib2
import pymongo
from scrapy.conf import settings
from scrapy import log
from scrapy.exceptions import DropItem
from gridfs import *
import os
class CoolscrapyPipeline(object):
    """No-op pipeline: hands every item through unchanged."""
    def process_item(self, item, spider):
        # Nothing to transform here; pass the item to the next pipeline.
        return item
class ArticleDataBasePipeline(object):
    """Persists scraped articles to MongoDB and downloads each article's
    image (if present) to the local IMAGES_DIR (Python 2 / urllib2 code)."""
    def __init__(self):
        # Connection parameters come from Scrapy's settings module.
        client = pymongo.MongoClient(
            settings['MONGODB_SERVER'],
            settings['MONGODB_PORT']
        )
        db = client[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_COLLECTION']]
        # self.fs = GridFS(db, settings.MONGODB_IMAGES_COLLECTION)
    def open_spider(self, spider):
        pass
    def process_item(self, item, spider):
        """Drop items with any empty field; otherwise download the article
        image (skipping files already on disk) and insert the item."""
        valid = True
        for data in item:
            if not data:
                valid = False
                raise DropItem("Missing {0}".format(data))
        if valid:
            image_url = item['image_url'] if item['image_url'] else ''
            if image_url:
                dir = settings['IMAGES_DIR']
                if not os.path.exists(dir):
                    os.makedirs(dir)
                # Flatten the URL path (minus query string) into a file name.
                url_split = image_url.split('?')[0].split('/')[3:]
                filename = '_'.join(url_split)
                filepath = '%s/%s' % (dir, filename)
                if os.path.exists(filepath):
                    return item
                try:
                    with open(filepath, 'wb') as file:
                        response = urllib2.urlopen(image_url)
                        file.write(response.read())
                except Exception as reason:
                    log.msg("Save image error: {0}".format(reason), level=log.ERROR, spider=spider)
                else:
                    log.msg("Download image to MongoDB database!", level=log.DEBUG, spider=spider)
                if filepath:
                    item['image_local_path'] = filepath
            self.collection.insert(dict(item))
            log.msg("Article added to MongoDB database!", level=log.DEBUG, spider=spider)
        return item
    def close_spider(self, spider):
        pass
"v-doxu1@microsoft.com"
] | v-doxu1@microsoft.com |
8606cc74a4c66fcb0fdeafb43e19ff547862ccda | 590b366fe2bfa8bd009992a5d3d358af4570367b | /test_api_2gis.py | 73202c4ac40bd471984ce9b901c0cfebd456aba0 | [] | no_license | potemkuh/test_api_2gis | eaece6d5b71a5d9f2599fdb7458f256785cccf4b | 64b8143a81254011444c5b0125910cbcd95f95bb | refs/heads/main | 2023-07-16T22:04:31.575081 | 2021-08-28T08:40:50 | 2021-08-28T08:40:50 | 400,744,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,000 | py | import requests
import pytest
import data_test
def test_total_count():
    # The regions endpoint must report exactly 22 items in total.
    payload = requests.get(data_test.url).json()
    assert payload['total'] == 22
def test_default_page_size():
    # Without an explicit page_size the API returns 15 items per page.
    payload = requests.get(data_test.url).json()
    assert len(payload['items']) == 15
@pytest.mark.parametrize('value, expected_result', data_test.positive_page_size_list)
def test_positive_page_size(value, expected_result):
    # Each valid page_size must yield exactly that many items.
    payload = requests.get(data_test.url, params={'page_size': value}).json()
    assert len(payload['items']) == expected_result
@pytest.mark.parametrize('value', data_test.negative_page_size_list)
def test_negative_page_size(value):
    # Invalid page_size values produce an error payload without 'items',
    # so accessing that key must raise KeyError.
    with pytest.raises(KeyError):
        payload = requests.get(data_test.url, params={'page_size': value}).json()
        len(payload['items'])
def test_positive_substr_search():
    # A three-letter substring is long enough to match at least one region.
    payload = requests.get(data_test.url, params={'q': 'рск'}).json()
    matches = len(payload['items'])
    assert matches > 0
def test_negative_substr_search():
    # A two-letter query is rejected, so the response carries no 'items'.
    with pytest.raises(KeyError):
        payload = requests.get(data_test.url, params={'q': 'ск'}).json()
        len(payload['items'])
@pytest.mark.parametrize('search, expected_result', data_test.full_name_data_list)
def test_register(search, expected_result):
    # Search must be case-insensitive: every hit carries the canonical name.
    payload = requests.get(data_test.url, params={'q': search}).json()
    for entry in payload['items']:
        assert entry.get('name') == expected_result
@pytest.mark.parametrize('query, value, expected_result', data_test.ignore_query_param)
def test_ignoring_other_parameters(query, value, expected_result):
    # When 'q' is present, every other query parameter must be ignored.
    payload = requests.get(data_test.url, params={'q': 'москва', query: value}).json()
    entries = payload['items']
    if len(entries) == 1:
        for entry in entries:
            assert expected_result in entry.get('name')
@pytest.mark.parametrize('value', data_test.country_code)
def test_ignoring_other_parameters_substr(value):
    # country_code alongside 'q' is ignored; returned codes still come
    # from the known set of country codes.
    payload = requests.get(data_test.url, params={'q': 'рск', 'country_code': value}).json()
    for entry in payload['items']:
        assert entry['country']['code'] in data_test.country_code
@pytest.mark.parametrize('value, expected_result', data_test.list_country_code)
def test_search_country_code(value, expected_result):
    # Filtering by country_code must return only regions of that country.
    payload = requests.get(data_test.url, params={'country_code': value}).json()
    for entry in payload['items']:
        assert entry['country']['code'] == expected_result
| [
"noreply@github.com"
] | noreply@github.com |
f34fe8f4f6f06313a6b776461d3fc8e3572bcfa5 | f45a9dcb4660e2af6239ee599198bcb264f666d0 | /Lab2/Homework/Youtube/ex1.py | af4a8b1745566a21a5849a7a021da78dc37fef16 | [] | no_license | duyvukhanh/vukhanhduy-lab-c4e18 | 89c6ff648f03c928520a8af7a7ac65f98084cda2 | 8047479944efa91f1de90a4012a7ba25fd2592e2 | refs/heads/master | 2020-03-19T16:55:05.303978 | 2018-06-15T09:04:23 | 2018-06-15T09:04:23 | 136,736,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | from youtube_dl import YoutubeDL
# Demo script: every dl.download(...) call below hits the network and
# writes media files into the current working directory.
# Sample 1: Download a single youtube video
dl = YoutubeDL()
dl.download(['https://www.youtube.com/watch?v=WHK5p7JL7g4'])
# Sample 2: Download multiple youtube videos
# Put list of song urls in download function to download them, one by one
dl.download(['https://www.youtube.com/watch?v=wNVIn-QS4DE', 'https://www.youtube.com/watch?v=JZjRrg2rpic'])
# Sample 3: Download audio
options = {
    'format': 'bestaudio/audio' # Tell the downloader to download only the best quality of audio
}
dl = YoutubeDL(options)
dl.download(['https://www.youtube.com/watch?v=c3jHlYsnEe0'])
# Sample 4: Search and then download the first video
options = {
    'default_search': 'ytsearch', # tell downloader to search instead of directly downloading
    'max_downloads': 1 # Tell downloader to download only the first entry (video)
}
dl = YoutubeDL(options)
dl.download(['con điên TAMKA PKL'])
# Sample 5: Search and then download the first audio
options = {
    'default_search': 'ytsearch', # tell downloader to search instead of directly downloading
    'max_downloads': 1, # Tell downloader to download only the first entry (audio)
    'format': 'bestaudio/audio'
}
dl = YoutubeDL(options)
dl.download(['Nhớ mưa sài gòn lam trường'])
"duy@Duys-MacBook-Air.local"
] | duy@Duys-MacBook-Air.local |
edcd8e182d7a8857d017633df27c9b861b594cb8 | 3d7e122dae6ed0e71fec825960f928051e75b68b | /Jakob_CameraCode/Pathfinding/simPathMap.py | fff0ab4d2cffad2d946793e9137ccf391d9c0dd8 | [] | no_license | Hochbotaniker/Fugaintegrum | dd250691b5c6a24ee76d75ac86a31926e0173eda | f0423d51b35881c8ffae36572b2e06b833e07626 | refs/heads/master | 2023-08-23T20:47:30.373464 | 2021-10-06T14:29:22 | 2021-10-06T14:29:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,380 | py | import matplotlib.pyplot as plt
import numpy as np
from math import atan, degrees
def getDist(pos_drone, pos_dest):
    """Euclidean distance between two coordinate vectors."""
    delta = pos_drone - pos_dest
    return sum(delta ** 2) ** 0.5
class simPathMap:
    """Occupancy-grid map spanning the drone position and a destination,
    used for path-finding simulation.  Cell values encode semantics:
    1 = map border, 0.9 = safety margin, 0.75 = drone, 0.5 = free,
    0.25 = destination, and `label` (default 1) = obstacle."""
    def __init__(self, pos_drone, pos_dest, radius=5, pixel_size=1.0):
        self.root = pos_drone
        self.cord_drone = pos_drone
        self.pixel_size = pixel_size
        # Scale the distances to the pixel size
        height = self.horiDist(pos_drone, pos_dest)
        width = self.vertDist(pos_drone, pos_dest)
        # print("Before", width, height)
        # Could be collapsed into one expression, but that would be unreadable.
        # Pads the grid by `radius` cells on each side, preserving sign.
        width = round(width + (2 * radius * width) / (abs(width) * pixel_size))
        height = round(height + (2 * radius * height) / (abs(height) * pixel_size))
        # print("After", height, width)
        # swap 0 and 1 ? - ! - done
        self.drone_pos = [int(round(-height * (radius / pixel_size) / abs(height))),
                          int(round(width * (radius / pixel_size) / abs(width)))]
        self.dest_pos = [int(round(height * (radius / pixel_size) / abs(height))),
                         int(round(-width * (radius / pixel_size) / abs(width)))]
        self.width = int(abs(width))
        self.height = int(abs(height))
        # Wrap negative indices around to valid grid positions.
        if self.dest_pos[0] < 0:
            self.dest_pos[0] = self.height + self.dest_pos[0]
        if self.dest_pos[1] < 0:
            self.dest_pos[1] = self.width + self.dest_pos[1]
        if self.drone_pos[0] < 0:
            self.drone_pos[0] = self.height + self.drone_pos[0]
        if self.drone_pos[1] < 0:
            self.drone_pos[1] = self.width + self.drone_pos[1]
        # print(self.drone_pos, self.dest_pos)
        # Interior cells are free (0.5); the outermost ring is border (1).
        self.path_map = np.array([
            [0.5 if ((i != 0) and (i != self.width - 1) and (j != 0) and (j != self.height - 1)) else 1 for i in
             range(self.width)] for j in range(self.height)])
        # Normalise the value range ([0, 1]) so the plot colour map starts correctly.
        self.path_map[0][0] = 0
        # Set the colours of the drone and the destination.
        self.path_map[self.drone_pos[0]][self.drone_pos[1]] = 0.75
        self.path_map[self.dest_pos[0]][self.dest_pos[1]] = 0.25
        # print(len(self.path_map), len(self.path_map[0]))
    def vertDist(self, pos_root, pos_dest):
        # Signed vertical distance in grid cells (first coordinate axis).
        return int(round((pos_dest[0] - pos_root[0]) / self.pixel_size))
    def horiDist(self, pos_root, pos_dest):
        # Signed horizontal distance in grid cells (second coordinate axis).
        return int(round((pos_dest[1] - pos_root[1]) / self.pixel_size))
    def get_angle(self, pos_root, pos_dest):
        """Bearing from pos_root to pos_dest in degrees (0-360)."""
        hd = self.horiDist(pos_root, pos_dest)
        vd = self.vertDist(pos_root, pos_dest)
        if vd == 0:
            if hd > 0:
                return 0
            else:
                return 180
        if vd < 0:
            return 270 - degrees(atan(hd / vd))
        return 90 - degrees(atan(hd / vd))
    def change_drone_pos(self, pos_new):
        """
        Move the drone's position on the map.
        :param pos_new: new drone coordinate (lat, lon)
        """
        vDist = self.vertDist(self.cord_drone, pos_new)
        hDist = self.horiDist(self.cord_drone, pos_new)
        # print("vertical and horizontal distances:", vDist, hDist)
        # Only move if the new cell lies strictly inside the grid.
        if (0 < (self.drone_pos[0] - hDist) < self.height) and (0 < (self.drone_pos[1] + vDist) < self.width):
            self.cord_drone = pos_new
            # remove current drone point
            self.path_map[self.drone_pos[0]][self.drone_pos[1]] = 0.5
            # add new drone point
            self.path_map[self.drone_pos[0] - hDist][self.drone_pos[1] + vDist] = 0.75
            # update pos_drone
            self.drone_pos = [self.drone_pos[0] - hDist, self.drone_pos[1] + vDist]
    def add_vec(self, vec, label=1):
        """
        Add a single point.  The vector holds the x/y offsets of the point
        relative to the drone.
        :param vec: (x, y) offsets in metres
        :param label: cell label (1 = obstacle, 0.75 = drone, 0.25 = destination)
        :return: the (row, col) grid cell written, or None if out of bounds
          or the cell was not free.
        """
        x = self.drone_pos[1] + int(round(vec[0] / self.pixel_size))
        y = self.drone_pos[0] - int(round(vec[1] / self.pixel_size))
        # Point
        p = None
        if (0 < y < self.height) and (0 < x < self.width):
            if self.path_map[y][x] == 0.5:
                self.path_map[y][x] = label
                p = (y, x)
        return p
    def add_vac_arr(self, array, label=1):
        """Add a batch of offset vectors, then draw a safety margin around
        the boundary points of the newly added cluster."""
        p = []
        for i in array:
            t = self.add_vec(i, label)
            if t is not None:
                p.append(t)
        # Add the safety margin for all points that are not completely
        # surrounded by other added points.
        for i, j in p:
            # neighbour check
            if not ((i + 1, j) in p and (i - 1, j) in p and (i, j - 1) in p and (i, j + 1) in p):
                self.mark_boarder(vec=(i, j), radius=1.3 / self.pixel_size)
    def visualize_path(self, path):
        """Render the map with the given path overlaid (value 0.15)."""
        temp_map = self.path_map.copy()
        # [1:-1] skips the first and last element (drone and destination).
        for i in path[1:-1]:
            temp_map[i[0]][i[1]] = 0.15
        plt.imshow(temp_map, cmap='twilight_shifted')
        plt.xticks(())
        plt.yticks(())
        plt.show()
    def plot_map(self):
        """Render the current occupancy grid."""
        plt.imshow(self.path_map, cmap='twilight_shifted')
        plt.xticks(())
        plt.yticks(())
        plt.show()
    def checkpoints_to_pos(self, checkpoints, drone_cord):
        """Convert grid checkpoints back into world coordinates relative to
        the drone's coordinate drone_cord."""
        pos_points = []
        for i in checkpoints:
            t = np.array([self.drone_pos[1] - i[1], i[0] - self.drone_pos[0], 0])
            pos_points.append(np.array(drone_cord) - t * self.pixel_size)
        return np.array(pos_points)
    def drone_illegal(self):
        """Re-mark the drone cell and free its 8 neighbouring cells."""
        self.path_map[self.drone_pos[0]][self.drone_pos[1]] = 0.75
        for j in [[1, 1], [1, 0], [1, -1], [0, -1], [-1, -1], [-1, 0], [-1, 1], [0, 1]]:
            self.path_map[self.drone_pos[0] + j[0]][self.drone_pos[1] + j[1]] = 0.5
    def mark_boarder(self, vec, radius):
        """Mark every free cell within `radius` of `vec` as safety margin (0.9)."""
        array = self.path_map
        dist_array = np.array(
            [[np.sum((np.array((j, i)) - vec) ** 2) ** 0.5 for i in range(len(array[0]))] for j in range(len(array))])
        self.path_map[np.logical_and(dist_array < radius, self.path_map == 0.5)] = 0.9
| [
"MarcLorenz.Doehmer@googlemail.com"
] | MarcLorenz.Doehmer@googlemail.com |
0b32c13516210faf3744f8bf6adf1ffdc47891a2 | 7bed907b24f9a12daab675028c356fffc3bef25b | /setup.py | 9aa0ce1c21b8bd4a588c69f05d7c93673610ff0f | [
"MIT"
] | permissive | bakera81/siuba | 9ff6c5812aabfbb17b38af65a528361777b1c307 | 568729989333193ff38c26ac68604aa8ba9b490b | refs/heads/master | 2020-07-06T07:30:00.049844 | 2019-08-16T20:09:55 | 2019-08-16T20:09:55 | 202,940,072 | 0 | 0 | MIT | 2019-08-17T23:26:46 | 2019-08-17T23:26:46 | null | UTF-8 | Python | false | false | 996 | py | from setuptools import setup, find_packages
# parse version ---------------------------------------------------------------
import re
import ast
# _version_re captures the right-hand side of the `__version__ = ...` line
# so the version string lives in exactly one place (siuba/__init__.py).
_version_re = re.compile(r'__version__\s+=\s+(.*)')

with open('siuba/__init__.py', 'rb') as f:
    version = str(ast.literal_eval(_version_re.search(
        f.read().decode('utf-8')).group(1)))

# setup -----------------------------------------------------------------------
setup(
    name='siuba',
    packages=find_packages(),
    version=version,
    description='A package for quick, scrappy analyses with pandas and SQL',
    author='Michael Chow',
    license='MIT',
    author_email='mc_al_gh_siuba@fastmail.com',
    url='https://github.com/machow/siuba',
    keywords=['package', ],
    install_requires = [
        "pandas"
    ],
    include_package_data=True,
    classifiers=[
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)
| [
"machow@princeton.edu"
] | machow@princeton.edu |
f78b97bc9bb5530abd9d49f9255ef8b5c1d50c60 | 64ecdfd4fdde040b5fc6f576e2409fae25f4d30f | /Tarea/logica_proyecto.py | 1c4b8aa5d63ef965f3372d7920d32b52f319e375 | [] | no_license | AldoAlonsoS/EjerciciosPython3_AldoAlonso | 974ebdeb02e3fc00b25fe95e03e206e5590122d9 | a43d616294a77a40e81b36fcb5c05a0cd99372ae | refs/heads/master | 2022-11-05T12:59:03.690242 | 2020-07-03T16:57:08 | 2020-07-03T16:57:08 | 271,856,769 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,759 | py | from PySide2.QtWidgets import QApplication, QMainWindow, QFileDialog
from PySide2.QtCore import Slot
from ui_proyecto import Ui_MainWindow
from estudiante import Estudiante
import socket as s
import pickle
import sys
class MainWindow(QMainWindow):
    """Qt main window: connects to a TCP server, registers a student
    (pickled to disk), and streams a chosen file in 500-byte chunks."""
    def __init__(self):
        super(MainWindow, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Wire UI buttons to their handlers.
        self.ui.conectar.clicked.connect(self.conexion_servidor)
        self.ui.enviarinfo.clicked.connect(self.registrar)
        self.ui.buscar.clicked.connect(self.buscarArchivo)
        self.ui.ennviararchivo.clicked.connect(self.buscarArchivo)
    @Slot()
    def conexion_servidor(self):
        """Toggle the TCP connection using the IP/port fields of the UI."""
        if self.ui.conectar.text() == 'CONECTAR':
            try:
                self.cliente = s.socket()
                self.cliente.connect((self.ui.ip.text(), int(self.ui.puerto.text())))
                #self.ui.estado.setText('Conectado')
                self.ui.conectar.setText('Desconectar')
            except:
                e = sys.exc_info()[1]
                self.show_error(str(e))
        elif self.ui.conectar.text() == 'DESCONECTAR':
            self.cliente.close()
            # NOTE(review): the main window's `ui` object has no setText();
            # this line looks like it should target a UI label (e.g.
            # self.ui.estado, as in the commented-out line above) -- verify.
            self.ui.setText('Desconectado')
            self.ui.conectar.setText('CONECTAR')
        #print('Conectado al servidor...')
        #msg = 'Iniciozip'
        #bytes = msg.encode()
        #cliente.send(bytes)
        #while True:
        #data = cliente.recv(500)
        #print(data)
        #if data == b'':
        #print('Finzip')
        #break
        #cliente.close()
        #pass
    @Slot()
    def registrar(self):
        """Pickle the student entered in the form to RegistroProyecto.txt."""
        tmp = Estudiante(self.ui.nombre.text(), self.ui.correo.text(), self.ui.contrasenia.text())
        print(f'Persona Registrada {tmp.nombre()}')
        file = open('RegistroProyecto.txt', 'wb')
        pickle.dump(tmp, file)
        file.close()
    @Slot()
    def buscarArchivo(self):
        """Pick a file via a dialog and stream it to the server in 500-byte
        chunks, bracketed by 'Iniciozip'/'Finzip' markers; also writes a
        local copy to copiaimg.png."""
        filename = QFileDialog.getOpenFileName(self, 'Abrir archivo', '.', 'Image Files(*.txt)')
        file = open(filename[0], 'rb')
        print(f'Variable File:{file}')
        count = 0
        size = 0
        f2 = open('copiaimg.png', 'wb')
        # for i in file:
        i = file.read(500)
        msg = 'Iniciozip'
        bytes = msg.encode()
        self.cliente.send(bytes)
        while i:
            f2.write(i)
            print(f'[{count + 1}:{len(i)}] {i}')
            count += 1
            size += len(i)
            i = file.read(500)
        msg2 = 'Finzip'
        bytes2 = msg2.encode()
        self.cliente.send(bytes2)
        f2.close()
        file.close()
if __name__=='__main__':
    # Standard Qt bootstrap: create the application, show the main window,
    # and enter the event loop.
    app = QApplication()
    window = MainWindow()
    window.show()
    app.exec_()
"aldoalonsos18@gmail.com"
] | aldoalonsos18@gmail.com |
963aa4e4fb0d5b0a6e95edc7422839ca52cfaa72 | 45cc43cacadbcb53af8e41d42b9fe5d170b75f17 | /steps/post_pics_to_pr.py | 8977d51cceca52d316aef544c731510b9f73f4e7 | [
"Apache-2.0"
] | permissive | pollockm/buildsystem | 71e3197044c8f2b9ea0966ededc0bb1e09e0b177 | 94dde7f27bd265d41dd71b14b04363dfacb3d5f7 | refs/heads/master | 2020-03-25T16:15:09.366428 | 2018-08-07T20:44:17 | 2018-08-07T20:44:17 | 143,921,918 | 0 | 0 | Apache-2.0 | 2018-08-07T20:09:59 | 2018-08-07T20:09:59 | null | UTF-8 | Python | false | false | 2,503 | py | #!/usr/bin/env python3
import requests
import json
import base64
import os
import datetime
import logging
_moduleLogger = logging.getLogger(__name__)
def _create_header(token):
return {'Authorization': 'token %s' % token.strip()}
def _read_picture(file_name):
with open(file_name, 'rb') as text:
return base64.b64encode(text.read()).decode()
def _post_file(file_data, folder, file_name, header,picRepo):
    """Commit a base64-encoded file to picRepo via the GitHub contents API
    and return its download URL."""
    payload = json.dumps({"message": "commit message", "content": file_data})
    target = 'https://api.github.com/repos/%s/contents/%s/%s' % (picRepo, folder, file_name)
    response = requests.put(target, data=payload, headers=header)
    if response.ok:
        _moduleLogger.info('Response code: %s', response.status_code)
    else:
        _moduleLogger.error('Bad response code: %s', response.status_code)
        _moduleLogger.error('Bad response text: %s', response.text)
    return response.json()['content']['download_url']
# post a comment on an issue
def _post_comment_to_pr(urlPicPairs, pullRequestInfo, prNumber, header):
    """Post a PR comment embedding each (name, url) picture pair."""
    formatString = "### %s: \n\n"
    body = """Bleep bloop!
LabVIEW Diff Robot here with some diffs served up hot for your pull request.
Notice something funny? Help fix me on [my GitHub repo.](https://github.com/LabVIEW-DCAF/buildsystem)
"""
    for pair in urlPicPairs:
        body += formatString % pair
    org, repo, _ = pullRequestInfo.split('/')
    target = "https://api.github.com/repos/%s/%s/issues/%s/comments" % (org, repo, prNumber)
    response = requests.post(target, data=json.dumps({"body": body}), headers=header)
    if response.ok:
        _moduleLogger.info('Response code: %s', response.status_code)
    else:
        _moduleLogger.error('Bad response code: %s', response.status_code)
        _moduleLogger.error('Bad response text: %s', response.text)
def post_pics_to_pr(token, localPicfileDirectory, pullRequestInfo, prNumber, picRepo):
    """Upload every .png in localPicfileDirectory to the picture repo and
    link them all in a single comment on the pull request."""
    header = _create_header(token)
    pics = [f for f in os.listdir(localPicfileDirectory) if f.endswith(".png")]
    # A time-stamped folder keeps successive runs from colliding in the repo.
    folder = pullRequestInfo + '/' + datetime.datetime.now().strftime('%Y-%m-%d/%H:%M:%S')
    picUrls = []
    for pic in pics:
        encoded = _read_picture(os.path.join(localPicfileDirectory, pic))
        uploaded_url = _post_file(encoded, folder, os.path.split(pic)[1], header, picRepo)
        picUrls.append((pic, uploaded_url))
    if picUrls != []:
        _post_comment_to_pr(picUrls, pullRequestInfo, prNumber, header)
| [
"john.boyd@ni.com"
] | john.boyd@ni.com |
93210f2559dedc461d06fd3785d80e20905b0916 | 39f4cbaee6d219c715a1919b40d32c39592a7796 | /plot_percentile_elev.py | 713198c8a9f2274bcb6ddfefd5d43c2b209a793f | [] | no_license | XUEYANZHANG9/IS_snowfire | 3e46bfe782e8a591d3992c0671668d9f2a47a1b1 | 257c70ec1238510b1940e5460c6a0655e5f0d624 | refs/heads/master | 2021-12-12T13:57:55.397049 | 2017-01-19T22:38:30 | 2017-01-19T22:38:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,798 | py | ### plot elevation percentiles
# basins=['cascades','california','southernrockies','northernrockies','whites']
import numpy as np
import sys
from snowpack_functions import unpack_netcdf_swe_ensavg
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import netCDF4
from netCDF4 import num2date
import os
fig=plt.figure()
'''
args = sys.argv[0]
basin = args[0]
'''
basin='whites'
scenarios = ["historical","rcp45","rcp85"]
# One stacked subplot per scenario, each showing elevation percentiles.
for s in np.arange(3):
    lats, lons, swe, datess = unpack_netcdf_swe_ensavg(basin,scenarios[s])
    filename = '/raid9/gergel/agg_snowpack/%s/percentiles_elev_ensavg_SWE_%s.npz' %(scenarios[s],basin)
    data = np.load(filename)
    ax=fig.add_subplot(3,1,s+1)
    e_10 = data['e_10']
    e_25 = data['e_25']
    e_50 = data['e_50']
    e_75 = data['e_75']
    # NOTE(review): key 'e90' (no underscore) is inconsistent with the other
    # keys -- confirm against the script that writes the .npz files.
    e_90 = data['e90']
    ax.plot_date(datess[9:],e_10,fmt='r-',label='10th')
    ax.plot_date(datess[9:],e_25,fmt='b-',label='25th')
    ax.plot_date(datess[9:],e_50,fmt='k-',label='50th')
    ax.plot_date(datess[9:],e_75,fmt='g-',label='75th')
    ax.plot_date(datess[9:],e_90,fmt='m-',label='90th')
    # Per-basin y-axis limits (metres of elevation).
    if (basin == 'california'):
        ax.set_ylim([1400,3600])
    elif (basin == 'northernrockies'):
        ax.set_ylim([1100,3000])
    elif (basin == 'southernrockies'):
        ax.set_ylim([2000,3700])
    else:
        ax.set_ylim([800,2000])
    ax.set_ylabel('elev (m)')
    ax.legend(loc='center left', prop={'size':6},bbox_to_anchor=(1,0.5),ncol=1,fancybox=True,shadow=True)
# plt.ylabel('elev (m)')
plt.suptitle('10-year EnsAvg Elevations for %s' %(basin))
## save plot
direc = '/raid9/gergel/agg_snowpack/plots/'
plotname = 'percentiles_elev_ensavg_SWE_%s' % (basin)
savepath = os.path.join(direc, plotname)
print ("saving figure to '%s'" % savepath)
plt.savefig(savepath)
| [
"gergel@hydra.hydro.washington.edu"
] | gergel@hydra.hydro.washington.edu |
e0f48ba17a7c20b092c105fc0d304198b54a984a | 05876e85e73eb1df6c377d37a5c45b78abab5c1e | /batallas_turnos/main.py | 1a6824194a5c44d13d076856e5fcb771230ab04b | [] | no_license | DiegoFHG/batallas_turnos | e50fddf054ed9c0ad7115d9fa8101267a6dc6b51 | 30e2100399c0ac3d4260f629761cb2612c26e73e | refs/heads/master | 2023-03-30T07:02:53.239849 | 2019-10-08T15:41:45 | 2019-10-08T15:41:45 | 213,608,968 | 0 | 0 | null | 2021-03-29T20:38:53 | 2019-10-08T10:04:29 | Python | UTF-8 | Python | false | false | 8,941 | py | import random
from os import system, name
from time import sleep
from PyInquirer import prompt
class Game:
    """All state and combat rules for a turn-based hero-vs-vampire fight.

    The class tracks both fighters' health and status flags; the turn
    loop itself (player input, faint handling, win/lose checks) lives in
    the module-level ``__main__`` block and mutates these attributes.
    """

    def __init__(self):
        self.hero_life = 150
        self.vampire_life = 60
        self.game_over = False
        # True only for the current turn, after a successful "Defend
        # herself" action; reduces incoming vampire damage by 5.
        self.defense = False
        self.hero_fainted = False
        self.vampire_fainted = False
        # Turns remaining until a drunk potion fully heals the hero;
        # 0 means no potion is pending.
        self.potion_timeout = 0
        # Weapon definitions. 'accuracy' is the hit probability (0-1).
        # The potion and shield entries have no accuracy/attack_damage
        # because they are handled as special cases in hero_attacks().
        self.weapons = {
            'hero': [
                {
                    'name': "Punch",
                    'attack_damage': 7,
                    'accuracy': 50/100,
                },
                {
                    'name': "Basic sword",
                    'attack_damage': 15,
                    'accuracy': 25/100,
                },
                {
                    'name': "Hero's sword",
                    'attack_damage': 30,
                    'accuracy': 12/100,
                },
                {
                    'name': "Potion",
                },
                {
                    'name': "Hero's shield"
                }
            ],
            'vampire': [
                {
                    'name': "Vampire punch",
                    'attack_damage': 5,
                    'accuracy': 90/100,
                },
                {
                    'name': "Blood steal",
                    'attack_damage': 10,
                    'accuracy': 60/100,
                },
                {
                    'name': "Bloody Marie's blood sword",
                    'attack_damage': 20,
                    'accuracy': 40/100,
                }
            ]
        }
        # PyInquirer menu shown each turn; the choice labels must match
        # the strings tested in hero_attacks().
        self.questions = [
            {
                'type': 'list',
                'name': 'game',
                'message': "What will the hero do?",
                'choices': [
                    "Use {}, AD: {}".format(self.weapons['hero'][0]['name'], self.weapons['hero'][0]['attack_damage']),
                    "Use {}, AD: {}".format(self.weapons['hero'][1]['name'], self.weapons['hero'][1]['attack_damage']),
                    "Use {}, AD: {}".format(self.weapons['hero'][2]['name'], self.weapons['hero'][2]['attack_damage']),
                    "Use potion",
                    "Defend herself",
                ],
            }
        ]

    @staticmethod
    def _attack_hits(accuracy):
        """Roll 1-100 and return True when the roll is within ``accuracy``.

        ``accuracy`` is a probability in [0, 1]; e.g. 0.6 hits when the
        roll is 60 or less. This replaces the previous float-equality
        dispatch (``accuracy == 90/100`` etc.), which silently never hit
        for any accuracy value outside the three hard-coded ones.
        """
        return random.randint(1, 100) <= round(accuracy * 100)

    def vampire_attacks(self, answer):
        """Pick a random vampire weapon and, on a hit, damage the hero.

        ``answer`` is accepted for symmetry with hero_attacks() but is
        not used. When the hero's shield is up (self.defense), incoming
        damage is reduced by 5 before being applied.
        """
        choice = random.choice(self.weapons['vampire'])
        print("Vampire uses {}!".format(choice['name']))
        if self._attack_hits(choice['accuracy']):
            attack_damage = choice['attack_damage']
            if self.defense:
                attack_damage -= 5  # hero's shield soaks 5 damage
            self.hero_life -= attack_damage
            print("Vampire inflicts {} damage to the Hero!".format(attack_damage))
        return

    def hero_attacks(self, answer):
        """Resolve the hero's chosen action for this turn.

        ``answer`` is the PyInquirer result dict; answer['game'] holds the
        selected menu label. Attack choices roll against the weapon's
        accuracy; "Use potion" schedules a full heal in 4 turns; "Defend
        herself" raises the shield with a fixed 80% success chance.
        """
        success = random.randint(1, 100)
        # The first three hero weapons are plain attacks with the same
        # "Use <name>, AD: <damage>" menu label shape.
        for weapon in self.weapons['hero'][:3]:
            if answer['game'] == "Use {}, AD: {}".format(weapon['name'], weapon['attack_damage']):
                print("Hero uses {}!".format(weapon['name']))
                if success <= round(weapon['accuracy'] * 100):
                    attack_damage = weapon['attack_damage']
                    self.vampire_life -= attack_damage
                    print("The hero inflicts {} damage to the Vampire!".format(attack_damage))
                return
        if answer['game'] == 'Use potion':
            # Heal lands after the countdown reaches zero (see __main__).
            self.potion_timeout = 4
            print("Hero uses {}!".format(self.weapons['hero'][3]['name']))
            sleep(2)
            print("The hero will recover all its health in the near future... But her abilities are unusuable for a while...")
        elif answer['game'] == 'Defend herself':
            print("Hero uses {}!".format(self.weapons['hero'][4]['name']))
            if success <= 80:  # the shield has a fixed 80% success rate
                self.defense = True
                print("The heroe reduces the attack damage of the vampire by 5!")
        return
def clear_screen():
    """Clear the terminal using the platform-appropriate shell command."""
    # os.name == 'nt' means Windows ('cls'); everything else gets 'clear'.
    system('cls' if name == 'nt' else 'clear')
def defeated():
    """Show the losing sequence and flag the global game as over."""
    story = (
        'The vampire has murdered the hero...',
        'Now the town faces a great danger...',
    )
    for line in story:
        clear_screen()
        print(line)
        sleep(2)
    clear_screen()
    print('Game over!')
    clear_screen()
    game.game_over = True
def won():
    """Show the winning sequence and flag the global game as over."""
    story = (
        ('The hero has defeated the vampire!', 2),
        ('Now the town is safe, for a least one more day...', 3),
    )
    for line, pause in story:
        clear_screen()
        print(line)
        sleep(pause)
    clear_screen()
    game.game_over = True
if __name__ == '__main__':
    clear_screen()
    game = Game()
    # Main turn loop: one iteration = status update, player action,
    # vampire response. Runs until one side's health reaches zero.
    while game.game_over != True:
        # Refresh faint status from current health.
        # NOTE(review): hero_fainted is never reset to False once set
        # (vampire_fainted is, below) -- confirm intended.
        if game.hero_life <= 30:
            game.hero_fainted = True
        if game.vampire_life <= 20:
            game.vampire_fainted = True
        # Win/lose checks.
        if game.hero_life <= 0:
            defeated()
            break
        if game.vampire_life <= 0:
            won()
            break
        # Potion countdown: the full heal lands when it reaches zero.
        if game.potion_timeout > 1:
            game.potion_timeout = game.potion_timeout - 1
        elif game.potion_timeout == 1:
            game.potion_timeout = 0
            game.hero_life = 150
            clear_screen()
            print("The hero has recovered all her health!")
            sleep(2)
        clear_screen()
        print("Hero's healt: {} | Vampire's healt: {}".format(game.hero_life, game.vampire_life))
        answer = prompt(game.questions)
        clear_screen()
        # The shield only lasts one turn, so it is cleared before acting.
        game.defense = False
        # Hero acts only while no potion is pending.
        # NOTE(review): the fainted-recovery branch is only reachable when
        # potion_timeout != 0, so a fainted hero with no potion pending
        # still attacks normally -- confirm intended.
        if game.potion_timeout == 0:
            game.hero_attacks(answer)
        elif game.hero_fainted:
            game.hero_life = game.hero_life + 2
            print("The hero is passed out... But slowly recovering...")
            sleep(2)
        # Vampire's turn: a fainted vampire recovers instead of attacking.
        if game.vampire_fainted:
            game.vampire_life = game.vampire_life + 2
            if game.vampire_life >= 20:
                game.vampire_fainted = False
                print("The vampire has awaken again!")
                sleep(2)
            else:
                print("The vampire is passed out... But slowly recovering...")
                sleep(2)
        else:
            game.vampire_attacks(answer)
            sleep(2)
        clear_screen()
    clear_screen()
    # NOTE(review): os.system('exit') spawns a child shell that exits
    # immediately and has no effect on this process; likely removable.
    system('exit')
| [
"diego.hernandez2@alumnos.uneatlantico.es"
] | diego.hernandez2@alumnos.uneatlantico.es |
8a88cfdb5eec2620ade3ee7b5d247e95d23ba520 | 3202f4da43428bbcc4f89341dc93738890632664 | /src/gen_graph.py | 5d91065d6ab171a0381f94672832da9218f22ed7 | [
"GPL-3.0-only"
] | permissive | yjzhang3/ec504_project | 906caf182bbcd8e9db3e581a9b899c20e27aa93f | 2984ea3ff612b96f10edcc7fafd716eded049342 | refs/heads/main | 2023-05-08T11:14:22.062727 | 2021-06-01T20:48:53 | 2021-06-01T20:48:53 | 372,079,539 | 0 | 0 | MIT | 2021-05-29T22:23:42 | 2021-05-29T22:23:41 | null | UTF-8 | Python | false | false | 638 | py | import sys
import networkx as nx
from itertools import combinations
from random import random
def ER(n, p):
    """Sample an Erdos-Renyi random graph G(n, p).

    Each of the C(n, 2) possible edges over nodes 0..n-1 is included
    independently with probability p. Returns a networkx.Graph.
    """
    nodes = set(range(n))
    edges = {pair for pair in combinations(nodes, 2) if random() < p}
    graph = nx.Graph()
    graph.add_nodes_from(nodes)
    graph.add_edges_from(edges)
    return graph
if sys.argv[1] == 's':
    # 's' mode: generate a synthetic random graph and dump it as an edge
    # list. Usage: gen_graph.py s <filenum> <n>
    filenum = sys.argv[2]   # suffix for the output file name
    n = int(sys.argv[3])    # number of nodes
    # n = 15
    p = 0.4
    G = ER(n, p)
    # print(G.edges)
    with open('data/set' + filenum + '.txt', 'w') as f:
        # Column header, then one "i j score" line per edge (score fixed at 1).
        print('protein1','protein2','combined_score', file=f)
        for i,j in G.edges:
            print(i,j,1, file=f)
"ronrat@bu.edu"
] | ronrat@bu.edu |
a7c60b78f32abc44f71b77a5227cb86f6803806d | 659d41f0c737dffc2a6ebd5e773a6513da32e5ba | /scripts/experiments/Experiments729/dephasing_scan_duration.py | adf770c56bb5fd14721f410bb6a9d3b6978b1e37 | [] | no_license | HaeffnerLab/sqip | b3d4d570becb1022083ea01fea9472115a183ace | 5d18f167bd9a5344dcae3c13cc5a84213fb7c199 | refs/heads/master | 2020-05-21T23:11:10.448549 | 2019-11-21T02:00:58 | 2019-11-21T02:00:58 | 19,164,232 | 0 | 0 | null | 2019-11-04T04:39:37 | 2014-04-25T23:54:47 | Python | UTF-8 | Python | false | false | 7,104 | py | from common.abstractdevices.script_scanner.scan_methods import experiment
from excitations import excitation_dephase
from sqip.scripts.scriptLibrary.common_methods_729 import common_methods_729 as cm
from sqip.scripts.scriptLibrary import dvParameters
import time
import labrad
from labrad.units import WithUnit
from numpy import linspace
#The following command brinfgs the sequence plotter.
#from common.okfpgaservers.pulser.pulse_sequences.plot_sequence import SequencePlotter
class dephase_scan_duration(experiment):
    """Scan the total interaction time of a Ramsey-style dephasing sequence.

    For each scan point the evolution pulse duration is capped at its
    configured maximum and any remaining interaction time becomes a
    Ramsey wait; the excitation_dephase sequence is then run and the
    resulting excitation probabilities are streamed to the data vault.
    (Python 2 code -- note the print statements.)
    """
    name = 'Dephase Scan Duration'
    # Parameters this wrapper needs in addition to those required by the
    # underlying excitation_dephase pulse sequence.
    dephasing_required_parameters = [
        ('Dephasing_Pulses', 'preparation_line_selection'),
        ('Dephasing_Pulses', 'evolution_line_selection'),
        ('Dephasing_Pulses','preparation_sideband_selection'),
        ('Dephasing_Pulses','evolution_sideband_selection'),
        ('Dephasing_Pulses', 'scan_interaction_duration'),
        ('TrapFrequencies','axial_frequency'),
        ('TrapFrequencies','radial_frequency_1'),
        ('TrapFrequencies','radial_frequency_2'),
        ('TrapFrequencies','rf_drive_frequency'),
    ]
    @classmethod
    def all_required_parameters(cls):
        """Return this experiment's parameters merged with the sequence's.

        The three removed entries are computed at run time (in
        setup_sequence_parameters() and run()), so they must not be
        loaded from storage.
        """
        parameters = set(cls.dephasing_required_parameters)
        parameters = parameters.union(set(excitation_dephase.all_required_parameters()))
        parameters = list(parameters)
        #removing parameters we'll be overwriting, and they do not need to be loaded
        parameters.remove(('Dephasing_Pulses','evolution_ramsey_time'))
        parameters.remove(('Dephasing_Pulses','evolution_pulses_frequency'))
        parameters.remove(('Dephasing_Pulses','preparation_pulse_frequency'))
        return parameters
    def initialize(self, cxn, context, ident):
        """Create the sub-experiment and all server connections/contexts."""
        self.ident = ident
        self.excite = self.make_experiment(excitation_dephase)
        self.excite.initialize(cxn, context, ident)
        self.scan = []
        self.cxnlab = labrad.connect('192.168.169.49') #connection to labwide network
        self.drift_tracker = cxn.sd_tracker
        self.dv = cxn.data_vault
        # Dedicated data-vault context so this experiment's directory and
        # dataset state do not interfere with the sub-experiment's.
        self.data_save_context = cxn.context()
        self.setup_data_vault()
    def setup_sequence_parameters(self):
        """Compute the 729 nm pulse frequencies and build the duration scan.

        Frequencies come from the drift tracker plus the selected sideband
        offsets; the scan list is converted to labrad microsecond units.
        """
        p = self.parameters.Dephasing_Pulses
        trap = self.parameters.TrapFrequencies
        prep_line_frequency = cm.frequency_from_line_selection('auto', None, p.preparation_line_selection, self.drift_tracker)
        frequency_preparation = cm.add_sidebands(prep_line_frequency, p.preparation_sideband_selection, trap)
        #if same line is selected, match the frequency exactly
        same_line = p.preparation_line_selection == p.evolution_line_selection
        same_sideband = p.preparation_sideband_selection.aslist == p.evolution_sideband_selection.aslist
        print 'same line', same_line
        print 'same sideband', same_sideband
        if same_line and same_sideband:
            frequency_evolution = frequency_preparation
        else:
            evo_line_frequency = cm.frequency_from_line_selection('auto', None, p.evolution_line_selection, self.drift_tracker)
            frequency_evolution = cm.add_sidebands(evo_line_frequency, p.evolution_sideband_selection, trap)
        self.parameters['Dephasing_Pulses.preparation_pulse_frequency'] = frequency_preparation
        self.parameters['Dephasing_Pulses.evolution_pulses_frequency'] = frequency_evolution
        # The evolution pulse never exceeds its configured duration; any
        # extra interaction time is spent as Ramsey wait (see run()).
        self.max_second_pulse = p.evolution_pulses_duration
        minim,maxim,steps = self.parameters.Dephasing_Pulses.scan_interaction_duration
        minim = minim['us']; maxim = maxim['us']
        self.scan = linspace(minim,maxim, steps)
        self.scan = [WithUnit(pt, 'us') for pt in self.scan]
    def setup_data_vault(self):
        """Point the saving context at a fresh timestamped directory."""
        localtime = time.localtime()
        dirappend = [time.strftime("%Y%b%d",localtime) ,time.strftime("%H%M_%S", localtime)]
        directory = ['','Experiments']
        directory.extend([self.name])
        directory.extend(dirappend)
        self.dv.cd(directory, True,context = self.data_save_context)
    def data_vault_new_trace(self):
        """Open a new data-vault dataset with one excitation column per ion."""
        localtime = time.localtime()
        datasetNameAppend = time.strftime("%Y%b%d_%H%M_%S",localtime)
        output_size = self.excite.output_size
        dependants = [('Excitation','Ion {}'.format(ion),'Probability') for ion in range(output_size)]
        self.dv.new('{0} {1}'.format(self.name, datasetNameAppend),[('Excitation', 'us')], dependants , context = self.data_save_context)
        window_name = ['Dephasing, Scan Duration']
        self.dv.add_parameter('Window', window_name, context = self.data_save_context)
        self.dv.add_parameter('plotLive', True, context = self.data_save_context)
    def run(self, cxn, context):
        """Execute the scan; return False if stopped early, True otherwise."""
        p = self.parameters.Dephasing_Pulses
        self.data_vault_new_trace()
        self.setup_sequence_parameters()
        for i,interaction_duration in enumerate(self.scan):
            should_stop = self.pause_or_stop()
            if should_stop:
                return False
            # Fill the interaction time with the evolution pulse first;
            # only time beyond the maximum pulse length becomes Ramsey wait.
            second_pulse_dur = min(self.max_second_pulse, interaction_duration)
            ramsey_time = max(WithUnit(0,'us'), interaction_duration - self.max_second_pulse)
            #ramsey_time = WithUnit(0,'us')
            p.evolution_ramsey_time = ramsey_time
            p.evolution_pulses_duration = second_pulse_dur
            self.excite.set_parameters(self.parameters)
            excitation, readout = self.excite.run(cxn, context)
            submission = [interaction_duration['us']]
            submission.extend(excitation)
            self.dv.add(submission, context = self.data_save_context)
            self.update_progress(i)
        self.save_parameters(self.dv, cxn, self.cxnlab, self.data_save_context)
        ####### FROM DYLAN -- PULSE SEQUENCE PLOTTING #########
        #ttl = self.cxn.pulser.human_readable_ttl()
        #dds = self.cxn.pulser.human_readable_dds()
        #channels = self.cxn.pulser.get_channels().asarray
        #sp = SequencePlotter(ttl.asarray, dds.aslist, channels)
        #sp.makePlot()
        ############################################3
        return True
    def finalize(self, cxn, context):
        """No-op; present to complete the experiment interface."""
        pass
    def update_progress(self, iteration):
        """Report fractional scan progress to the script scanner."""
        progress = self.min_progress + (self.max_progress - self.min_progress) * float(iteration + 1.0) / len(self.scan)
        self.sc.script_set_progress(self.ident, progress)
    def save_parameters(self, dv, cxn, cxnlab, context):
        """Save measured instrument values and experiment parameters."""
        measuredDict = dvParameters.measureParameters(cxn, cxnlab)
        dvParameters.saveParameters(dv, measuredDict, context)
        dvParameters.saveParameters(dv, dict(self.parameters), context)
if __name__ == '__main__':
    # Stand-alone launch: connect to LabRAD, register with the script
    # scanner (so progress/pause/stop work), then run the experiment.
    cxn = labrad.connect()
    scanner = cxn.scriptscanner
    exprt = dephase_scan_duration(cxn = cxn)
    ident = scanner.register_external_launch(exprt.name)
    exprt.execute(ident)
| [
"haeffnerlab@gmail.com"
] | haeffnerlab@gmail.com |
e622b509f76fcf2d70ef7558ea382ec013669bdf | e8af6df98cb905e839cc2b8a45befbb56109abf7 | /NetCoding/with_operation.py | 27a10bcdb1b1c0d4a9c0e128d2af613c20d458f9 | [] | no_license | csy1030/pyCourse | d075333c5d89d81eebc1ef72f43f96ea40c171ae | 67c25ea7cee9c0ea4b0583bfb798575bca8488d0 | refs/heads/master | 2021-05-23T11:03:11.360747 | 2020-04-09T21:05:42 | 2020-04-09T21:05:42 | 253,257,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | # with open('test') as f:
# data = f.read()
# print(data)
f_name = input("input name:")
with open(f_name) as f:
while True:
data = f.read(1024)
f_new = open('new_file','w')
f_new.write(data)
| [
"shuyangc97@gmail.com"
] | shuyangc97@gmail.com |
b6a2760e083ef2662b8cb1a29ee20d3d09c6f19b | e76aa4de68988abcfceb7f90ea680505a9159995 | /outrigger/__init__.py | 358e6751f654522e24e8680c88312573f25843fb | [
"BSD-3-Clause"
] | permissive | ggraham/outrigger | 3ab1798fbeb3c871cae4d2a12bcd721032c3a96c | 135388192bd8b15fc248653ee50943448ff19160 | refs/heads/master | 2021-05-26T09:58:02.547479 | 2020-04-29T19:32:34 | 2020-04-29T19:32:34 | 254,086,816 | 0 | 0 | BSD-3-Clause | 2020-04-29T19:32:35 | 2020-04-08T12:52:08 | null | UTF-8 | Python | false | false | 201 | py | # -*- coding: utf-8 -*-
__author__ = 'Olga Botvinnik'
__email__ = 'olga.botvinnik@gmail.com'
__version__ = '1.1.1'
__all__ = ['psi', 'region', 'util', 'io', 'validate', 'index',
'common']
| [
"olga.botvinnik@gmail.com"
] | olga.botvinnik@gmail.com |
6c298475750028ddc507024c20d0d2fb7fe96055 | c3e2fd391265f13e9104e2d441d4b046e1b892b5 | /Pattern_TD_Trap.py | d0ce2792f0af14cb63f1c9dd57b1ae33cc87077a | [] | no_license | jamesliu1/The-Book-of-Trading-Strategies | 262f0824ca3b6ba34daea63fba80bca6492c4a6e | 11ecc59db0e0d6ff5ec0685bb6d3dcd26780cacf | refs/heads/main | 2023-08-23T15:28:38.112508 | 2021-10-19T13:28:19 | 2021-10-19T13:28:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py |
# Base parameters
expected_cost = 0.5 * (lot / 10000)
assets = asset_list(1)
window = 1000
# Trading parameters
horizon = 'H1'
# Mass imports
my_data = mass_import(0, horizon)
def signal(Data):
# Adding columns
# Bullish signal
for i in range(len(Data)):
if Data[i - 1, 1] < Data[i - 2, 1] and Data[i - 1, 2] > Data[i - 2, 2] and Data[i, 3] > Data[i - 1, 1]:
Data[i, 6] = 1
# Bearish signal
for i in range(len(Data)):
if Data[i - 1, 1] < Data[i - 2, 1] and Data[i - 1, 2] > Data[i - 2, 2] and Data[i, 3] < Data[i - 1, 2]:
Data[i, 7] = -1
return Data
############################################################################## 1
my_data = adder(my_data, 10)
my_data = signal(my_data)
if sigchart == True:
signal_chart_ohlc_color(my_data, assets[0], 3, 6, 7, window = 250)
holding(my_data, 6, 7, 8, 9)
my_data_eq = equity_curve(my_data, 8, expected_cost, lot, investment)
performance(my_data_eq, 8, my_data, assets[0])
plt.plot(my_data_eq[:, 3], linewidth = 1, label = assets[0])
plt.grid()
plt.legend()
plt.axhline(y = investment, color = 'black', linewidth = 1) | [
"noreply@github.com"
] | noreply@github.com |
444220ef6344dec68b3c69e3cc8015b90ab484c2 | a9e4c2e9b84481761fc7517f79258a3bea327906 | /0x0B-python-input_output/5-save_to_json_file.py | b7a25b7d524e0f31240ab1e706298ad89718a9e4 | [] | no_license | jeanpierreba/holbertonschool-higher_level_programming | 91aa222d04515358c79714e1d08e3d64d26b73a0 | 8ddaa1bb0fc157b1e71816fe801d9ed4ec66aa16 | refs/heads/main | 2023-08-18T05:58:07.494956 | 2021-09-22T19:13:47 | 2021-09-22T19:13:47 | 361,840,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | #!/usr/bin/python3
""" Module to save an object into a file """
import json
def save_to_json_file(my_obj, filename):
""" Write an object to a text file, using a JSON representation """
with open(filename, 'w') as my_file:
json.dump(my_obj, my_file)
| [
"2713@holbertonschool.com"
] | 2713@holbertonschool.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.